sglang 0.3.1.post2__py3-none-any.whl → 0.3.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. sglang/bench_latency.py +12 -11
  2. sglang/bench_server_latency.py +0 -6
  3. sglang/srt/hf_transformers_utils.py +1 -0
  4. sglang/srt/layers/activation.py +3 -2
  5. sglang/srt/layers/attention_backend.py +6 -12
  6. sglang/srt/layers/fused_moe/patch.py +117 -0
  7. sglang/srt/layers/linear.py +1133 -0
  8. sglang/srt/layers/quantization/__init__.py +76 -0
  9. sglang/srt/layers/quantization/base_config.py +122 -0
  10. sglang/srt/managers/schedule_batch.py +3 -5
  11. sglang/srt/managers/tokenizer_manager.py +1 -0
  12. sglang/srt/managers/tp_worker.py +1 -1
  13. sglang/srt/mem_cache/radix_cache.py +5 -5
  14. sglang/srt/model_executor/cuda_graph_runner.py +10 -6
  15. sglang/srt/model_executor/forward_batch_info.py +2 -4
  16. sglang/srt/model_executor/model_runner.py +0 -3
  17. sglang/srt/models/baichuan.py +1 -1
  18. sglang/srt/models/chatglm.py +6 -6
  19. sglang/srt/models/commandr.py +7 -7
  20. sglang/srt/models/dbrx.py +7 -7
  21. sglang/srt/models/deepseek.py +7 -7
  22. sglang/srt/models/deepseek_v2.py +7 -7
  23. sglang/srt/models/exaone.py +6 -6
  24. sglang/srt/models/gemma.py +6 -6
  25. sglang/srt/models/gemma2.py +6 -6
  26. sglang/srt/models/gpt_bigcode.py +6 -6
  27. sglang/srt/models/grok.py +6 -6
  28. sglang/srt/models/internlm2.py +6 -6
  29. sglang/srt/models/llama.py +14 -6
  30. sglang/srt/models/llama_classification.py +1 -1
  31. sglang/srt/models/llava.py +1 -1
  32. sglang/srt/models/llavavid.py +1 -1
  33. sglang/srt/models/minicpm.py +6 -6
  34. sglang/srt/models/minicpm3.py +1 -1
  35. sglang/srt/models/mixtral.py +6 -6
  36. sglang/srt/models/mixtral_quant.py +6 -6
  37. sglang/srt/models/olmoe.py +1 -1
  38. sglang/srt/models/qwen.py +6 -6
  39. sglang/srt/models/qwen2.py +6 -6
  40. sglang/srt/models/qwen2_moe.py +7 -7
  41. sglang/srt/models/stablelm.py +6 -6
  42. sglang/srt/models/xverse.py +1 -1
  43. sglang/srt/models/xverse_moe.py +1 -1
  44. sglang/srt/models/yivl.py +1 -1
  45. sglang/srt/openai_api/adapter.py +7 -0
  46. sglang/srt/utils.py +21 -1
  47. sglang/test/runners.py +7 -9
  48. sglang/test/test_utils.py +39 -2
  49. sglang/version.py +1 -1
  50. {sglang-0.3.1.post2.dist-info → sglang-0.3.2.dist-info}/METADATA +8 -6
  51. {sglang-0.3.1.post2.dist-info → sglang-0.3.2.dist-info}/RECORD +54 -50
  52. {sglang-0.3.1.post2.dist-info → sglang-0.3.2.dist-info}/LICENSE +0 -0
  53. {sglang-0.3.1.post2.dist-info → sglang-0.3.2.dist-info}/WHEEL +0 -0
  54. {sglang-0.3.1.post2.dist-info → sglang-0.3.2.dist-info}/top_level.txt +0 -0
sglang/srt/layers/linear.py
@@ -0,0 +1,1133 @@
+ # Adapted from https://raw.githubusercontent.com/vllm-project/vllm/v0.5.5/vllm/model_executor/layers/linear.py
+
+ import logging
+ from abc import abstractmethod
+ from typing import Dict, List, Optional, Tuple
+
+ import torch
+ import torch.nn.functional as F
+ from torch.nn.parameter import Parameter, UninitializedParameter
+ from vllm.distributed import (
+     divide,
+     get_tensor_model_parallel_rank,
+     get_tensor_model_parallel_world_size,
+     split_tensor_along_last_dim,
+     tensor_model_parallel_all_gather,
+     tensor_model_parallel_all_reduce,
+ )
+
+ # workaround
+ from vllm.model_executor.layers.linear import LinearBase
+ from vllm.model_executor.parameter import (
+     BasevLLMParameter,
+     PackedvLLMParameter,
+     PerTensorScaleParameter,
+ )
+
+ from sglang.srt.layers.quantization.base_config import (
+     QuantizationConfig,
+     QuantizeMethodBase,
+ )
+ from sglang.srt.utils import set_weight_attrs
+
+ logger = logging.getLogger(__name__)
+
+ WEIGHT_LOADER_V2_SUPPORTED = [
+     "CompressedTensorsLinearMethod",
+     "AWQMarlinLinearMethod",
+     "AWQLinearMethod",
+     "GPTQMarlinLinearMethod",
+     "Fp8LinearMethod",
+     "MarlinLinearMethod",
+ ]
+
+
+ def adjust_marlin_shard(param, shard_size, shard_offset):
+     marlin_tile_size = getattr(param, "marlin_tile_size", None)
+     if marlin_tile_size is None:
+         return shard_size, shard_offset
+
+     return shard_size * marlin_tile_size, shard_offset * marlin_tile_size
+
+
+ def adjust_bitsandbytes_shard(
+     param: Parameter, qkv_offsets: Dict[str, Tuple[int, int]], loaded_shard_id: str
+ ) -> Tuple[int, int]:
+     """Adjust the quantization offsets and sizes for BitsAndBytes sharding."""
+
+     total, _ = qkv_offsets["total"]
+     orig_offset, orig_size = qkv_offsets[loaded_shard_id]
+
+     quantized_total = param.data.shape[0]
+     quantized_offset = orig_offset * quantized_total // total
+     quantized_size = orig_size * quantized_total // total
+
+     return quantized_size, quantized_offset
+
+
+ def adjust_scalar_to_fused_array(param, loaded_weight, shard_id):
+     """For fused modules (QKV and MLP) we have an array of length
+     N that holds 1 scale for each "logical" matrix. So the param
+     is an array of length N. The loaded_weight corresponds to
+     one of the shards on disk. Here, we slice the param based on
+     the shard_id for loading.
+     """
+     qkv_idxs = {"q": 0, "k": 1, "v": 2}
+
+     if isinstance(shard_id, str):
+         shard_id = qkv_idxs[shard_id]
+     elif not isinstance(shard_id, int):
+         raise ValueError(f"Unknown Shard Id {shard_id}")
+
+     # AutoFP8 scales do not have a shape
+     # compressed-tensors scales do have a shape
+     if len(loaded_weight.shape) != 0:
+         assert loaded_weight.shape[0] == 1
+         loaded_weight = loaded_weight[0]
+
+     return param[shard_id], loaded_weight
+
+
+ class LinearMethodBase(QuantizeMethodBase):
+     """Base class for different (maybe quantized) linear methods."""
+
+     @abstractmethod
+     def create_weights(
+         self,
+         layer: torch.nn.Module,
+         input_size_per_partition: int,
+         output_partition_sizes: List[int],
+         input_size: int,
+         output_size: int,
+         params_dtype: torch.dtype,
+         **extra_weight_attrs,
+     ):
+         """Create weights for a linear layer.
+         The weights will be set as attributes of the layer.
+
+         Args:
+             layer: The layer that is using the LinearMethodBase factory.
+             input_size_per_partition: Size of the weight input dim on rank X.
+             output_partition_sizes: Sizes of the output dim of each logical
+                 weight on rank X. E.g., output_partition_sizes for QKVLinear
+                 is a list contains the width of Wq, Wk, Wv on rank X.
+             input_size: Size of the input dim of the weight across all ranks.
+             output_size: Size of the output dim of the weight across all ranks.
+             params_dtype: Datatype of the parameters.
+         """
+         raise NotImplementedError
+
+     @abstractmethod
+     def apply(
+         self,
+         layer: torch.nn.Module,
+         x: torch.Tensor,
+         bias: Optional[torch.Tensor] = None,
+     ) -> torch.Tensor:
+         """Apply the weights in layer to the input tensor.
+         Expects create_weights to have been called before on the layer."""
+         raise NotImplementedError
+
+
+ class UnquantizedLinearMethod(LinearMethodBase):
+     """Linear method without quantization."""
+
+     def create_weights(
+         self,
+         layer: torch.nn.Module,
+         input_size_per_partition: int,
+         output_partition_sizes: List[int],
+         input_size: int,
+         output_size: int,
+         params_dtype: torch.dtype,
+         **extra_weight_attrs,
+     ):
+         weight = Parameter(
+             torch.empty(
+                 sum(output_partition_sizes),
+                 input_size_per_partition,
+                 dtype=params_dtype,
+             ),
+             requires_grad=False,
+         )
+         set_weight_attrs(weight, {"input_dim": 1, "output_dim": 0})
+         layer.register_parameter("weight", weight)
+         set_weight_attrs(weight, extra_weight_attrs)
+
+     def apply(
+         self,
+         layer: torch.nn.Module,
+         x: torch.Tensor,
+         bias: Optional[torch.Tensor] = None,
+     ) -> torch.Tensor:
+
+         return F.linear(x, layer.weight, bias)
+
+
+ class ReplicatedLinear(LinearBase):
+     """Replicated linear layer.
+
+     Args:
+         input_size: input dimension of the linear layer.
+         output_size: output dimension of the linear layer.
+         bias: If true, add bias.
+         skip_bias_add: If true, skip adding bias but instead return it.
+         params_dtype: Data type for the parameters.
+         quant_config: Quantization configure.
+         prefix: The name of the layer in the state dict, including all parents
+             (e.g. model.layers.0.qkv_proj)
+     """
+
+     def __init__(
+         self,
+         input_size: int,
+         output_size: int,
+         bias: bool = True,
+         skip_bias_add: bool = False,
+         params_dtype: Optional[torch.dtype] = None,
+         quant_config: Optional[QuantizationConfig] = None,
+         prefix: str = "",
+     ):
+         super().__init__(
+             input_size,
+             output_size,
+             skip_bias_add,
+             params_dtype,
+             quant_config,
+             prefix=prefix,
+         )
+
+         # All the linear layer supports quant method.
+         assert self.quant_method is not None
+         self.quant_method.create_weights(
+             self,
+             self.input_size,
+             [self.output_size],
+             self.input_size,
+             self.output_size,
+             self.params_dtype,
+             weight_loader=self.weight_loader,
+             prefix=prefix,
+         )
+
+         if bias:
+             self.bias = Parameter(
+                 torch.empty(self.output_size, dtype=self.params_dtype)
+             )
+             set_weight_attrs(
+                 self.bias,
+                 {
+                     "output_dim": 0,
+                     "weight_loader": self.weight_loader,
+                 },
+             )
+         else:
+             self.register_parameter("bias", None)
+
+     def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
+         # If the weight on disk does not have a shape, give it one
+         # (such scales for AutoFp8).
+         if len(loaded_weight.shape) == 0:
+             loaded_weight = loaded_weight.reshape(1)
+
+         assert param.size() == loaded_weight.size()
+         param.data.copy_(loaded_weight)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         bias = self.bias if not self.skip_bias_add else None
+         assert self.quant_method is not None
+         output = self.quant_method.apply(self, x, bias)
+         output_bias = self.bias if self.skip_bias_add else None
+         return output, output_bias
+
+     def extra_repr(self) -> str:
+         s = f"in_features={self.input_size}"
+         s += f", output_features={self.output_size}"
+         s += f", bias={self.bias is not None}"
+         return s
+
+
+ class ColumnParallelLinear(LinearBase):
+     """Linear layer with column parallelism.
+
+     The linear layer is defined as Y = XA + b. A is parallelized along
+     its second dimension as A = [A_1, ..., A_p].
+
+     Args:
+         input_size: first dimension of matrix A.
+         output_size: second dimension of matrix A.
+         bias: If true, add bias.
+         gather_output: If true, call all-gather on output and make Y available
+                        to all GPUs, otherwise, every GPU will have its output
+                        which is Y_i = XA_i
+         skip_bias_add: This was added to enable performance optimizations where
+                        bias can be fused with other element-wise operations. we
+                        skip adding bias but instead return it.
+         params_dtype: Data type for the parameters.
+         quant_config: Quantization configure.
+         output_sizes: list of output sizes packed into one output, like for QKV
+                       the list would be size 3.
+         prefix: The name of the layer in the state dict, including all parents
+             (e.g. model.layers.0.qkv_proj)
+     """
+
+     def __init__(
+         self,
+         input_size: int,
+         output_size: int,
+         bias: bool = True,
+         gather_output: bool = False,
+         skip_bias_add: bool = False,
+         params_dtype: Optional[torch.dtype] = None,
+         quant_config: Optional[QuantizationConfig] = None,
+         output_sizes: Optional[List[int]] = None,
+         prefix: str = "",
+     ):
+         super().__init__(
+             input_size, output_size, skip_bias_add, params_dtype, quant_config, prefix
+         )
+
+         self.gather_output = gather_output
+
+         # Divide the weight matrix along the last dimension.
+         tp_size = get_tensor_model_parallel_world_size()
+         assert self.quant_method is not None
+         self.output_size_per_partition = divide(self.output_size, tp_size)
+         self.output_partition_sizes = [self.output_size_per_partition]
+         # If QKV or MergedColumn, use output size of each partition.
+         if hasattr(self, "output_sizes"):
+             self.output_partition_sizes = [
+                 divide(output_size, tp_size) for output_size in self.output_sizes
+             ]
+
+         if output_sizes is None:
+             output_sizes = [output_size]
+
+         self.quant_method.create_weights(
+             layer=self,
+             input_size_per_partition=self.input_size,
+             output_partition_sizes=self.output_partition_sizes,
+             input_size=self.input_size,
+             output_size=self.output_size,
+             params_dtype=self.params_dtype,
+             weight_loader=(
+                 self.weight_loader_v2
+                 if self.quant_method.__class__.__name__ in WEIGHT_LOADER_V2_SUPPORTED
+                 else self.weight_loader
+             ),
+             prefix=prefix,
+         )
+         if bias:
+             self.bias = Parameter(
+                 torch.empty(self.output_size_per_partition, dtype=params_dtype)
+             )
+             set_weight_attrs(
+                 self.bias,
+                 {
+                     "output_dim": 0,
+                     "weight_loader": self.weight_loader,
+                 },
+             )
+         else:
+             self.register_parameter("bias", None)
+
+     def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
+         tp_rank = get_tensor_model_parallel_rank()
+         output_dim = getattr(param, "output_dim", None)
+
+         # Special case for GGUF
+         is_gguf_weight = getattr(param, "is_gguf_weight", False)
+         is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
+         if is_gguf_weight_type:
+             param.weight_type = loaded_weight.item()
+
+         # Materialize GGUF UninitializedParameter
+         if is_gguf_weight and isinstance(param, UninitializedParameter):
+             param.materialize(loaded_weight.shape, dtype=loaded_weight.dtype)
+
+         param_data = param.data
+         if output_dim is not None:
+             shard_size = param_data.shape[output_dim]
+             start_idx = tp_rank * shard_size
+             loaded_weight = loaded_weight.narrow(output_dim, start_idx, shard_size)
+
+         # Special case for loading scales off disk, which often do not
+         # have a shape (such as in the case of AutoFP8).
+         if len(loaded_weight.shape) == 0:
+             loaded_weight = loaded_weight.reshape(1)
+
+         assert param_data.shape == loaded_weight.shape
+         param_data.copy_(loaded_weight)
+
+     def weight_loader_v2(self, param: Parameter, loaded_weight: torch.Tensor):
+         # Special case for loading scales off disk, which often do not
+         # have a shape (such as in the case of AutoFP8).
+         if len(loaded_weight.shape) == 0:
+             assert loaded_weight.numel() == 1
+             loaded_weight = loaded_weight.reshape(1)
+         param.load_column_parallel_weight(loaded_weight=loaded_weight)
+
+     def forward(self, input_):
+         bias = self.bias if not self.skip_bias_add else None
+
+         # Matrix multiply.
+         assert self.quant_method is not None
+         output_parallel = self.quant_method.apply(self, input_, bias)
+         if self.gather_output:
+             # All-gather across the partitions.
+             output = tensor_model_parallel_all_gather(output_parallel)
+         else:
+             output = output_parallel
+         output_bias = self.bias if self.skip_bias_add else None
+         return output, output_bias
+
+     def extra_repr(self) -> str:
+         s = f"in_features={self.input_size}"
+         s += f", output_features={self.output_size_per_partition}"
+         s += f", bias={self.bias is not None}"
+         s += f", tp_size={get_tensor_model_parallel_world_size()}"
+         s += f", gather_output={self.gather_output}"
+         return s
+
+
+ class MergedColumnParallelLinear(ColumnParallelLinear):
+     """Packed linear layers with column parallelism.
+
+     Similar to ColumnParallelLinear, but the weight matrix is concatenated
+     along the output dimension. When the weight matrix is loaded, the
+     different partitions are sharded separately.
+
+     Args:
+         input_size: input dimension of the linear layer.
+         output_sizes: list of output dimensions of the linear layer.
+         bias: If true, add bias.
+         gather_output: If true, call all-gather on output and make the output
+                        available to all GPUs, otherwise, every GPU will have
+                        its own output.
+         skip_bias_add: This was added to enable performance optimizations where
+                        bias can be fused with other element-wise operations. we
+                        skip adding bias but instead return it.
+         params_dtype: Data type for the parameters.
+         quant_config: Quantization configure.
+         prefix: The name of the layer in the state dict, including all parents
+             (e.g. model.layers.0.qkv_proj)
+     """
+
+     def __init__(
+         self,
+         input_size: int,
+         output_sizes: List[int],
+         bias: bool = True,
+         gather_output: bool = False,
+         skip_bias_add: bool = False,
+         params_dtype: Optional[torch.dtype] = None,
+         quant_config: Optional[QuantizationConfig] = None,
+         prefix: str = "",
+     ):
+         self.output_sizes = output_sizes
+         tp_size = get_tensor_model_parallel_world_size()
+         assert all(output_size % tp_size == 0 for output_size in output_sizes)
+         super().__init__(
+             input_size=input_size,
+             output_size=sum(output_sizes),
+             bias=bias,
+             gather_output=gather_output,
+             skip_bias_add=skip_bias_add,
+             params_dtype=params_dtype,
+             quant_config=quant_config,
+             prefix=prefix,
+         )
+
+     def weight_loader(
+         self,
+         param: Parameter,
+         loaded_weight: torch.Tensor,
+         loaded_shard_id: Optional[int] = None,
+     ):
+
+         # Special case for GGUF
+         # initialize GGUF param after we know the quantize type
+         is_gguf_weight = getattr(param, "is_gguf_weight", False)
+         is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
+         if is_gguf_weight_type:
+             param.data[loaded_shard_id].copy_(loaded_weight)
+             param.shard_weight_type[loaded_shard_id] = loaded_weight.item()
+             return
+
+         if is_gguf_weight and isinstance(param, UninitializedParameter):
+             from gguf.constants import GGML_QUANT_SIZES
+
+             ori_shape = param.tensor_shape
+             weight_types = self.qweight_type.shard_weight_type.values()
+             row_size = []
+             for weight_type in weight_types:
+                 block_size, type_size = GGML_QUANT_SIZES[weight_type]
+                 row_size.append(ori_shape[1] // block_size * type_size)
+             q_shape = (ori_shape[0], max(row_size))
+             param.materialize(q_shape, dtype=loaded_weight.dtype)
+
+         param_data = param.data
+         output_dim = getattr(param, "output_dim", None)
+         # Special case for AQLM codebooks.
+         is_metadata = getattr(param, "is_metadata", False)
+         # Special case for per-tensor scale to load scalar into fused array.
+         needs_scalar_to_array = getattr(param, "needs_scalar_to_array", False)
+
+         if loaded_shard_id is None:
+             # Loaded weight is already fused on disk (qkv/mlp).
+             if output_dim is None:
+                 if needs_scalar_to_array:
+                     param_data, loaded_weight = adjust_scalar_to_fused_array(
+                         param_data, loaded_weight, 0
+                     )
+
+                 assert param_data.shape == loaded_weight.shape
+                 param_data.copy_(loaded_weight)
+                 return
+             current_shard_offset = 0
+             shard_offsets: List[Tuple[int, int, int]] = []
+             for i, output_size in enumerate(self.output_sizes):
+                 shard_offsets.append((i, current_shard_offset, output_size))
+                 current_shard_offset += output_size
+             packed_dim = getattr(param, "packed_dim", None)
+             for shard_id, shard_offset, shard_size in shard_offsets:
+                 # Special case for Quantization.
+                 # If quantized, we need to adjust the offset and size to account
+                 # for the packing.
+                 if packed_dim == output_dim:
+                     shard_size = shard_size // param.pack_factor
+                     shard_offset = shard_offset // param.pack_factor
+                     # Special case for Marlin.
+                     shard_size, shard_offset = adjust_marlin_shard(
+                         param, shard_size, shard_offset
+                     )
+
+                 loaded_weight_shard = loaded_weight.narrow(
+                     output_dim, shard_offset, shard_size
+                 )
+                 self.weight_loader(param, loaded_weight_shard, shard_id)
+             return
+
+         assert loaded_shard_id < len(self.output_sizes)
+         tp_rank = get_tensor_model_parallel_rank()
+         tp_size = get_tensor_model_parallel_world_size()
+         if output_dim is not None:
+             shard_offset = sum(self.output_sizes[:loaded_shard_id]) // tp_size
+             shard_size = self.output_sizes[loaded_shard_id] // tp_size
+             # Special case for quantization.
+             # If quantized, we need to adjust the offset and size to account
+             # for the packing.
+             packed_dim = getattr(param, "packed_dim", None)
+             if packed_dim == output_dim:
+                 shard_size = shard_size // param.pack_factor
+                 shard_offset = shard_offset // param.pack_factor
+                 # Special case for Marlin.
+                 shard_size, shard_offset = adjust_marlin_shard(
+                     param, shard_size, shard_offset
+                 )
+
+             use_bitsandbytes = getattr(param, "use_bitsandbytes", False)
+             if use_bitsandbytes:
+                 shard_size = loaded_weight.shape[output_dim]
+                 shard_offset = loaded_weight.shape[output_dim] * loaded_shard_id
+
+             if is_gguf_weight:
+                 tp_size = get_tensor_model_parallel_world_size()
+                 output_dim = getattr(param, "output_dim", None)
+                 shard_shape = list(loaded_weight.shape)
+                 shard_shape[output_dim] = shard_shape[output_dim] // tp_size
+                 param.shard_id.append(loaded_shard_id)
+                 param.shard_size[loaded_shard_id] = shard_shape
+
+                 input_dim = getattr(param, "input_dim", None)
+                 input_size = loaded_weight.shape[input_dim]
+                 param_data = param_data.narrow(input_dim, 0, input_size)
+
+             param_data = param_data.narrow(output_dim, shard_offset, shard_size)
+             start_idx = tp_rank * shard_size
+             loaded_weight = loaded_weight.narrow(output_dim, start_idx, shard_size)
+         # Special case for AQLM codebooks.
+         elif is_metadata:
+             # metadata indicates fixed size concatenated along dim 0
+             shard_size = loaded_weight.shape[0]
+             shard_offset = loaded_shard_id * shard_size
+             param_data = param_data.narrow(0, shard_offset, shard_size)
+
+         # Special case for per-tensor scales in fused case.
+         elif needs_scalar_to_array:
+             param_data, loaded_weight = adjust_scalar_to_fused_array(
+                 param_data, loaded_weight, loaded_shard_id
+             )
+
+         else:
+             ignore_warning = getattr(param, "ignore_warning", False)
+             if not ignore_warning:
+                 logger.warning(
+                     "Loading a weight without `output_dim` attribute in "
+                     "MergedColumnParallelLinear, assume the weight is "
+                     "the same for all partitions."
+                 )
+
+         assert param_data.shape == loaded_weight.shape
+         param_data.copy_(loaded_weight)
+
+     def _load_fused_module_from_checkpoint(
+         self, param: BasevLLMParameter, loaded_weight: torch.Tensor
+     ):
+         """
+         Handle special case for models where MLP layers are already
+         fused on disk. In this case, we have no shard id. This function
+         determmines the shard id by splitting these layers and then calls
+         the weight loader using the shard id.
+
+         An example of a model with these fused layers:
+         https://huggingface.co/microsoft/Phi-3-mini-4k-instruct
+         """
+
+         current_shard_offset = 0
+         shard_offsets: List[Tuple[int, int, int]] = []
+         for i, output_size in enumerate(self.output_sizes):
+             shard_offsets.append((i, current_shard_offset, output_size))
+             current_shard_offset += output_size
+
+         for shard_id, shard_offset, shard_size in shard_offsets:
+             # Special case for Quantization.
+             # If quantized, we need to adjust the offset and size to account
+             # for the packing.
+             if (
+                 isinstance(param, PackedvLLMParameter)
+                 and param.packed_dim == param.output_dim
+             ):
+                 shard_size, shard_offset = param.adjust_shard_indexes_for_packing(
+                     shard_size=shard_size, shard_offset=shard_offset
+                 )
+
+             loaded_weight_shard = loaded_weight.narrow(
+                 param.output_dim, shard_offset, shard_size
+             )
+             self.weight_loader_v2(param, loaded_weight_shard, shard_id)
+
+     def weight_loader_v2(
+         self,
+         param: BasevLLMParameter,
+         loaded_weight: torch.Tensor,
+         loaded_shard_id: Optional[int] = None,
+     ):
+         if loaded_shard_id is None:
+             if isinstance(param, PerTensorScaleParameter):
+                 param.load_merged_column_weight(loaded_weight=loaded_weight, shard_id=0)
+                 return
+             elif type(param) is BasevLLMParameter:
+                 param.load_merged_column_weight(loaded_weight=loaded_weight)
+                 return
+             self._load_fused_module_from_checkpoint(param, loaded_weight)
+             return
+
+         assert loaded_shard_id < len(self.output_sizes)
+
+         tp_size = get_tensor_model_parallel_world_size()
+         shard_offset = sum(self.output_sizes[:loaded_shard_id]) // tp_size
+         shard_size = self.output_sizes[loaded_shard_id] // tp_size
+
+         param.load_merged_column_weight(
+             loaded_weight=loaded_weight,
+             shard_id=loaded_shard_id,
+             shard_offset=shard_offset,
+             shard_size=shard_size,
+         )
+
+
+ class QKVParallelLinear(ColumnParallelLinear):
+     """Linear layers for the attention's QKV transformation.
+
+     Linear layers for the linear transformation of the query, key, and value
+     vectors in the attention layer. The weight matrix is concatenated along
+     the output dimension. The layer is parallelized along the head dimension.
+     When the number of key/value heads is smaller than the number of query
+     heads (e.g., multi-query/grouped-query attention), the key/value head may
+     be replicated while the query heads are partitioned.
+
+     Args:
+         hidden_size: input hidden state size of the transformer.
+         head_size: size of each attention head.
+         total_num_heads: total number of attention query heads.
+         total_num_kv_heads: total number of attention key/value heads. If
+                             None, assume total_num_kv_heads = total_num_heads.
+         bias: If true, add bias.
+         skip_bias_add: This was added to enable performance optimizations where
+                        bias can be fused with other element-wise operations. we
+                        skip adding bias but instead return it.
+         params_dtype: Data type for the parameters.
+         quant_config: Quantization configure.
+         prefix: The name of the layer in the state dict, including all parents
+             (e.g. model.layers.0.qkv_proj)
+     """
+
+     def __init__(
+         self,
+         hidden_size: int,
+         head_size: int,
+         total_num_heads: int,
+         total_num_kv_heads: Optional[int] = None,
+         bias: bool = True,
+         skip_bias_add: bool = False,
+         params_dtype: Optional[torch.dtype] = None,
+         quant_config: Optional[QuantizationConfig] = None,
+         prefix: str = "",
+     ):
+         self.hidden_size = hidden_size
+         self.head_size = head_size
+         self.total_num_heads = total_num_heads
+         if total_num_kv_heads is None:
+             total_num_kv_heads = total_num_heads
+         self.total_num_kv_heads = total_num_kv_heads
+         # Divide the weight matrix along the last dimension.
+         tp_size = get_tensor_model_parallel_world_size()
+         self.num_heads = divide(self.total_num_heads, tp_size)
+         if tp_size >= self.total_num_kv_heads:
+             self.num_kv_heads = 1
+             self.num_kv_head_replicas = divide(tp_size, self.total_num_kv_heads)
+         else:
+             self.num_kv_heads = divide(self.total_num_kv_heads, tp_size)
+             self.num_kv_head_replicas = 1
+         input_size = self.hidden_size
+         output_size = (
+             (self.num_heads + 2 * self.num_kv_heads) * tp_size * self.head_size
+         )
+         self.output_sizes = [
+             self.num_heads * self.head_size * tp_size,  # q_proj
+             self.num_kv_heads * self.head_size * tp_size,  # k_proj
+             self.num_kv_heads * self.head_size * tp_size,  # v_proj
+         ]
+
+         super().__init__(
+             input_size=input_size,
+             output_size=output_size,
+             bias=bias,
+             gather_output=False,
+             skip_bias_add=skip_bias_add,
+             params_dtype=params_dtype,
+             quant_config=quant_config,
+             prefix=prefix,
+         )
+
+     def _get_shard_offset_mapping(self, loaded_shard_id: str):
+         shard_offset_mapping = {
+             "q": 0,
+             "k": self.num_heads * self.head_size,
+             "v": (self.num_heads + self.num_kv_heads) * self.head_size,
+             "total": (self.num_heads + 2 * self.num_kv_heads) * self.head_size,
+         }
+         return shard_offset_mapping.get(loaded_shard_id)
+
+     def _get_shard_size_mapping(self, loaded_shard_id: str):
+         shard_size_mapping = {
+             "q": self.num_heads * self.head_size,
+             "k": self.num_kv_heads * self.head_size,
+             "v": self.num_kv_heads * self.head_size,
+         }
+         return shard_size_mapping.get(loaded_shard_id)
+
+     def _load_fused_module_from_checkpoint(
+         self, param: BasevLLMParameter, loaded_weight: torch.Tensor
+     ):
+         """
+         Handle special case for models where QKV layers are already
+         fused on disk. In this case, we have no shard id. This function
+         determmines the shard id by splitting these layers and then calls
+         the weight loader using the shard id.
+
+         An example of a model with these fused layers:
+         https://huggingface.co/microsoft/Phi-3-mini-4k-instruct
+         """
+         shard_offsets = [
+             # (shard_id, shard_offset, shard_size)
+             ("q", 0, self.total_num_heads * self.head_size),
+             (
+                 "k",
+                 self.total_num_heads * self.head_size,
+                 self.total_num_kv_heads * self.head_size,
+             ),
+             (
+                 "v",
+                 (self.total_num_heads + self.total_num_kv_heads) * self.head_size,
+                 self.total_num_kv_heads * self.head_size,
+             ),
+         ]
+
+         for shard_id, shard_offset, shard_size in shard_offsets:
+             # Special case for Quantization.
+             # If quantized, we need to adjust the offset and size to account
+             # for the packing.
+             if (
+                 isinstance(param, PackedvLLMParameter)
+                 and param.packed_dim == param.output_dim
+             ):
+                 shard_size, shard_offset = param.adjust_shard_indexes_for_packing(
+                     shard_size=shard_size, shard_offset=shard_offset
+                 )
+
+             loaded_weight_shard = loaded_weight.narrow(
+                 param.output_dim, shard_offset, shard_size
+             )
+             self.weight_loader_v2(param, loaded_weight_shard, shard_id)
+
+     def weight_loader_v2(
+         self,
+         param: BasevLLMParameter,
+         loaded_weight: torch.Tensor,
+         loaded_shard_id: Optional[str] = None,
+     ):
+         if loaded_shard_id is None:  # special case for certain models
+             if isinstance(param, PerTensorScaleParameter):
+                 param.load_merged_column_weight(loaded_weight=loaded_weight, shard_id=0)
+                 return
+             elif type(param) is BasevLLMParameter:
+                 param.load_merged_column_weight(loaded_weight=loaded_weight)
+                 return
+             self._load_fused_module_from_checkpoint(param, loaded_weight)
+             return
+
+         assert loaded_shard_id in ["q", "k", "v"]
+
+         shard_offset = self._get_shard_offset_mapping(loaded_shard_id)
+         shard_size = self._get_shard_size_mapping(loaded_shard_id)
+
+         param.load_qkv_weight(
+             loaded_weight=loaded_weight,
+             num_heads=self.num_kv_head_replicas,
+             shard_id=loaded_shard_id,
+             shard_offset=shard_offset,
+             shard_size=shard_size,
+         )
+
+     def weight_loader(
+         self,
+         param: Parameter,
+         loaded_weight: torch.Tensor,
+         loaded_shard_id: Optional[str] = None,
+     ):
+
+         # Special case for GGUF
+         # initialize GGUF param after we know the quantize type
+         is_gguf_weight = getattr(param, "is_gguf_weight", False)
+         is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
+         if is_gguf_weight_type and loaded_shard_id is not None:
+             idx_map = {"q": 0, "k": 1, "v": 2}
+             param.data[idx_map[loaded_shard_id]].copy_(loaded_weight)
+             param.shard_weight_type[loaded_shard_id] = loaded_weight.item()
+             return
+
+         if is_gguf_weight and isinstance(param, UninitializedParameter):
+             from gguf.constants import GGML_QUANT_SIZES
+
+             ori_shape = param.tensor_shape
+             weight_types = self.qweight_type.shard_weight_type.values()
+             row_size = []
+             for weight_type in weight_types:
+                 block_size, type_size = GGML_QUANT_SIZES[weight_type]
+                 row_size.append(ori_shape[1] // block_size * type_size)
+             q_shape = (ori_shape[0], max(row_size))
+             param.materialize(q_shape, dtype=loaded_weight.dtype)
+
+         param_data = param.data
+         output_dim = getattr(param, "output_dim", None)
+         # Special case for AQLM codebooks.
+         is_metadata = getattr(param, "is_metadata", False)
+
+         # Special case for per-tensor scales in fused case.
+         needs_scalar_to_array = getattr(param, "needs_scalar_to_array", False)
+
+         if loaded_shard_id is None:
+             # Loaded weight is already fused on disk (qkv/mlp).
+             if output_dim is None:
+                 if needs_scalar_to_array:
+                     param_data, loaded_weight = adjust_scalar_to_fused_array(
+                         param_data, loaded_weight, 0
+                     )
+
+                 assert param_data.shape == loaded_weight.shape
+                 param_data.copy_(loaded_weight)
+                 return
+             shard_offsets = [
+                 # (shard_id, shard_offset, shard_size)
+                 ("q", 0, self.total_num_heads * self.head_size),
+                 (
+                     "k",
+                     self.total_num_heads * self.head_size,
+                     self.total_num_kv_heads * self.head_size,
+                 ),
+                 (
+                     "v",
+                     (self.total_num_heads + self.total_num_kv_heads) * self.head_size,
+                     self.total_num_kv_heads * self.head_size,
+                 ),
+             ]
+             packed_dim = getattr(param, "packed_dim", None)
+             for shard_id, shard_offset, shard_size in shard_offsets:
+                 # Special case for Quantized Weights.
+                 # If quantized, we need to adjust the offset and size to account
+                 # for the packing.
+                 if packed_dim == output_dim:
+                     shard_size = shard_size // param.pack_factor
+                     shard_offset = shard_offset // param.pack_factor
+
+                     # Special case for Marlin.
+                     shard_size, shard_offset = adjust_marlin_shard(
+                         param, shard_size, shard_offset
+                     )
+
+                 loaded_weight_shard = loaded_weight.narrow(
+                     output_dim, shard_offset, shard_size
+                 )
+                 self.weight_loader(param, loaded_weight_shard, shard_id)
+             return
+
+         tp_rank = get_tensor_model_parallel_rank()
+         assert loaded_shard_id in ["q", "k", "v"]
+
+         # If output dim is defined, use the default loading process.
+         if output_dim is not None:
+             if loaded_shard_id == "q":
+                 shard_offset = 0
+                 shard_size = self.num_heads * self.head_size
+             elif loaded_shard_id == "k":
+                 shard_offset = self.num_heads * self.head_size
+                 shard_size = self.num_kv_heads * self.head_size
+             elif loaded_shard_id == "v":
+                 shard_offset = (self.num_heads + self.num_kv_heads) * self.head_size
+                 shard_size = self.num_kv_heads * self.head_size
+             # Special case for Quantized Weights.
+             # If quantized, we need to adjust the offset and size to account
+             # for the packing.
+             packed_dim = getattr(param, "packed_dim", None)
+             if packed_dim == output_dim:
+                 shard_size = shard_size // param.pack_factor
+                 shard_offset = shard_offset // param.pack_factor
+
+                 # Special case for Marlin.
+                 shard_size, shard_offset = adjust_marlin_shard(
+                     param, shard_size, shard_offset
+                 )
+
+             use_bitsandbytes = getattr(param, "use_bitsandbytes", False)
+             if use_bitsandbytes:
+                 orig_qkv_offsets = {
+                     "q": (0, self.num_heads * self.head_size),
+                     "k": (
+                         self.num_heads * self.head_size,
+                         self.num_kv_heads * self.head_size,
+                     ),
+                     "v": (
+                         (self.num_heads + self.num_kv_heads) * self.head_size,
+                         self.num_kv_heads * self.head_size,
+                     ),
+                     "total": (
+                         (self.num_heads + 2 * self.num_kv_heads) * self.head_size,
+                         0,
+                     ),
+                 }
+                 shard_size, shard_offset = adjust_bitsandbytes_shard(
+                     param, orig_qkv_offsets, loaded_shard_id
+                 )
+
+             if is_gguf_weight:
+                 tp_size = get_tensor_model_parallel_world_size()
+                 output_dim = getattr(param, "output_dim", None)
+                 shard_shape = list(loaded_weight.shape)
+                 shard_shape[output_dim] = shard_shape[output_dim] // tp_size
+                 param.shard_id.append(loaded_shard_id)
+                 param.shard_size[loaded_shard_id] = shard_shape
+
+                 input_dim = getattr(param, "input_dim", None)
+                 input_size = loaded_weight.shape[input_dim]
+                 param_data = param_data.narrow(input_dim, 0, input_size)
+
+             param_data = param_data.narrow(output_dim, shard_offset, shard_size)
+             if loaded_shard_id == "q":
+                 shard_id = tp_rank
+             else:
+                 shard_id = tp_rank // self.num_kv_head_replicas
+             start_idx = shard_id * shard_size
+             loaded_weight = loaded_weight.narrow(output_dim, start_idx, shard_size)
+         # Special case for for AQLM codebooks.
+         elif is_metadata:
+             # metadata indicates fixed size concatenated along dim 0
+             shard_size = loaded_weight.shape[0]
+             shard_index = ["q", "k", "v"].index(loaded_shard_id)
+             param_data = param_data.narrow(0, shard_index * shard_size, shard_size)
+         # Special case for per-tensor scales in fused case.
+         elif needs_scalar_to_array:
+             param_data, loaded_weight = adjust_scalar_to_fused_array(
+                 param_data, loaded_weight, loaded_shard_id
+             )
+         else:
+             ignore_warning = getattr(param, "ignore_warning", False)
+             if not ignore_warning:
+                 logger.warning(
+                     "Loading a weight without `output_dim` attribute in "
+                     "QKVParallelLinear, assume the weight is the same "
+                     "for all partitions."
+                 )
+
+         assert param_data.shape == loaded_weight.shape
+         param_data.copy_(loaded_weight)
+
+
+ class RowParallelLinear(LinearBase):
+     """Linear layer with row parallelism.
+
+     The linear layer is defined as Y = XA + b. A is parallelized along
+     its first dimension and X along its second dimension as:
+                -   -
+               | A_1 |
+               | .   |
+           A = | .   |        X = [X_1, ..., X_p]
+               | .   |
+               | A_p |
+                -   -
+     Arguments:
+         input_size: first dimension of matrix A.
+         output_size: second dimension of matrix A.
+         bias: If true, add bias. Note that bias is not parallelized.
+         input_is_parallel: If true, we assume that the input is already
+                            split across the GPUs and we do not split
+                            again.
+         skip_bias_add: This was added to enable performance optimization where
+                        bias can be fused with other element-wise operations.
+                        We skip adding bias but instead return it.
+         params_dtype: Data type for the parameters.
+         quant_config: Quantization configure.
+     """
+
+     def __init__(
+         self,
+         input_size: int,
+         output_size: int,
+         bias: bool = True,
+         input_is_parallel: bool = True,
+         skip_bias_add: bool = False,
+         params_dtype: Optional[torch.dtype] = None,
+         reduce_results: bool = True,
+         quant_config: Optional[QuantizationConfig] = None,
+         prefix: str = "",
+     ):
+         super().__init__(
+             input_size, output_size, skip_bias_add, params_dtype, quant_config, prefix
+         )
+
+         self.input_is_parallel = input_is_parallel
+         self.reduce_results = reduce_results
+
+         # Divide the weight matrix along the last dimension.
+         self.tp_rank = get_tensor_model_parallel_rank()
+         self.tp_size = get_tensor_model_parallel_world_size()
+         self.input_size_per_partition = divide(input_size, self.tp_size)
+         assert self.quant_method is not None
+
+         self.quant_method.create_weights(
+             layer=self,
+             input_size_per_partition=self.input_size_per_partition,
+             output_partition_sizes=[self.output_size],
+             input_size=self.input_size,
+             output_size=self.output_size,
+             params_dtype=self.params_dtype,
+             weight_loader=(
+                 self.weight_loader_v2
+                 if self.quant_method.__class__.__name__ in WEIGHT_LOADER_V2_SUPPORTED
+                 else self.weight_loader
+             ),
+             prefix=prefix,
+         )
+         if not reduce_results and (bias and not skip_bias_add):
+             raise ValueError(
+                 "When not reduce the results, adding bias to the "
+                 "results can lead to incorrect results"
+             )
+
+         if bias:
+             self.bias = Parameter(torch.empty(self.output_size, dtype=params_dtype))
+             set_weight_attrs(
+                 self.bias,
+                 {
+                     "output_dim": 0,
+                     "weight_loader": self.weight_loader,
+                 },
+             )
+         else:
+             self.register_parameter("bias", None)
+
+     def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
+         tp_rank = get_tensor_model_parallel_rank()
+         tp_size = get_tensor_model_parallel_world_size()
+         input_dim = getattr(param, "input_dim", None)
+
+         # Special case for GGUF
+         is_gguf_weight = getattr(param, "is_gguf_weight", False)
+         is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
+         if is_gguf_weight_type:
+             param.weight_type = loaded_weight.item()
+
+         # Materialize GGUF UninitializedParameter
+         if is_gguf_weight and isinstance(param, UninitializedParameter):
+             weight_shape = list(loaded_weight.shape)
+             if input_dim:
+                 weight_shape[input_dim] = weight_shape[input_dim] // tp_size
+             param.materialize(tuple(weight_shape), dtype=loaded_weight.dtype)
+
+         param_data = param.data
+         if input_dim is not None:
+             shard_size = param_data.shape[input_dim]
+             start_idx = tp_rank * shard_size
+             loaded_weight = loaded_weight.narrow(input_dim, start_idx, shard_size)
+
+         # Special case for loading scales off disk, which often do not
+         # have a shape (such as in the case of AutoFP8).
+         if len(loaded_weight.shape) == 0:
+             loaded_weight = loaded_weight.reshape(1)
+
+         assert param_data.shape == loaded_weight.shape
+         param_data.copy_(loaded_weight)
+
+     def weight_loader_v2(self, param: BasevLLMParameter, loaded_weight: torch.Tensor):
+
+         # Special case for loading scales off disk, which often do not
+         # have a shape (such as in the case of AutoFP8).
+         if len(loaded_weight.shape) == 0:
+             assert loaded_weight.numel() == 1
+             loaded_weight = loaded_weight.reshape(1)
+
+         param.load_row_parallel_weight(loaded_weight=loaded_weight)
+
+     def forward(self, input_):
+         if self.input_is_parallel:
+             input_parallel = input_
+         else:
+             tp_rank = get_tensor_model_parallel_rank()
+             splitted_input = split_tensor_along_last_dim(
+                 input_, num_partitions=self.tp_size
+             )
+             input_parallel = splitted_input[tp_rank].contiguous()
+
+         # Matrix multiply.
+         assert self.quant_method is not None
+         # Only fuse bias add into GEMM for rank 0 (this ensures that
+         # bias will not get added more than once in TP>1 case)
+         bias_ = None if (self.tp_rank > 0 or self.skip_bias_add) else self.bias
+         output_parallel = self.quant_method.apply(self, input_parallel, bias=bias_)
+         if self.reduce_results and self.tp_size > 1:
+             output = tensor_model_parallel_all_reduce(output_parallel)
+         else:
+             output = output_parallel
+
+         output_bias = self.bias if self.skip_bias_add else None
+
+         return output, output_bias
+
+     def extra_repr(self) -> str:
+         s = f"input_features={self.input_size_per_partition}"
+         s += f", output_features={self.output_size}"
+         s += f", bias={self.bias is not None}"
+         s += f", tp_size={self.tp_size}"
+         s += f", reduce_results={self.reduce_results}"
+         return s
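
For orientation, below is a minimal sketch of how the layers added in this file are typically composed in a model definition: the merged column-parallel projection shards its packed output dimension across tensor-parallel ranks, the row-parallel projection shards its input dimension and all-reduces the partial results, and every forward call returns an (output, output_bias) tuple. The sketch is illustrative only; the module name, dimensions, and activation are assumptions rather than part of this diff (it mirrors the gate_up_proj / down_proj pattern used by model files such as sglang/srt/models/llama.py), and it assumes vLLM's tensor-parallel process groups have already been initialized.

import torch
from torch import nn

from sglang.srt.layers.linear import MergedColumnParallelLinear, RowParallelLinear


class ToyParallelMLP(nn.Module):
    # Hypothetical module for illustration only.
    def __init__(self, hidden_size: int = 4096, intermediate_size: int = 14336):
        super().__init__()
        # Two column-parallel projections packed into one weight; each rank
        # holds a (2 * intermediate_size / tp_size) slice of the output dim.
        self.gate_up_proj = MergedColumnParallelLinear(
            hidden_size, [intermediate_size] * 2, bias=False
        )
        # Row-parallel projection: the input dim is sharded and the partial
        # results are all-reduced across ranks.
        self.down_proj = RowParallelLinear(intermediate_size, hidden_size, bias=False)
        self.act = nn.SiLU()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        gate_up, _ = self.gate_up_proj(x)  # forward returns (output, output_bias)
        gate, up = gate_up.chunk(2, dim=-1)
        out, _ = self.down_proj(self.act(gate) * up)
        return out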