sglang 0.1.20__py3-none-any.whl → 0.1.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. sglang/__init__.py +8 -8
  2. sglang/api.py +1 -1
  3. sglang/backend/runtime_endpoint.py +14 -4
  4. sglang/backend/vertexai.py +5 -4
  5. sglang/bench.py +627 -0
  6. sglang/bench_latency.py +22 -20
  7. sglang/bench_serving.py +758 -0
  8. sglang/check_env.py +171 -0
  9. sglang/global_config.py +3 -1
  10. sglang/lang/backend/__init__.py +0 -0
  11. sglang/lang/backend/anthropic.py +77 -0
  12. sglang/lang/backend/base_backend.py +80 -0
  13. sglang/lang/backend/litellm.py +90 -0
  14. sglang/lang/backend/openai.py +438 -0
  15. sglang/lang/backend/runtime_endpoint.py +283 -0
  16. sglang/lang/backend/vertexai.py +149 -0
  17. sglang/lang/chat_template.py +2 -2
  18. sglang/lang/ir.py +3 -3
  19. sglang/lang/tracer.py +1 -1
  20. sglang/launch_server.py +1 -1
  21. sglang/launch_server_llavavid.py +1 -4
  22. sglang/srt/conversation.py +1 -1
  23. sglang/srt/layers/context_flashattention_nopad.py +0 -29
  24. sglang/srt/layers/extend_attention.py +0 -39
  25. sglang/srt/layers/linear.py +869 -0
  26. sglang/srt/layers/quantization/__init__.py +49 -0
  27. sglang/srt/layers/quantization/fp8.py +662 -0
  28. sglang/srt/layers/radix_attention.py +31 -5
  29. sglang/srt/layers/token_attention.py +1 -51
  30. sglang/srt/managers/controller/cuda_graph_runner.py +44 -18
  31. sglang/srt/managers/controller/infer_batch.py +76 -72
  32. sglang/srt/managers/controller/manager_multi.py +109 -98
  33. sglang/srt/managers/controller/manager_single.py +105 -50
  34. sglang/srt/managers/controller/model_runner.py +42 -18
  35. sglang/srt/managers/controller/radix_cache.py +4 -3
  36. sglang/srt/managers/controller/schedule_heuristic.py +4 -0
  37. sglang/srt/managers/controller/tp_worker.py +143 -156
  38. sglang/srt/managers/detokenizer_manager.py +49 -5
  39. sglang/srt/managers/io_struct.py +36 -17
  40. sglang/srt/managers/tokenizer_manager.py +228 -125
  41. sglang/srt/memory_pool.py +46 -58
  42. sglang/srt/model_loader/model_loader.py +277 -0
  43. sglang/srt/model_loader/utils.py +260 -0
  44. sglang/srt/models/chatglm.py +1 -0
  45. sglang/srt/models/dbrx.py +1 -0
  46. sglang/srt/models/grok.py +1 -0
  47. sglang/srt/models/internlm2.py +317 -0
  48. sglang/srt/models/llama2.py +65 -16
  49. sglang/srt/models/llama_classification.py +1 -0
  50. sglang/srt/models/llava.py +1 -0
  51. sglang/srt/models/llavavid.py +1 -0
  52. sglang/srt/models/minicpm.py +2 -8
  53. sglang/srt/models/mixtral.py +1 -0
  54. sglang/srt/models/mixtral_quant.py +1 -0
  55. sglang/srt/models/qwen.py +1 -0
  56. sglang/srt/models/qwen2.py +6 -0
  57. sglang/srt/models/qwen2_moe.py +130 -108
  58. sglang/srt/models/stablelm.py +1 -0
  59. sglang/srt/openai_api/adapter.py +432 -0
  60. sglang/srt/openai_api/api_adapter.py +432 -0
  61. sglang/srt/openai_api/openai_api_adapter.py +431 -0
  62. sglang/srt/openai_api/openai_protocol.py +207 -0
  63. sglang/srt/openai_api/protocol.py +208 -0
  64. sglang/srt/openai_protocol.py +17 -0
  65. sglang/srt/sampling_params.py +2 -0
  66. sglang/srt/server.py +114 -90
  67. sglang/srt/server_args.py +27 -17
  68. sglang/srt/utils.py +17 -118
  69. sglang/test/test_conversation.py +1 -1
  70. sglang/test/test_openai_protocol.py +1 -1
  71. sglang/test/test_programs.py +1 -1
  72. sglang/test/test_utils.py +2 -2
  73. {sglang-0.1.20.dist-info → sglang-0.1.22.dist-info}/METADATA +157 -159
  74. sglang-0.1.22.dist-info/RECORD +103 -0
  75. {sglang-0.1.20.dist-info → sglang-0.1.22.dist-info}/WHEEL +1 -1
  76. sglang-0.1.20.dist-info/RECORD +0 -82
  77. {sglang-0.1.20.dist-info → sglang-0.1.22.dist-info}/LICENSE +0 -0
  78. {sglang-0.1.20.dist-info → sglang-0.1.22.dist-info}/top_level.txt +0 -0
sglang/srt/layers/linear.py (new file)
@@ -0,0 +1,869 @@
1
+ # temporarily adapted from https://github.com/vllm-project/vllm/blob/e76466dde2bc9525d55165ceaa600d298c7bf773/vllm/model_executor/layers/linear.py
2
+ # FIXME: refactor the linear abstraction
3
+ from abc import abstractmethod
4
+ from typing import Dict, List, Optional, Tuple
5
+
6
+ import torch
7
+ import torch.nn.functional as F
8
+ from torch.nn.parameter import Parameter
9
+ from vllm.distributed import (
10
+ divide,
11
+ get_tensor_model_parallel_rank,
12
+ get_tensor_model_parallel_world_size,
13
+ split_tensor_along_last_dim,
14
+ tensor_model_parallel_all_gather,
15
+ tensor_model_parallel_all_reduce,
16
+ )
17
+ from vllm.logger import init_logger
18
+ from vllm.model_executor.layers.quantization.base_config import (
19
+ QuantizationConfig,
20
+ QuantizeMethodBase,
21
+ )
22
+ from vllm.model_executor.utils import set_weight_attrs
23
+
24
+ logger = init_logger(__name__)
25
+
26
+
27
+ def adjust_marlin_shard(param, shard_size, shard_offset):
28
+ marlin_tile_size = getattr(param, "marlin_tile_size", None)
29
+ if marlin_tile_size is None:
30
+ return shard_size, shard_offset
31
+
32
+ return shard_size * marlin_tile_size, shard_offset * marlin_tile_size
33
+
34
+
35
+ def adjust_bitsandbytes_shard(
36
+ param: Parameter, qkv_offsets: Dict[str, Tuple[int, int]], loaded_shard_id: str
37
+ ) -> Tuple[int, int]:
38
+ """Adjust the quantization offsets and sizes for BitsAndBytes sharding."""
39
+
40
+ total, _ = qkv_offsets["total"]
41
+ orig_offset, orig_size = qkv_offsets[loaded_shard_id]
42
+
43
+ quantized_total = param.data.shape[0]
44
+ quantized_offset = orig_offset * quantized_total // total
45
+ quantized_size = orig_size * quantized_total // total
46
+
47
+ return quantized_size, quantized_offset
48
+
49
+
50
+ def adjust_scalar_to_fused_array(param, loaded_weight, shard_id):
51
+ """For fused modules (QKV and MLP) we have an array of length
52
+ N that holds 1 scale for each "logical" matrix. So the param
53
+ is an array of length N. The loaded_weight corresponds to
54
+ one of the shards on disk. Here, we slice the param based on
55
+ the shard_id for loading.
56
+ """
57
+ qkv_idxs = {"q": 0, "k": 1, "v": 2}
58
+
59
+ if isinstance(shard_id, str):
60
+ shard_id = qkv_idxs[shard_id]
61
+ elif not isinstance(shard_id, int):
62
+ raise ValueError(f"Unknown Shard Id {shard_id}")
63
+
64
+ # AutoFP8 scales do not have a shape
65
+ # compressed-tensors scales do have a shape
66
+ if len(loaded_weight.shape) != 0:
67
+ assert loaded_weight.shape[0] == 1
68
+ loaded_weight = loaded_weight[0]
69
+
70
+ return param[shard_id], loaded_weight
71
+
72
+
73
+ class LinearMethodBase(QuantizeMethodBase):
74
+ """Base class for different (maybe quantized) linear methods."""
75
+
76
+ @abstractmethod
77
+ def create_weights(
78
+ self,
79
+ layer: torch.nn.Module,
80
+ input_size_per_partition: int,
81
+ output_partition_sizes: List[int],
82
+ input_size: int,
83
+ output_size: int,
84
+ params_dtype: torch.dtype,
85
+ **extra_weight_attrs,
86
+ ):
87
+ """Create weights for a linear layer.
88
+ The weights will be set as attributes of the layer.
89
+
90
+ Args:
91
+ layer: The layer that is using the LinearMethodBase factory.
92
+ input_size_per_partition: Size of the weight input dim on rank X.
93
+ output_partition_sizes: Sizes of the output dim of each logical
94
+ weight on rank X. E.g., output_partition_sizes for QKVLinear
95
+ is a list containing the widths of Wq, Wk, Wv on rank X.
96
+ input_size: Size of the input dim of the weight across all ranks.
97
+ output_size: Size of the output dim of the weight across all ranks.
98
+ params_dtype: Datatype of the parameters.
99
+ """
100
+ raise NotImplementedError
101
+
102
+ @abstractmethod
103
+ def apply(
104
+ self,
105
+ layer: torch.nn.Module,
106
+ x: torch.Tensor,
107
+ bias: Optional[torch.Tensor] = None,
108
+ ) -> torch.Tensor:
109
+ """Apply the weights in layer to the input tensor.
110
+ Expects create_weights to have been called before on the layer."""
111
+ raise NotImplementedError
112
+
113
+
114
+ class UnquantizedLinearMethod(LinearMethodBase):
115
+ """Linear method without quantization.
116
+
117
+ Args:
118
+ separate_bias_add: If true, add bias separately after matrix
119
+ multiplication.
120
+ """
121
+
122
+ def __init__(self, separate_bias_add: bool = False):
123
+ self.separate_bias_add = separate_bias_add
124
+
125
+ def create_weights(
126
+ self,
127
+ layer: torch.nn.Module,
128
+ input_size_per_partition: int,
129
+ output_partition_sizes: List[int],
130
+ input_size: int,
131
+ output_size: int,
132
+ params_dtype: torch.dtype,
133
+ **extra_weight_attrs,
134
+ ):
135
+ weight = Parameter(
136
+ torch.empty(
137
+ sum(output_partition_sizes),
138
+ input_size_per_partition,
139
+ dtype=params_dtype,
140
+ ),
141
+ requires_grad=False,
142
+ )
143
+ set_weight_attrs(weight, {"input_dim": 1, "output_dim": 0})
144
+ layer.register_parameter("weight", weight)
145
+ set_weight_attrs(weight, extra_weight_attrs)
146
+
147
+ def apply(
148
+ self,
149
+ layer: torch.nn.Module,
150
+ x: torch.Tensor,
151
+ bias: Optional[torch.Tensor] = None,
152
+ ) -> torch.Tensor:
153
+ weight = layer.weight
154
+ if self.separate_bias_add:
155
+ if bias is not None:
156
+ return F.linear(x, weight) + bias
157
+ return F.linear(x, weight)
158
+ return F.linear(x, weight, bias)
159
+
160
+
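# --- Editorial sketch, not part of the diff --------------------------------
# Minimal illustration of the LinearMethodBase contract defined above:
# create_weights() registers parameters on the layer, apply() consumes them.
# The class name and per-row scale scheme are hypothetical; the sketch reuses
# the imports already present in this module (torch, F, Parameter,
# set_weight_attrs).
class RowScaledLinearMethod(LinearMethodBase):
    """Hypothetical method: fp weight plus a per-output-row scale."""

    def create_weights(self, layer, input_size_per_partition,
                       output_partition_sizes, input_size, output_size,
                       params_dtype, **extra_weight_attrs):
        out_size = sum(output_partition_sizes)
        weight = Parameter(
            torch.empty(out_size, input_size_per_partition, dtype=params_dtype),
            requires_grad=False,
        )
        scale = Parameter(torch.ones(out_size, dtype=params_dtype),
                          requires_grad=False)
        set_weight_attrs(weight, {"input_dim": 1, "output_dim": 0,
                                  **extra_weight_attrs})
        set_weight_attrs(scale, {"output_dim": 0, **extra_weight_attrs})
        layer.register_parameter("weight", weight)
        layer.register_parameter("scale", scale)

    def apply(self, layer, x, bias=None):
        # Fold the per-row scale into the weight at matmul time.
        return F.linear(x, layer.weight * layer.scale[:, None], bias)
# ----------------------------------------------------------------------------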
161
+ class LinearBase(torch.nn.Module):
162
+ """Base linear layer.
163
+
164
+ Args:
165
+ input_size: input dimension of the linear layer.
166
+ output_size: output dimension of the linear layer.
167
+ bias: If true, add bias.
168
+ skip_bias_add: If true, skip adding bias but instead return it.
169
+ params_dtype: Data type for the parameters.
170
+ quant_config: Quantization config.
171
+ """
172
+
173
+ def __init__(
174
+ self,
175
+ input_size: int,
176
+ output_size: int,
177
+ skip_bias_add: bool = False,
178
+ params_dtype: Optional[torch.dtype] = None,
179
+ quant_config: Optional[QuantizationConfig] = None,
180
+ ):
181
+ super().__init__()
182
+
183
+ # Keep input parameters
184
+ self.input_size = input_size
185
+ self.output_size = output_size
186
+ self.skip_bias_add = skip_bias_add
187
+ if params_dtype is None:
188
+ params_dtype = torch.get_default_dtype()
189
+ self.params_dtype = params_dtype
190
+ if quant_config is None:
191
+ self.quant_method: Optional[QuantizeMethodBase] = UnquantizedLinearMethod()
192
+ else:
193
+ self.quant_method = quant_config.get_quant_method(self)
194
+
195
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
196
+ raise NotImplementedError
197
+
198
+
199
+ class ReplicatedLinear(LinearBase):
200
+ """Replicated linear layer.
201
+
202
+ Args:
203
+ input_size: input dimension of the linear layer.
204
+ output_size: output dimension of the linear layer.
205
+ bias: If true, add bias.
206
+ skip_bias_add: If true, skip adding bias but instead return it.
207
+ params_dtype: Data type for the parameters.
208
+ quant_config: Quantization config.
209
+ """
210
+
211
+ def __init__(
212
+ self,
213
+ input_size: int,
214
+ output_size: int,
215
+ bias: bool = True,
216
+ skip_bias_add: bool = False,
217
+ params_dtype: Optional[torch.dtype] = None,
218
+ quant_config: Optional[QuantizationConfig] = None,
219
+ ):
220
+ super().__init__(
221
+ input_size, output_size, skip_bias_add, params_dtype, quant_config
222
+ )
223
+
224
+ # All linear layers support a quant method.
225
+ assert self.quant_method is not None
226
+ self.quant_method.create_weights(
227
+ self,
228
+ self.input_size,
229
+ [self.output_size],
230
+ self.input_size,
231
+ self.output_size,
232
+ self.params_dtype,
233
+ )
234
+
235
+ if bias:
236
+ self.bias = Parameter(
237
+ torch.empty(self.output_size, dtype=self.params_dtype)
238
+ )
239
+ set_weight_attrs(self.bias, {"output_dim": 0})
240
+ else:
241
+ self.register_parameter("bias", None)
242
+
243
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
244
+ bias = self.bias if not self.skip_bias_add else None
245
+ assert self.quant_method is not None
246
+ output = self.quant_method.apply(self, x, bias)
247
+ output_bias = self.bias if self.skip_bias_add else None
248
+ return output, output_bias
249
+
250
+ def extra_repr(self) -> str:
251
+ s = f"in_features={self.input_size}"
252
+ s += f", output_features={self.output_size}"
253
+ s += f", bias={self.bias is not None}"
254
+ return s
255
+
256
+
257
+ class ColumnParallelLinear(LinearBase):
258
+ """Linear layer with column parallelism.
259
+
260
+ The linear layer is defined as Y = XA + b. A is parallelized along
261
+ its second dimension as A = [A_1, ..., A_p].
262
+
263
+ Args:
264
+ input_size: first dimension of matrix A.
265
+ output_size: second dimension of matrix A.
266
+ bias: If true, add bias.
267
+ gather_output: If true, call all-gather on output and make Y available
268
+ to all GPUs, otherwise, every GPU will have its output
269
+ which is Y_i = XA_i
270
+ skip_bias_add: This was added to enable performance optimizations where
271
+ bias can be fused with other element-wise operations. We
272
+ skip adding bias but instead return it.
273
+ params_dtype: Data type for the parameters.
274
+ quant_config: Quantization config.
275
+ output_sizes: list of output sizes packed into one output, like for QKV
276
+ the list would be size 3.
277
+ """
278
+
279
+ def __init__(
280
+ self,
281
+ input_size: int,
282
+ output_size: int,
283
+ bias: bool = True,
284
+ gather_output: bool = False,
285
+ skip_bias_add: bool = False,
286
+ params_dtype: Optional[torch.dtype] = None,
287
+ quant_config: Optional[QuantizationConfig] = None,
288
+ output_sizes: Optional[List[int]] = None,
289
+ ):
290
+ super().__init__(
291
+ input_size, output_size, skip_bias_add, params_dtype, quant_config
292
+ )
293
+
294
+ self.gather_output = gather_output
295
+
296
+ # Divide the weight matrix along the last dimension.
297
+ tp_size = get_tensor_model_parallel_world_size()
298
+ assert self.quant_method is not None
299
+ self.output_size_per_partition = divide(self.output_size, tp_size)
300
+ self.output_partition_sizes = [self.output_size_per_partition]
301
+ # If QKV or MergedColumn, use output size of each partition.
302
+ if hasattr(self, "output_sizes"):
303
+ self.output_partition_sizes = [
304
+ divide(output_size, tp_size) for output_size in self.output_sizes
305
+ ]
306
+
307
+ if output_sizes is None:
308
+ output_sizes = [output_size]
309
+ self.quant_method.create_weights(
310
+ layer=self,
311
+ input_size_per_partition=self.input_size,
312
+ output_partition_sizes=self.output_partition_sizes,
313
+ input_size=self.input_size,
314
+ output_size=self.output_size,
315
+ params_dtype=self.params_dtype,
316
+ weight_loader=self.weight_loader,
317
+ )
318
+ if bias:
319
+ self.bias = Parameter(
320
+ torch.empty(self.output_size_per_partition, dtype=params_dtype)
321
+ )
322
+ set_weight_attrs(
323
+ self.bias,
324
+ {
325
+ "output_dim": 0,
326
+ "weight_loader": self.weight_loader,
327
+ },
328
+ )
329
+ else:
330
+ self.register_parameter("bias", None)
331
+
332
+ def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
333
+ if param.data.dtype != loaded_weight.dtype:
334
+ param.data = torch.empty_like(
335
+ param.data, dtype=loaded_weight.dtype, device="cuda"
336
+ )
337
+
338
+ tp_rank = get_tensor_model_parallel_rank()
339
+ output_dim = getattr(param, "output_dim", None)
340
+ param_data = param.data
341
+ if output_dim is not None:
342
+ shard_size = param_data.shape[output_dim]
343
+ start_idx = tp_rank * shard_size
344
+ loaded_weight = loaded_weight.narrow(output_dim, start_idx, shard_size)
345
+
346
+ # Special case for loading scales off disk, which often do not
347
+ # have a shape (such as in the case of AutoFP8).
348
+ if len(loaded_weight.shape) == 0:
349
+ loaded_weight = loaded_weight.reshape(1)
350
+
351
+ assert param_data.shape == loaded_weight.shape
352
+ param_data.copy_(loaded_weight)
353
+
354
+ def forward(self, input_):
355
+ bias = self.bias if not self.skip_bias_add else None
356
+
357
+ # Matrix multiply.
358
+ assert self.quant_method is not None
359
+ output_parallel = self.quant_method.apply(self, input_, bias)
360
+ if self.gather_output:
361
+ # All-gather across the partitions.
362
+ output = tensor_model_parallel_all_gather(output_parallel)
363
+ else:
364
+ output = output_parallel
365
+ output_bias = self.bias if self.skip_bias_add else None
366
+ return output, output_bias
367
+
368
+ def extra_repr(self) -> str:
369
+ s = f"in_features={self.input_size}"
370
+ s += f", output_features={self.output_size_per_partition}"
371
+ s += f", bias={self.bias is not None}"
372
+ s += f", tp_size={get_tensor_model_parallel_world_size()}"
373
+ s += f", gather_output={self.gather_output}"
374
+ return s
375
+
376
+
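# --- Editorial sketch, not part of the diff --------------------------------
# Usage sketch for ColumnParallelLinear, assuming the vLLM tensor-parallel
# process group has already been initialized and that this file is importable
# as sglang.srt.layers.linear. Sizes are illustrative and the weight is left
# uninitialized (torch.empty), so this only demonstrates the shapes and the
# (output, output_bias) tuple returned by forward().
import torch
from sglang.srt.layers.linear import ColumnParallelLinear

proj = ColumnParallelLinear(input_size=4096, output_size=11008, bias=False)
x = torch.randn(2, 4096, dtype=proj.params_dtype)
y, y_bias = proj(x)   # y: (2, 11008 // tp_size) since gather_output=False
                      # y_bias is None here; it is only returned when
                      # skip_bias_add=True and a bias exists
# ----------------------------------------------------------------------------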
377
+ class MergedColumnParallelLinear(ColumnParallelLinear):
378
+ """Packed linear layers with column parallelism.
379
+
380
+ Similar to ColumnParallelLinear, but the weight matrix is concatenated
381
+ along the output dimension. When the weight matrix is loaded, the
382
+ different partitions are sharded separately.
383
+
384
+ Args:
385
+ input_size: input dimension of the linear layer.
386
+ output_sizes: list of output dimensions of the linear layer.
387
+ bias: If true, add bias.
388
+ gather_output: If true, call all-gather on output and make the output
389
+ available to all GPUs, otherwise, every GPU will have
390
+ its own output.
391
+ skip_bias_add: This was added to enable performance optimizations where
392
+ bias can be fused with other element-wise operations. We
393
+ skip adding bias but instead return it.
394
+ params_dtype: Data type for the parameters.
395
+ quant_config: Quantization config.
396
+ """
397
+
398
+ def __init__(
399
+ self,
400
+ input_size: int,
401
+ output_sizes: List[int],
402
+ bias: bool = True,
403
+ gather_output: bool = False,
404
+ skip_bias_add: bool = False,
405
+ params_dtype: Optional[torch.dtype] = None,
406
+ quant_config: Optional[QuantizationConfig] = None,
407
+ ):
408
+ self.output_sizes = output_sizes
409
+ tp_size = get_tensor_model_parallel_world_size()
410
+ assert all(output_size % tp_size == 0 for output_size in output_sizes)
411
+ super().__init__(
412
+ input_size=input_size,
413
+ output_size=sum(output_sizes),
414
+ bias=bias,
415
+ gather_output=gather_output,
416
+ skip_bias_add=skip_bias_add,
417
+ params_dtype=params_dtype,
418
+ quant_config=quant_config,
419
+ )
420
+
421
+ def weight_loader(
422
+ self,
423
+ param: Parameter,
424
+ loaded_weight: torch.Tensor,
425
+ loaded_shard_id: Optional[int] = None,
426
+ ):
427
+ if param.data.dtype != loaded_weight.dtype:
428
+ param.data = torch.empty_like(
429
+ param.data, dtype=loaded_weight.dtype, device="cuda"
430
+ )
431
+
432
+ param_data = param.data
433
+ output_dim = getattr(param, "output_dim", None)
434
+ # Special case for AQLM codebooks.
435
+ is_metadata = getattr(param, "is_metadata", False)
436
+ # Special case for per-tensor scale to load scalar into fused array.
437
+ needs_scalar_to_array = getattr(param, "needs_scalar_to_array", False)
438
+
439
+ if loaded_shard_id is None:
440
+ # Loaded weight is already fused on disk (qkv/mlp).
441
+ if output_dim is None:
442
+ if needs_scalar_to_array:
443
+ param_data, loaded_weight = adjust_scalar_to_fused_array(
444
+ param_data, loaded_weight, 0
445
+ )
446
+
447
+ assert param_data.shape == loaded_weight.shape
448
+ param_data.copy_(loaded_weight)
449
+ return
450
+ current_shard_offset = 0
451
+ shard_offsets: List[Tuple[int, int, int]] = []
452
+ for i, output_size in enumerate(self.output_sizes):
453
+ shard_offsets.append((i, current_shard_offset, output_size))
454
+ current_shard_offset += output_size
455
+ packed_dim = getattr(param, "packed_dim", None)
456
+ for shard_id, shard_offset, shard_size in shard_offsets:
457
+ # Special case for Quantization.
458
+ # If quantized, we need to adjust the offset and size to account
459
+ # for the packing.
460
+ if packed_dim == output_dim:
461
+ shard_size = shard_size // param.pack_factor
462
+ shard_offset = shard_offset // param.pack_factor
463
+ # Special case for Marlin.
464
+ shard_size, shard_offset = adjust_marlin_shard(
465
+ param, shard_size, shard_offset
466
+ )
467
+
468
+ loaded_weight_shard = loaded_weight.narrow(
469
+ output_dim, shard_offset, shard_size
470
+ )
471
+ self.weight_loader(param, loaded_weight_shard, shard_id)
472
+ return
473
+
474
+ assert loaded_shard_id < len(self.output_sizes)
475
+ tp_rank = get_tensor_model_parallel_rank()
476
+ tp_size = get_tensor_model_parallel_world_size()
477
+ if output_dim is not None:
478
+ shard_offset = sum(self.output_sizes[:loaded_shard_id]) // tp_size
479
+ shard_size = self.output_sizes[loaded_shard_id] // tp_size
480
+ # Special case for quantization.
481
+ # If quantized, we need to adjust the offset and size to account
482
+ # for the packing.
483
+ packed_dim = getattr(param, "packed_dim", None)
484
+ if packed_dim == output_dim:
485
+ shard_size = shard_size // param.pack_factor
486
+ shard_offset = shard_offset // param.pack_factor
487
+ # Special case for Marlin.
488
+ shard_size, shard_offset = adjust_marlin_shard(
489
+ param, shard_size, shard_offset
490
+ )
491
+
492
+ use_bitsandbytes = getattr(param, "use_bitsandbytes", False)
493
+ if use_bitsandbytes:
494
+ shard_size = loaded_weight.shape[output_dim]
495
+ shard_offset = loaded_weight.shape[output_dim] * loaded_shard_id
496
+
497
+ param_data = param_data.narrow(output_dim, shard_offset, shard_size)
498
+ start_idx = tp_rank * shard_size
499
+ loaded_weight = loaded_weight.narrow(output_dim, start_idx, shard_size)
500
+ # Special case for AQLM codebooks.
501
+ elif is_metadata:
502
+ # metadata indicates fixed size concatenated along dim 0
503
+ shard_size = loaded_weight.shape[0]
504
+ shard_offset = loaded_shard_id * shard_size
505
+ param_data = param_data.narrow(0, shard_offset, shard_size)
506
+
507
+ # Special case for per-tensor scales in fused case.
508
+ elif needs_scalar_to_array:
509
+ param_data, loaded_weight = adjust_scalar_to_fused_array(
510
+ param_data, loaded_weight, loaded_shard_id
511
+ )
512
+
513
+ else:
514
+ ignore_warning = getattr(param, "ignore_warning", False)
515
+ if not ignore_warning:
516
+ logger.warning(
517
+ "Loading a weight without `output_dim` attribute in "
518
+ "MergedColumnParallelLinear, assume the weight is "
519
+ "the same for all partitions."
520
+ )
521
+
522
+ assert param_data.shape == loaded_weight.shape
523
+ param_data.copy_(loaded_weight)
524
+
525
+
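# --- Editorial sketch, not part of the diff --------------------------------
# How MergedColumnParallelLinear is typically declared for a fused gate/up MLP
# projection, and how separate checkpoint tensors map onto it via
# weight_loader's loaded_shard_id (the index into output_sizes). Assumes an
# initialized tensor-parallel group; names and sizes are illustrative.
from sglang.srt.layers.linear import MergedColumnParallelLinear

gate_up = MergedColumnParallelLinear(
    input_size=4096,
    output_sizes=[11008, 11008],  # gate_proj and up_proj packed along dim 0
    bias=False,
)
# If the checkpoint stores gate_proj/up_proj separately (full tensors of shape
# [11008, 4096]), each is loaded into its slot; the loader narrows the tensor
# to this rank's shard internally:
#   gate_up.weight_loader(gate_up.weight, gate_proj_weight, loaded_shard_id=0)
#   gate_up.weight_loader(gate_up.weight, up_proj_weight, loaded_shard_id=1)
# With loaded_shard_id=None the tensor is assumed to be already fused on disk.
# ----------------------------------------------------------------------------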
526
+ class QKVParallelLinear(ColumnParallelLinear):
527
+ """Linear layers for the attention's QKV transformation.
528
+
529
+ Linear layers for the linear transformation of the query, key, and value
530
+ vectors in the attention layer. The weight matrix is concatenated along
531
+ the output dimension. The layer is parallelized along the head dimension.
532
+ When the number of key/value heads is smaller than the number of query
533
+ heads (e.g., multi-query/grouped-query attention), the key/value head may
534
+ be replicated while the query heads are partitioned.
535
+
536
+ Args:
537
+ hidden_size: input hidden state size of the transformer.
538
+ head_size: size of each attention head.
539
+ total_num_heads: total number of attention query heads.
540
+ total_num_kv_heads: total number of attention key/value heads. If
541
+ None, assume total_num_kv_heads = total_num_heads.
542
+ bias: If true, add bias.
543
+ skip_bias_add: This was added to enable performance optimizations where
544
+ bias can be fused with other element-wise operations. We
545
+ skip adding bias but instead return it.
546
+ params_dtype: Data type for the parameters.
547
+ quant_config: Quantization config.
548
+ """
549
+
550
+ def __init__(
551
+ self,
552
+ hidden_size: int,
553
+ head_size: int,
554
+ total_num_heads: int,
555
+ total_num_kv_heads: Optional[int] = None,
556
+ bias: bool = True,
557
+ skip_bias_add: bool = False,
558
+ params_dtype: Optional[torch.dtype] = None,
559
+ quant_config: Optional[QuantizationConfig] = None,
560
+ ):
561
+ self.hidden_size = hidden_size
562
+ self.head_size = head_size
563
+ self.total_num_heads = total_num_heads
564
+ if total_num_kv_heads is None:
565
+ total_num_kv_heads = total_num_heads
566
+ self.total_num_kv_heads = total_num_kv_heads
567
+ # Divide the weight matrix along the last dimension.
568
+ tp_size = get_tensor_model_parallel_world_size()
569
+ self.num_heads = divide(self.total_num_heads, tp_size)
570
+ if tp_size >= self.total_num_kv_heads:
571
+ self.num_kv_heads = 1
572
+ self.num_kv_head_replicas = divide(tp_size, self.total_num_kv_heads)
573
+ else:
574
+ self.num_kv_heads = divide(self.total_num_kv_heads, tp_size)
575
+ self.num_kv_head_replicas = 1
576
+ input_size = self.hidden_size
577
+ output_size = (
578
+ (self.num_heads + 2 * self.num_kv_heads) * tp_size * self.head_size
579
+ )
580
+ self.output_sizes = [
581
+ self.num_heads * self.head_size * tp_size, # q_proj
582
+ self.num_kv_heads * self.head_size * tp_size, # k_proj
583
+ self.num_kv_heads * self.head_size * tp_size, # v_proj
584
+ ]
585
+
586
+ super().__init__(
587
+ input_size=input_size,
588
+ output_size=output_size,
589
+ bias=bias,
590
+ gather_output=False,
591
+ skip_bias_add=skip_bias_add,
592
+ params_dtype=params_dtype,
593
+ quant_config=quant_config,
594
+ )
595
+
596
+ def weight_loader(
597
+ self,
598
+ param: Parameter,
599
+ loaded_weight: torch.Tensor,
600
+ loaded_shard_id: Optional[str] = None,
601
+ ):
602
+ if param.data.dtype != loaded_weight.dtype:
603
+ param.data = torch.empty_like(
604
+ param.data, dtype=loaded_weight.dtype, device="cuda"
605
+ )
606
+
607
+ param_data = param.data
608
+ output_dim = getattr(param, "output_dim", None)
609
+ # Special case for AQLM codebooks.
610
+ is_metadata = getattr(param, "is_metadata", False)
611
+
612
+ # Special case for per-tensor scales in fused case.
613
+ needs_scalar_to_array = getattr(param, "needs_scalar_to_array", False)
614
+
615
+ if loaded_shard_id is None:
616
+ # Loaded weight is already fused on disk (qkv/mlp).
617
+ if output_dim is None:
618
+ if needs_scalar_to_array:
619
+ param_data, loaded_weight = adjust_scalar_to_fused_array(
620
+ param_data, loaded_weight, 0
621
+ )
622
+
623
+ assert param_data.shape == loaded_weight.shape
624
+ param_data.copy_(loaded_weight)
625
+ return
626
+ shard_offsets = [
627
+ # (shard_id, shard_offset, shard_size)
628
+ ("q", 0, self.total_num_heads * self.head_size),
629
+ (
630
+ "k",
631
+ self.total_num_heads * self.head_size,
632
+ self.total_num_kv_heads * self.head_size,
633
+ ),
634
+ (
635
+ "v",
636
+ (self.total_num_heads + self.total_num_kv_heads) * self.head_size,
637
+ self.total_num_kv_heads * self.head_size,
638
+ ),
639
+ ]
640
+ packed_dim = getattr(param, "packed_dim", None)
641
+ for shard_id, shard_offset, shard_size in shard_offsets:
642
+ # Special case for Quantized Weights.
643
+ # If quantized, we need to adjust the offset and size to account
644
+ # for the packing.
645
+ if packed_dim == output_dim:
646
+ shard_size = shard_size // param.pack_factor
647
+ shard_offset = shard_offset // param.pack_factor
648
+
649
+ # Special case for Marlin.
650
+ shard_size, shard_offset = adjust_marlin_shard(
651
+ param, shard_size, shard_offset
652
+ )
653
+
654
+ loaded_weight_shard = loaded_weight.narrow(
655
+ output_dim, shard_offset, shard_size
656
+ )
657
+ self.weight_loader(param, loaded_weight_shard, shard_id)
658
+ return
659
+
660
+ tp_rank = get_tensor_model_parallel_rank()
661
+ assert loaded_shard_id in ["q", "k", "v"]
662
+
663
+ # If output dim is defined, use the default loading process.
664
+ if output_dim is not None:
665
+ if loaded_shard_id == "q":
666
+ shard_offset = 0
667
+ shard_size = self.num_heads * self.head_size
668
+ elif loaded_shard_id == "k":
669
+ shard_offset = self.num_heads * self.head_size
670
+ shard_size = self.num_kv_heads * self.head_size
671
+ elif loaded_shard_id == "v":
672
+ shard_offset = (self.num_heads + self.num_kv_heads) * self.head_size
673
+ shard_size = self.num_kv_heads * self.head_size
674
+ # Special case for Quantized Weights.
675
+ # If quantized, we need to adjust the offset and size to account
676
+ # for the packing.
677
+ packed_dim = getattr(param, "packed_dim", None)
678
+ if packed_dim == output_dim:
679
+ shard_size = shard_size // param.pack_factor
680
+ shard_offset = shard_offset // param.pack_factor
681
+
682
+ # Special case for Marlin.
683
+ shard_size, shard_offset = adjust_marlin_shard(
684
+ param, shard_size, shard_offset
685
+ )
686
+
687
+ use_bitsandbytes = getattr(param, "use_bitsandbytes", False)
688
+ if use_bitsandbytes:
689
+ orig_qkv_offsets = {
690
+ "q": (0, self.num_heads * self.head_size),
691
+ "k": (
692
+ self.num_heads * self.head_size,
693
+ self.num_kv_heads * self.head_size,
694
+ ),
695
+ "v": (
696
+ (self.num_heads + self.num_kv_heads) * self.head_size,
697
+ self.num_kv_heads * self.head_size,
698
+ ),
699
+ "total": (
700
+ (self.num_heads + 2 * self.num_kv_heads) * self.head_size,
701
+ 0,
702
+ ),
703
+ }
704
+ shard_size, shard_offset = adjust_bitsandbytes_shard(
705
+ param, orig_qkv_offsets, loaded_shard_id
706
+ )
707
+
708
+ param_data = param_data.narrow(output_dim, shard_offset, shard_size)
709
+ if loaded_shard_id == "q":
710
+ shard_id = tp_rank
711
+ else:
712
+ shard_id = tp_rank // self.num_kv_head_replicas
713
+ start_idx = shard_id * shard_size
714
+ loaded_weight = loaded_weight.narrow(output_dim, start_idx, shard_size)
715
+ # Special case for AQLM codebooks.
716
+ elif is_metadata:
717
+ # metadata indicates fixed size concatenated along dim 0
718
+ shard_size = loaded_weight.shape[0]
719
+ shard_index = ["q", "k", "v"].index(loaded_shard_id)
720
+ param_data = param_data.narrow(0, shard_index * shard_size, shard_size)
721
+ # Special case for per-tensor scales in fused case.
722
+ elif needs_scalar_to_array:
723
+ param_data, loaded_weight = adjust_scalar_to_fused_array(
724
+ param_data, loaded_weight, loaded_shard_id
725
+ )
726
+ else:
727
+ ignore_warning = getattr(param, "ignore_warning", False)
728
+ if not ignore_warning:
729
+ logger.warning(
730
+ "Loading a weight without `output_dim` attribute in "
731
+ "QKVParallelLinear, assume the weight is the same "
732
+ "for all partitions."
733
+ )
734
+
735
+ assert param_data.shape == loaded_weight.shape
736
+ param_data.copy_(loaded_weight)
737
+
738
+
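# --- Editorial sketch, not part of the diff --------------------------------
# Worked example of the head-partitioning arithmetic in
# QKVParallelLinear.__init__ for a grouped-query model. Shapes follow a
# Llama-2-7B-like configuration and are illustrative; assumes an initialized
# tensor-parallel group.
#
#   hidden_size=4096, head_size=128, total_num_heads=32, total_num_kv_heads=8
#
#   tp_size=4  -> num_heads = 32/4 = 8, num_kv_heads = 8/4 = 2,
#                 num_kv_head_replicas = 1,
#                 rows per rank = (8 + 2*2) * 128 = 1536
#   tp_size=16 -> num_heads = 32/16 = 2, num_kv_heads = 1,
#                 num_kv_head_replicas = 16/8 = 2,
#                 rows per rank = (2 + 2*1) * 128 = 512
#                 (each kv head is shared by 2 ranks; see the
#                  tp_rank // num_kv_head_replicas indexing in weight_loader)
from sglang.srt.layers.linear import QKVParallelLinear

qkv = QKVParallelLinear(hidden_size=4096, head_size=128,
                        total_num_heads=32, total_num_kv_heads=8, bias=False)
# For tp_size=4, qkv.output_sizes == [4096, 1024, 1024] (q, k, v widths * tp).
# ----------------------------------------------------------------------------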
739
+ class RowParallelLinear(LinearBase):
740
+ """Linear layer with row parallelism.
741
+
742
+ The linear layer is defined as Y = XA + b. A is parallelized along
743
+ its first dimension and X along its second dimension as:
744
+ - -
745
+ | A_1 |
746
+ | . |
747
+ A = | . | X = [X_1, ..., X_p]
748
+ | . |
749
+ | A_p |
750
+ - -
751
+ Arguments:
752
+ input_size: first dimension of matrix A.
753
+ output_size: second dimension of matrix A.
754
+ bias: If true, add bias. Note that bias is not parallelized.
755
+ input_is_parallel: If true, we assume that the input is already
756
+ split across the GPUs and we do not split
757
+ again.
758
+ skip_bias_add: This was added to enable performance optimization where
759
+ bias can be fused with other element-wise operations.
760
+ We skip adding bias but instead return it.
761
+ params_dtype: Data type for the parameters.
762
+ quant_config: Quantization config.
763
+ """
764
+
765
+ def __init__(
766
+ self,
767
+ input_size: int,
768
+ output_size: int,
769
+ bias: bool = True,
770
+ input_is_parallel: bool = True,
771
+ skip_bias_add: bool = False,
772
+ params_dtype: Optional[torch.dtype] = None,
773
+ reduce_results: bool = True,
774
+ quant_config: Optional[QuantizationConfig] = None,
775
+ ):
776
+ super().__init__(
777
+ input_size, output_size, skip_bias_add, params_dtype, quant_config
778
+ )
779
+
780
+ self.input_is_parallel = input_is_parallel
781
+ self.reduce_results = reduce_results
782
+
783
+ # Divide the weight matrix along the last dimension.
784
+ self.tp_size = get_tensor_model_parallel_world_size()
785
+ self.input_size_per_partition = divide(input_size, self.tp_size)
786
+ assert self.quant_method is not None
787
+ self.quant_method.create_weights(
788
+ layer=self,
789
+ input_size_per_partition=self.input_size_per_partition,
790
+ output_partition_sizes=[self.output_size],
791
+ input_size=self.input_size,
792
+ output_size=self.output_size,
793
+ params_dtype=self.params_dtype,
794
+ weight_loader=self.weight_loader,
795
+ )
796
+ if not reduce_results and (bias and not skip_bias_add):
797
+ raise ValueError(
798
+ "When not reduce the results, adding bias to the "
799
+ "results can lead to incorrect results"
800
+ )
801
+
802
+ if bias:
803
+ self.bias = Parameter(torch.empty(self.output_size, dtype=params_dtype))
804
+ set_weight_attrs(
805
+ self.bias,
806
+ {
807
+ "output_dim": 0,
808
+ "weight_loader": self.weight_loader,
809
+ },
810
+ )
811
+ else:
812
+ self.register_parameter("bias", None)
813
+
814
+ def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
815
+ if param.data.dtype != loaded_weight.dtype:
816
+ param.data = torch.empty_like(
817
+ param.data, dtype=loaded_weight.dtype, device="cuda"
818
+ )
819
+
820
+ param_data = param.data
821
+ tp_rank = get_tensor_model_parallel_rank()
822
+ input_dim = getattr(param, "input_dim", None)
823
+ if input_dim is not None:
824
+ shard_size = param.data.shape[input_dim]
825
+ start_idx = tp_rank * shard_size
826
+ loaded_weight = loaded_weight.narrow(input_dim, start_idx, shard_size)
827
+
828
+ # Special case for loading scales off disk, which often do not
829
+ # have a shape (such as in the case of AutoFP8).
830
+ if len(loaded_weight.shape) == 0:
831
+ loaded_weight = loaded_weight.reshape(1)
832
+
833
+ assert param_data.shape == loaded_weight.shape
834
+ param_data.copy_(loaded_weight)
835
+
836
+ def forward(self, input_):
837
+ # Set up backprop all-reduce.
838
+ if self.input_is_parallel:
839
+ input_parallel = input_
840
+ else:
841
+ tp_rank = get_tensor_model_parallel_rank()
842
+ splitted_input = split_tensor_along_last_dim(
843
+ input_, num_partitions=self.tp_size
844
+ )
845
+ input_parallel = splitted_input[tp_rank].contiguous()
846
+
847
+ # Matrix multiply.
848
+ assert self.quant_method is not None
849
+ output_parallel = self.quant_method.apply(self, input_parallel)
850
+ if self.reduce_results and self.tp_size > 1:
851
+ output_ = tensor_model_parallel_all_reduce(output_parallel)
852
+ else:
853
+ output_ = output_parallel
854
+
855
+ if not self.skip_bias_add:
856
+ output = output_ + self.bias if self.bias is not None else output_
857
+ output_bias = None
858
+ else:
859
+ output = output_
860
+ output_bias = self.bias
861
+ return output, output_bias
862
+
863
+ def extra_repr(self) -> str:
864
+ s = f"input_features={self.input_size_per_partition}"
865
+ s += f", output_features={self.output_size}"
866
+ s += f", bias={self.bias is not None}"
867
+ s += f", tp_size={self.tp_size}"
868
+ s += f", reduce_results={self.reduce_results}"
869
+ return s
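# --- Editorial sketch, not part of the diff --------------------------------
# Typical composition of these layers in a tensor-parallel MLP block: a
# column-parallel up projection feeds a row-parallel down projection, so each
# block needs only one all-reduce (inside RowParallelLinear.forward). Assumes
# the vLLM tensor-parallel group is initialized; weights are uninitialized
# here, so this only demonstrates shapes and data flow. Sizes are illustrative.
import torch
import torch.nn.functional as F
from sglang.srt.layers.linear import ColumnParallelLinear, RowParallelLinear

hidden, intermediate = 4096, 11008
up = ColumnParallelLinear(hidden, intermediate, bias=False)    # shards output dim
down = RowParallelLinear(intermediate, hidden, bias=False,
                         input_is_parallel=True)               # shards input dim

x = torch.randn(2, hidden, dtype=up.params_dtype)
h, _ = up(x)          # (2, intermediate // tp_size); no all-gather needed
h = F.silu(h)
y, _ = down(h)        # all-reduce across ranks; (2, hidden) on every rank
# ----------------------------------------------------------------------------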