sglang 0.3.6.post3__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. sglang/bench_one_batch.py +4 -0
  2. sglang/bench_serving.py +13 -0
  3. sglang/check_env.py +1 -1
  4. sglang/srt/_custom_ops.py +118 -0
  5. sglang/srt/configs/device_config.py +17 -0
  6. sglang/srt/configs/load_config.py +84 -0
  7. sglang/srt/configs/model_config.py +161 -4
  8. sglang/srt/configs/qwen2vl.py +5 -8
  9. sglang/srt/constrained/outlines_backend.py +6 -1
  10. sglang/srt/constrained/outlines_jump_forward.py +8 -1
  11. sglang/srt/distributed/__init__.py +3 -0
  12. sglang/srt/distributed/communication_op.py +34 -0
  13. sglang/srt/distributed/device_communicators/__init__.py +0 -0
  14. sglang/srt/distributed/device_communicators/cuda_wrapper.py +182 -0
  15. sglang/srt/distributed/device_communicators/custom_all_reduce.py +352 -0
  16. sglang/srt/distributed/device_communicators/custom_all_reduce_utils.py +291 -0
  17. sglang/srt/distributed/device_communicators/hpu_communicator.py +48 -0
  18. sglang/srt/distributed/device_communicators/pynccl.py +204 -0
  19. sglang/srt/distributed/device_communicators/pynccl_wrapper.py +362 -0
  20. sglang/srt/distributed/device_communicators/shm_broadcast.py +568 -0
  21. sglang/srt/distributed/device_communicators/xpu_communicator.py +47 -0
  22. sglang/srt/distributed/parallel_state.py +1275 -0
  23. sglang/srt/distributed/utils.py +223 -0
  24. sglang/srt/hf_transformers_utils.py +37 -1
  25. sglang/srt/layers/attention/flashinfer_backend.py +13 -15
  26. sglang/srt/layers/attention/torch_native_backend.py +285 -0
  27. sglang/srt/layers/fused_moe_patch.py +20 -11
  28. sglang/srt/layers/linear.py +1 -0
  29. sglang/srt/layers/logits_processor.py +17 -3
  30. sglang/srt/layers/quantization/__init__.py +34 -0
  31. sglang/srt/layers/vocab_parallel_embedding.py +1 -0
  32. sglang/srt/lora/lora.py +1 -1
  33. sglang/srt/managers/io_struct.py +48 -2
  34. sglang/srt/managers/schedule_batch.py +18 -14
  35. sglang/srt/managers/schedule_policy.py +7 -4
  36. sglang/srt/managers/scheduler.py +76 -20
  37. sglang/srt/managers/tokenizer_manager.py +166 -68
  38. sglang/srt/managers/tp_worker.py +36 -3
  39. sglang/srt/managers/tp_worker_overlap_thread.py +21 -3
  40. sglang/srt/model_executor/cuda_graph_runner.py +16 -7
  41. sglang/srt/model_executor/forward_batch_info.py +9 -4
  42. sglang/srt/model_executor/model_runner.py +136 -150
  43. sglang/srt/model_loader/__init__.py +34 -0
  44. sglang/srt/model_loader/loader.py +1139 -0
  45. sglang/srt/model_loader/utils.py +41 -0
  46. sglang/srt/model_loader/weight_utils.py +640 -0
  47. sglang/srt/models/baichuan.py +9 -10
  48. sglang/srt/models/chatglm.py +6 -15
  49. sglang/srt/models/commandr.py +2 -3
  50. sglang/srt/models/dbrx.py +2 -3
  51. sglang/srt/models/deepseek.py +4 -11
  52. sglang/srt/models/deepseek_v2.py +3 -11
  53. sglang/srt/models/exaone.py +2 -3
  54. sglang/srt/models/gemma.py +2 -6
  55. sglang/srt/models/gemma2.py +3 -14
  56. sglang/srt/models/gemma2_reward.py +0 -1
  57. sglang/srt/models/gpt2.py +5 -12
  58. sglang/srt/models/gpt_bigcode.py +6 -22
  59. sglang/srt/models/grok.py +3 -3
  60. sglang/srt/models/internlm2.py +2 -3
  61. sglang/srt/models/internlm2_reward.py +0 -1
  62. sglang/srt/models/llama.py +97 -27
  63. sglang/srt/models/llama_classification.py +1 -2
  64. sglang/srt/models/llama_embedding.py +1 -2
  65. sglang/srt/models/llama_reward.py +2 -3
  66. sglang/srt/models/llava.py +1 -4
  67. sglang/srt/models/llavavid.py +1 -2
  68. sglang/srt/models/minicpm.py +4 -7
  69. sglang/srt/models/minicpm3.py +6 -19
  70. sglang/srt/models/mixtral.py +12 -5
  71. sglang/srt/models/mixtral_quant.py +2 -3
  72. sglang/srt/models/mllama.py +3 -7
  73. sglang/srt/models/olmo.py +2 -8
  74. sglang/srt/models/olmo2.py +0 -1
  75. sglang/srt/models/olmoe.py +3 -5
  76. sglang/srt/models/phi3_small.py +8 -8
  77. sglang/srt/models/qwen.py +2 -3
  78. sglang/srt/models/qwen2.py +10 -9
  79. sglang/srt/models/qwen2_moe.py +4 -11
  80. sglang/srt/models/qwen2_vl.py +2 -6
  81. sglang/srt/models/registry.py +99 -0
  82. sglang/srt/models/stablelm.py +2 -3
  83. sglang/srt/models/torch_native_llama.py +6 -12
  84. sglang/srt/models/xverse.py +2 -4
  85. sglang/srt/models/xverse_moe.py +4 -11
  86. sglang/srt/models/yivl.py +2 -3
  87. sglang/srt/openai_api/adapter.py +9 -5
  88. sglang/srt/openai_api/protocol.py +1 -0
  89. sglang/srt/server.py +267 -170
  90. sglang/srt/server_args.py +65 -31
  91. sglang/srt/utils.py +245 -28
  92. sglang/test/test_utils.py +7 -0
  93. sglang/version.py +1 -1
  94. {sglang-0.3.6.post3.dist-info → sglang-0.4.0.dist-info}/METADATA +1 -1
  95. sglang-0.4.0.dist-info/RECORD +184 -0
  96. sglang-0.3.6.post3.dist-info/RECORD +0 -162
  97. {sglang-0.3.6.post3.dist-info → sglang-0.4.0.dist-info}/LICENSE +0 -0
  98. {sglang-0.3.6.post3.dist-info → sglang-0.4.0.dist-info}/WHEEL +0 -0
  99. {sglang-0.3.6.post3.dist-info → sglang-0.4.0.dist-info}/top_level.txt +0 -0
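The largest additions in 0.4.0 are a self-contained model-loading stack — the new sglang/srt/model_loader package (items 43–46 above) — plus a vendored sglang/srt/distributed package (items 11–23). Only the biggest new file, sglang/srt/model_loader/loader.py (+1139 lines), is expanded below. For orientation: get_model_loader at the bottom of that file picks DummyModelLoader, ShardedStateLoader, BitsAndBytesModelLoader, GGUFModelLoader, or DefaultModelLoader based on LoadConfig.load_format, and every loader exposes download_model and load_model. A minimal sketch of driving that entry point follows; the config constructors live in sglang/srt/configs/* (also changed in this release but not expanded on this page), so every keyword argument below is an assumption for illustration, not the released API.

from sglang.srt.configs.device_config import DeviceConfig
from sglang.srt.configs.load_config import LoadConfig, LoadFormat
from sglang.srt.configs.model_config import ModelConfig
from sglang.srt.model_loader.loader import get_model_loader

# All keyword arguments are assumptions; only the attribute names
# (model_path, revision, dtype, device, load_format, ...) are visible
# in the loader.py diff below.
load_config = LoadConfig(load_format=LoadFormat.AUTO)
model_config = ModelConfig(model_path="meta-llama/Meta-Llama-3-8B-Instruct")
device_config = DeviceConfig(device="cuda")

loader = get_model_loader(load_config)   # -> DefaultModelLoader for AUTO
model = loader.load_model(model_config=model_config, device_config=device_config)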
sglang/srt/model_loader/loader.py (new file)
@@ -0,0 +1,1139 @@
1
+ # Adapted from https://github.com/vllm-project/vllm/blob/v0.6.3.post1/vllm/model_executor/model_loader/loader.py
2
+
3
+ # ruff: noqa: SIM117
4
+ import collections
5
+ import dataclasses
6
+ import fnmatch
7
+ import glob
8
+ import json
9
+ import logging
10
+ import math
11
+ import os
12
+ from abc import ABC, abstractmethod
13
+ from contextlib import contextmanager
14
+ from typing import Any, Dict, Generator, Iterable, List, Optional, Tuple, Type, cast
15
+
16
+ import gguf
17
+ import huggingface_hub
18
+ import numpy as np
19
+ import torch
20
+ from huggingface_hub import HfApi, hf_hub_download
21
+ from torch import nn
22
+ from transformers import AutoModelForCausalLM, PretrainedConfig
23
+ from transformers.utils import SAFE_WEIGHTS_INDEX_NAME
24
+ from vllm.distributed import (
25
+ get_tensor_model_parallel_rank,
26
+ get_tensor_model_parallel_world_size,
27
+ )
28
+
29
+ from sglang.srt.configs.device_config import DeviceConfig
30
+ from sglang.srt.configs.load_config import LoadConfig, LoadFormat
31
+ from sglang.srt.configs.model_config import ModelConfig
32
+ from sglang.srt.layers.quantization.base_config import QuantizationConfig
33
+ from sglang.srt.model_loader.utils import (
34
+ get_model_architecture,
35
+ set_default_torch_dtype,
36
+ )
37
+ from sglang.srt.model_loader.weight_utils import (
38
+ download_safetensors_index_file_from_hf,
39
+ download_weights_from_hf,
40
+ filter_duplicate_safetensors_files,
41
+ filter_files_not_needed_for_inference,
42
+ get_gguf_extra_tensor_names,
43
+ get_quant_config,
44
+ gguf_quant_weights_iterator,
45
+ initialize_dummy_weights,
46
+ np_cache_weights_iterator,
47
+ pt_weights_iterator,
48
+ safetensors_weights_iterator,
49
+ )
50
+ from sglang.srt.utils import (
51
+ get_device_capability,
52
+ is_pin_memory_available,
53
+ set_weight_attrs,
54
+ )
55
+
56
+
57
+ @contextmanager
58
+ def device_loading_context(module: torch.nn.Module, target_device: torch.device):
59
+ if target_device.type == "cpu":
60
+ # If target is CPU, no need to move anything
61
+ yield module
62
+ return
63
+
64
+ original_device_states: Dict[str, torch.device] = {}
65
+
66
+ # Store original device states and move parameters to GPU if they're on CPU
67
+ for name, p in module.named_parameters():
68
+ if p.device.type == "cpu":
69
+ original_device_states[name] = p.device
70
+ p.data = p.data.to(target_device)
71
+ # Parameters already on target device are not touched
72
+
73
+ try:
74
+ yield module
75
+
76
+ finally:
77
+ # Restore parameters to their original devices, ignoring new parameters
78
+ pin_memory = is_pin_memory_available()
79
+ for name, p in module.named_parameters():
80
+ if name in original_device_states:
81
+ original_device: torch.device = original_device_states[name]
82
+ if original_device.type == "cpu":
83
+ # `torch.empty_like` does not support `pin_memory` argument
84
+ cpu_data = torch.empty_strided(
85
+ size=p.data.size(),
86
+ stride=p.data.stride(),
87
+ dtype=p.data.dtype,
88
+ layout=p.data.layout,
89
+ device="cpu",
90
+ pin_memory=pin_memory,
91
+ )
92
+ cpu_data.copy_(p.data)
93
+ p.data = cpu_data
94
+ else:
95
+ p.data = p.data.to(original_device)
96
+ # New parameters or parameters already on target device are untouched
97
+
98
+
99
+ logger = logging.getLogger(__name__)
100
+
101
+
102
+ def _get_quantization_config(
103
+ model_config: ModelConfig, load_config: LoadConfig
104
+ ) -> Optional[QuantizationConfig]:
105
+ """Get the quantization config."""
106
+ if model_config.quantization is not None:
107
+ quant_config = get_quant_config(model_config, load_config)
108
+ major, minor = get_device_capability()
109
+
110
+ if major is not None and minor is not None:
111
+ assert 0 <= minor < 10
112
+ capability = major * 10 + minor
113
+ if capability < quant_config.get_min_capability():
114
+ raise ValueError(
115
+ f"The quantization method {model_config.quantization} "
116
+ "is not supported for the current GPU. "
117
+ f"Minimum capability: {quant_config.get_min_capability()}. "
118
+ f"Current capability: {capability}."
119
+ )
120
+ supported_dtypes = quant_config.get_supported_act_dtypes()
121
+ if model_config.dtype not in supported_dtypes:
122
+ raise ValueError(
123
+ f"{model_config.dtype} is not supported for quantization "
124
+ f"method {model_config.quantization}. Supported dtypes: "
125
+ f"{supported_dtypes}"
126
+ )
127
+ return quant_config
128
+ return None
129
+
130
+
131
+ def _initialize_model(
132
+ model_config: ModelConfig,
133
+ load_config: LoadConfig,
134
+ ) -> nn.Module:
135
+ """Initialize a model with the given configurations."""
136
+ model_class, _ = get_model_architecture(model_config)
137
+ quant_config = _get_quantization_config(model_config, load_config)
138
+ return model_class(
139
+ config=model_config.hf_config,
140
+ quant_config=quant_config,
141
+ )
142
+
143
+
144
+ class BaseModelLoader(ABC):
145
+ """Base class for model loaders."""
146
+
147
+ def __init__(self, load_config: LoadConfig):
148
+ self.load_config = load_config
149
+
150
+ @abstractmethod
151
+ def download_model(self, model_config: ModelConfig) -> None:
152
+ """Download a model so that it can be immediately loaded."""
153
+ raise NotImplementedError
154
+
155
+ @abstractmethod
156
+ def load_model(
157
+ self,
158
+ *,
159
+ model_config: ModelConfig,
160
+ device_config: DeviceConfig,
161
+ ) -> nn.Module:
162
+ """Load a model with the given configurations."""
163
+ raise NotImplementedError
164
+
165
+
166
+ class DefaultModelLoader(BaseModelLoader):
167
+ """Model loader that can load different file types from disk."""
168
+
169
+ @dataclasses.dataclass
170
+ class Source:
171
+ """A source for weights."""
172
+
173
+ model_or_path: str
174
+ """The model ID or path."""
175
+
176
+ revision: Optional[str]
177
+ """The optional model revision."""
178
+
179
+ prefix: str = ""
180
+ """A prefix to prepend to all weights."""
181
+
182
+ fall_back_to_pt: bool = True
183
+ """Whether .pt weights can be used."""
184
+
185
+ def __init__(self, load_config: LoadConfig):
186
+ super().__init__(load_config)
187
+ if load_config.model_loader_extra_config:
188
+ raise ValueError(
189
+ f"Model loader extra config is not supported for "
190
+ f"load format {load_config.load_format}"
191
+ )
192
+
193
+ def _maybe_download_from_modelscope(
194
+ self, model: str, revision: Optional[str]
195
+ ) -> Optional[str]:
196
+ """Download model from ModelScope hub if VLLM_USE_MODELSCOPE is True.
197
+
198
+ Returns the path to the downloaded model, or None if the model is not
199
+ downloaded from ModelScope."""
200
+ if "SGLANG_USE_MODELSCOPE" in os.environ:
201
+ # download model from ModelScope hub,
202
+ # lazy import so that modelscope is not required for normal use.
203
+ # pylint: disable=C.
204
+ from modelscope.hub.snapshot_download import snapshot_download
205
+
206
+ if not os.path.exists(model):
207
+ model_path = snapshot_download(
208
+ model_id=model,
209
+ cache_dir=self.load_config.download_dir,
210
+ local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
211
+ revision=revision,
212
+ ignore_file_pattern=self.load_config.ignore_patterns,
213
+ )
214
+ else:
215
+ model_path = model
216
+ return model_path
217
+ return None
218
+
219
+ def _prepare_weights(
220
+ self, model_name_or_path: str, revision: Optional[str], fall_back_to_pt: bool
221
+ ) -> Tuple[str, List[str], bool]:
222
+ """Prepare weights for the model.
223
+
224
+ If the model is not local, it will be downloaded."""
225
+ model_name_or_path = (
226
+ self._maybe_download_from_modelscope(model_name_or_path, revision)
227
+ or model_name_or_path
228
+ )
229
+
230
+ is_local = os.path.isdir(model_name_or_path)
231
+ load_format = self.load_config.load_format
232
+ use_safetensors = False
233
+ index_file = SAFE_WEIGHTS_INDEX_NAME
234
+ # Some quantized models use .pt files for storing the weights.
235
+ if load_format == LoadFormat.AUTO:
236
+ allow_patterns = ["*.safetensors", "*.bin"]
237
+ elif load_format == LoadFormat.SAFETENSORS:
238
+ use_safetensors = True
239
+ allow_patterns = ["*.safetensors"]
240
+ elif load_format == LoadFormat.MISTRAL:
241
+ use_safetensors = True
242
+ allow_patterns = ["consolidated*.safetensors"]
243
+ index_file = "consolidated.safetensors.index.json"
244
+ elif load_format == LoadFormat.PT:
245
+ allow_patterns = ["*.pt"]
246
+ elif load_format == LoadFormat.NPCACHE:
247
+ allow_patterns = ["*.bin"]
248
+ else:
249
+ raise ValueError(f"Unknown load_format: {load_format}")
250
+
251
+ if fall_back_to_pt:
252
+ allow_patterns += ["*.pt"]
253
+
254
+ if not is_local:
255
+ hf_folder = download_weights_from_hf(
256
+ model_name_or_path,
257
+ self.load_config.download_dir,
258
+ allow_patterns,
259
+ revision,
260
+ ignore_patterns=self.load_config.ignore_patterns,
261
+ )
262
+ else:
263
+ hf_folder = model_name_or_path
264
+
265
+ hf_weights_files: List[str] = []
266
+ for pattern in allow_patterns:
267
+ hf_weights_files += glob.glob(os.path.join(hf_folder, pattern))
268
+ if len(hf_weights_files) > 0:
269
+ if pattern == "*.safetensors":
270
+ use_safetensors = True
271
+ break
272
+
273
+ if use_safetensors:
274
+ # For models like Mistral-7B-Instruct-v0.3
275
+ # there are both sharded safetensors files and a consolidated
276
+ # safetensors file. Using both breaks.
277
+ # Here, we download the `model.safetensors.index.json` and filter
278
+ # any files not found in the index.
279
+ if not is_local:
280
+ download_safetensors_index_file_from_hf(
281
+ model_name_or_path,
282
+ index_file,
283
+ self.load_config.download_dir,
284
+ revision,
285
+ )
286
+ hf_weights_files = filter_duplicate_safetensors_files(
287
+ hf_weights_files, hf_folder, index_file
288
+ )
289
+ else:
290
+ hf_weights_files = filter_files_not_needed_for_inference(hf_weights_files)
291
+
292
+ if len(hf_weights_files) == 0:
293
+ raise RuntimeError(
294
+ f"Cannot find any model weights with `{model_name_or_path}`"
295
+ )
296
+
297
+ return hf_folder, hf_weights_files, use_safetensors
298
+
299
+ def _get_weights_iterator(
300
+ self, source: "Source"
301
+ ) -> Generator[Tuple[str, torch.Tensor], None, None]:
302
+ """Get an iterator for the model weights based on the load format."""
303
+ hf_folder, hf_weights_files, use_safetensors = self._prepare_weights(
304
+ source.model_or_path, source.revision, source.fall_back_to_pt
305
+ )
306
+ if self.load_config.load_format == LoadFormat.NPCACHE:
307
+ # Currently np_cache only support *.bin checkpoints
308
+ assert use_safetensors is False
309
+ weights_iterator = np_cache_weights_iterator(
310
+ source.model_or_path,
311
+ self.load_config.download_dir,
312
+ hf_folder,
313
+ hf_weights_files,
314
+ )
315
+ elif use_safetensors:
316
+ weights_iterator = safetensors_weights_iterator(hf_weights_files)
317
+ else:
318
+ weights_iterator = pt_weights_iterator(hf_weights_files)
319
+
320
+ # Apply the prefix.
321
+ return ((source.prefix + name, tensor) for (name, tensor) in weights_iterator)
322
+
323
+ def _get_all_weights(
324
+ self,
325
+ model_config: ModelConfig,
326
+ model: nn.Module,
327
+ ) -> Generator[Tuple[str, torch.Tensor], None, None]:
328
+
329
+ primary_weights = DefaultModelLoader.Source(
330
+ model_config.model_path,
331
+ model_config.revision,
332
+ prefix="",
333
+ fall_back_to_pt=getattr(model, "fall_back_to_pt_during_load", True),
334
+ )
335
+ yield from self._get_weights_iterator(primary_weights)
336
+
337
+ secondary_weights = cast(
338
+ Iterable[DefaultModelLoader.Source], getattr(model, "secondary_weights", ())
339
+ )
340
+ for source in secondary_weights:
341
+ yield from self._get_weights_iterator(source)
342
+
343
+ def download_model(self, model_config: ModelConfig) -> None:
344
+ self._prepare_weights(
345
+ model_config.model_path, model_config.revision, fall_back_to_pt=True
346
+ )
347
+
348
+ def load_model(
349
+ self,
350
+ *,
351
+ model_config: ModelConfig,
352
+ device_config: DeviceConfig,
353
+ ) -> nn.Module:
354
+ target_device = torch.device(device_config.device)
355
+ with set_default_torch_dtype(model_config.dtype):
356
+ with target_device:
357
+ model = _initialize_model(
358
+ model_config,
359
+ self.load_config,
360
+ )
361
+
362
+ model.load_weights(self._get_all_weights(model_config, model))
363
+
364
+ for _, module in model.named_modules():
365
+ quant_method = getattr(module, "quant_method", None)
366
+ if quant_method is not None:
367
+ # When quant methods need to process weights after loading
368
+ # (for repacking, quantizing, etc), they expect parameters
369
+ # to be on the global target device. This scope is for the
370
+ # case where cpu offloading is used, where we will move the
371
+ # parameters onto device for processing and back off after.
372
+ with device_loading_context(module, target_device):
373
+ quant_method.process_weights_after_loading(module)
374
+ return model.eval()
375
+
376
+
377
+ class DummyModelLoader(BaseModelLoader):
378
+ """Model loader that will set model weights to random values."""
379
+
380
+ def __init__(self, load_config: LoadConfig):
381
+ super().__init__(load_config)
382
+ if load_config.model_loader_extra_config:
383
+ raise ValueError(
384
+ f"Model loader extra config is not supported for "
385
+ f"load format {load_config.load_format}"
386
+ )
387
+
388
+ def download_model(self, model_config: ModelConfig) -> None:
389
+ pass # Nothing to download
390
+
391
+ def load_model(
392
+ self,
393
+ *,
394
+ model_config: ModelConfig,
395
+ device_config: DeviceConfig,
396
+ ) -> nn.Module:
397
+ with set_default_torch_dtype(model_config.dtype):
398
+ with torch.device(device_config.device):
399
+ model = _initialize_model(
400
+ model_config,
401
+ self.load_config,
402
+ )
403
+
404
+ for _, module in model.named_modules():
405
+ quant_method = getattr(module, "quant_method", None)
406
+ if quant_method is not None:
407
+ quant_method.process_weights_after_loading(module)
408
+
409
+ # NOTE(woosuk): For accurate performance evaluation, we assign
410
+ # random values to the weights.
411
+ initialize_dummy_weights(model)
412
+ return model.eval()
413
+
414
+
415
+ class ShardedStateLoader(BaseModelLoader):
416
+ """
417
+ Model loader that directly loads each worker's model state dict, which
418
+ enables a fast load path for large tensor-parallel models where each worker
419
+ only needs to read its own shard rather than the entire checkpoint. See
420
+ `examples/save_sharded_state.py` for creating a sharded checkpoint.
421
+ """
422
+
423
+ DEFAULT_PATTERN = "model-rank-{rank}-part-{part}.safetensors"
424
+
425
+ def __init__(self, load_config: LoadConfig):
426
+ super().__init__(load_config)
427
+ extra_config = (
428
+ {}
429
+ if load_config.model_loader_extra_config is None
430
+ else load_config.model_loader_extra_config.copy()
431
+ )
432
+ self.pattern = extra_config.pop("pattern", self.DEFAULT_PATTERN)
433
+ if extra_config:
434
+ raise ValueError(
435
+ f"Unexpected extra config keys for load format "
436
+ f"{load_config.load_format}: "
437
+ f"{load_config.model_loader_extra_config.keys()}"
438
+ )
439
+
440
+ @staticmethod
441
+ def _filter_subtensors(tensors: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
442
+ """
443
+ Filter out all tensors that share the same memory or a subset of the
444
+ memory of another tensor.
445
+ """
446
+ same_storage_groups: Dict[Any, List[Tuple[str, torch.Tensor]]] = (
447
+ collections.defaultdict(list)
448
+ )
449
+ for key, tensor in tensors.items():
450
+ if tensor.numel():
451
+ ptr = tensor.untyped_storage().data_ptr()
452
+ same_storage_groups[tensor.device, ptr].append((key, tensor))
453
+
454
+ def get_end_ptr(tensor: torch.Tensor) -> int:
455
+ return tensor.view(-1)[-1].data_ptr() + tensor.element_size()
456
+
457
+ result: Dict[str, torch.Tensor] = {}
458
+ for group in same_storage_groups.values():
459
+ for k, t in group:
460
+ a, b = t.data_ptr(), get_end_ptr(t)
461
+ for k2, t2 in group:
462
+ if not t2.is_contiguous():
463
+ continue
464
+ a2, b2 = t2.data_ptr(), get_end_ptr(t2)
465
+ if a < a2 or b2 < b:
466
+ continue
467
+ if a2 < a or b < b2 or not t.is_contiguous():
468
+ break # t2 covers strictly more memory than t.
469
+ if k2 < k:
470
+ # Same tensors, keep the one with the smaller key.
471
+ break
472
+ else:
473
+ result[k] = t
474
+ return result
475
+
476
+ def _prepare_weights(self, model_name_or_path: str, revision: Optional[str]):
477
+ if os.path.isdir(model_name_or_path):
478
+ return model_name_or_path
479
+ else:
480
+ allow_patterns = ["*.safetensors"]
481
+ return download_weights_from_hf(
482
+ model_name_or_path,
483
+ self.load_config.download_dir,
484
+ allow_patterns,
485
+ revision,
486
+ ignore_patterns=self.load_config.ignore_patterns,
487
+ )
488
+
489
+ def download_model(self, model_config: ModelConfig) -> None:
490
+ self._prepare_weights(model_config.model_path, model_config.revision)
491
+
492
+ def load_model(
493
+ self,
494
+ *,
495
+ model_config: ModelConfig,
496
+ device_config: DeviceConfig,
497
+ ) -> nn.Module:
498
+ from safetensors.torch import safe_open
499
+ from vllm.distributed import get_tensor_model_parallel_rank
500
+
501
+ local_model_path = self._prepare_weights(
502
+ model_config.model_path, model_config.revision
503
+ )
504
+
505
+ with set_default_torch_dtype(model_config.dtype):
506
+ with torch.device(device_config.device):
507
+ model = _initialize_model(model_config, self.load_config)
508
+ for _, module in model.named_modules():
509
+ quant_method = getattr(module, "quant_method", None)
510
+ if quant_method is not None:
511
+ quant_method.process_weights_after_loading(module)
512
+ rank = get_tensor_model_parallel_rank()
513
+ pattern = os.path.join(
514
+ local_model_path,
515
+ self.pattern.format(rank=rank, part="*"),
516
+ )
517
+ filepaths = glob.glob(pattern)
518
+ if not filepaths:
519
+ # TODO: support un-sharded checkpoints too
520
+ raise ValueError(
521
+ f"Could not find checkpoint files '{pattern}', only "
522
+ f"pre-sharded checkpoints are currently supported!"
523
+ )
524
+ state_dict = self._filter_subtensors(model.state_dict())
525
+ for path in filepaths:
526
+ with safe_open(path, framework="pt") as f:
527
+ for key in f.keys(): # noqa: SIM118
528
+ tensor = f.get_tensor(key)
529
+ # If loading with LoRA enabled, additional padding may
530
+ # be added to certain parameters. We only load into a
531
+ # narrowed view of the parameter data.
532
+ param_data = state_dict[key].data
533
+ param_shape = state_dict[key].shape
534
+ for dim, size in enumerate(tensor.shape):
535
+ if size < param_shape[dim]:
536
+ param_data = param_data.narrow(dim, 0, size)
537
+ if tensor.shape != param_shape:
538
+ logger.warning(
539
+ "loading tensor of shape %s into "
540
+ "parameter '%s' of shape %s",
541
+ tensor.shape,
542
+ key,
543
+ param_shape,
544
+ )
545
+ param_data.copy_(tensor)
546
+ state_dict.pop(key)
547
+ if state_dict:
548
+ raise ValueError(f"Missing keys {tuple(state_dict)} in loaded state!")
549
+ return model.eval()
550
+
551
+ @staticmethod
552
+ def save_model(
553
+ model: torch.nn.Module,
554
+ path: str,
555
+ pattern: Optional[str] = None,
556
+ max_size: Optional[int] = None,
557
+ ) -> None:
558
+ from safetensors.torch import save_file
559
+ from vllm.distributed import get_tensor_model_parallel_rank
560
+
561
+ if pattern is None:
562
+ pattern = ShardedStateLoader.DEFAULT_PATTERN
563
+ rank = get_tensor_model_parallel_rank()
564
+ part_idx = 0
565
+ total_size = 0
566
+ state_dict = ShardedStateLoader._filter_subtensors(model.state_dict())
567
+ state_dict_part: Dict[str, torch.Tensor] = {}
568
+ for key, tensor in state_dict.items():
569
+ param_size = tensor.nelement() * tensor.element_size()
570
+ if max_size is not None and total_size + param_size > max_size:
571
+ filename = pattern.format(rank=rank, part=part_idx)
572
+ save_file(
573
+ state_dict_part,
574
+ os.path.join(path, filename),
575
+ )
576
+ part_idx += 1
577
+ total_size = 0
578
+ state_dict_part = {}
579
+ state_dict_part[key] = tensor
580
+ total_size += param_size
581
+ if len(state_dict_part) > 0:
582
+ filename = pattern.format(rank=rank, part=part_idx)
583
+ save_file(
584
+ state_dict_part,
585
+ os.path.join(path, filename),
586
+ )
587
+
588
+
589
+ class BitsAndBytesModelLoader(BaseModelLoader):
590
+ """Model loader to load model weights with BitAndBytes quantization."""
591
+
592
+ possible_config_file_names = ["adapter_config.json"]
593
+
594
+ default_target_modules = [
595
+ ".gate_proj.",
596
+ ".down_proj.",
597
+ ".up_proj.",
598
+ ".q_proj.",
599
+ ".k_proj.",
600
+ ".v_proj.",
601
+ ".o_proj.",
602
+ ".fc1.",
603
+ ".fc2.",
604
+ ".dense.",
605
+ ".query_key_value.",
606
+ ".qkv_proj.",
607
+ ".dense_h_to_4h.",
608
+ ".dense_4h_to_h.",
609
+ ".out_proj.",
610
+ ]
611
+
612
+ def __init__(self, load_config: LoadConfig):
613
+ super().__init__(load_config)
614
+
615
+ # we don't need to quantize the whole model, only the target modules
616
+ # that are specified in the adapter config file. If the adapter config
617
+ # file is not provided, we will quantize the default modules.
618
+ if (
619
+ not load_config.model_loader_extra_config
620
+ or "qlora_adapter_name_or_path" not in load_config.model_loader_extra_config
621
+ ):
622
+ self.target_modules = []
623
+ return
624
+
625
+ qlora_adapter = load_config.model_loader_extra_config[
626
+ "qlora_adapter_name_or_path"
627
+ ]
628
+
629
+ config_file_path = self._get_config_file(qlora_adapter)
630
+
631
+ with open(config_file_path, "r") as f:
632
+ config = json.load(f)
633
+ self.target_modules = config["target_modules"]
634
+
635
+ def _get_config_file(self, qlora_adapter: str) -> str:
636
+ is_local = os.path.isdir(qlora_adapter)
637
+ config_file_path = None
638
+ if is_local:
639
+ for file in self.possible_config_file_names:
640
+ config_file_path = os.path.join(qlora_adapter, file)
641
+ if os.path.exists(config_file_path):
642
+ break
643
+ else:
644
+ hf_api = HfApi()
645
+ repo_files = hf_api.list_repo_files(repo_id=qlora_adapter)
646
+ for file in self.possible_config_file_names:
647
+ if file in repo_files:
648
+ config_file_path = hf_hub_download(
649
+ repo_id=qlora_adapter, filename=file
650
+ )
651
+ break
652
+
653
+ if not config_file_path:
654
+ raise ValueError(f"Cannot find adapter config file in {qlora_adapter}")
655
+
656
+ return config_file_path
657
+
658
+ def _get_weight_files(
659
+ self,
660
+ model_name_or_path: str,
661
+ allowed_patterns: List[str],
662
+ revision: Optional[str] = None,
663
+ ) -> Tuple[List[str], str]:
664
+ """Retrieve weight files. Download the files if necessary.
665
+
666
+ Return the weight files and the file pattern."""
667
+ is_local = os.path.isdir(model_name_or_path)
668
+
669
+ if is_local:
670
+ for pattern in allowed_patterns:
671
+ weight_files = glob.glob(os.path.join(model_name_or_path, pattern))
672
+ if weight_files:
673
+ return weight_files, pattern
674
+ else:
675
+ hf_api = HfApi()
676
+ repo_files = hf_api.list_repo_files(repo_id=model_name_or_path)
677
+ for pattern in allowed_patterns:
678
+ matching_files = fnmatch.filter(repo_files, pattern)
679
+ if matching_files:
680
+ hf_folder = download_weights_from_hf(
681
+ model_name_or_path,
682
+ self.load_config.download_dir,
683
+ [pattern],
684
+ revision,
685
+ ignore_patterns=self.load_config.ignore_patterns,
686
+ )
687
+ return glob.glob(os.path.join(hf_folder, pattern)), pattern
688
+
689
+ raise RuntimeError(f"No model weights found in: `{model_name_or_path}`")
690
+
691
+ def _prepare_weights(
692
+ self, model_name_or_path: str, revision: Optional[str]
693
+ ) -> Tuple[List[str], bool]:
694
+ """Prepare weight files for the model."""
695
+
696
+ allowed_patterns = ["*.safetensors", "*.bin", "*.pt"]
697
+
698
+ hf_weights_files, matched_pattern = self._get_weight_files(
699
+ model_name_or_path, allowed_patterns, revision
700
+ )
701
+
702
+ if matched_pattern != "*.safetensors":
703
+ hf_weights_files = filter_files_not_needed_for_inference(hf_weights_files)
704
+
705
+ if len(hf_weights_files) == 0:
706
+ raise RuntimeError(
707
+ f"Cannot find any model weights with `{model_name_or_path}`"
708
+ )
709
+
710
+ return hf_weights_files, matched_pattern == "*.safetensors"
711
+
712
+ def _hf_weight_iter(self, hf_weights_files, use_safetensors: bool):
713
+ if use_safetensors:
714
+ return safetensors_weights_iterator(hf_weights_files)
715
+ else:
716
+ return pt_weights_iterator(hf_weights_files)
717
+
718
+ def _get_quantized_weights_iterator(
719
+ self,
720
+ model_name_or_path: str,
721
+ revision: Optional[str],
722
+ pre_quant: bool,
723
+ load_8bit: bool,
724
+ ) -> Tuple[Generator[Tuple[str, torch.Tensor], None, None], Dict[str, Any]]:
725
+ """Get an iterator to the model weights with bitsandbytes quantization,
726
+ as well as the quantization state dictionary."""
727
+
728
+ # only load the bitsandbytes module when needed
729
+ try:
730
+ import bitsandbytes
731
+
732
+ if bitsandbytes.__version__ < "0.44.0":
733
+ raise ImportError(
734
+ "bitsandbytes version is wrong. Please "
735
+ "install bitsandbytes>=0.44.0."
736
+ )
737
+ except ImportError as err:
738
+ raise ImportError(
739
+ "Please install bitsandbytes>=0.44.0 via "
740
+ "`pip install bitsandbytes>=0.44.0` to use "
741
+ "bitsandbytes quantizer."
742
+ ) from err
743
+
744
+ hf_weights_files, use_safetensors = self._prepare_weights(
745
+ model_name_or_path, revision
746
+ )
747
+
748
+ quant_state_dict: Dict[str, Any] = {}
749
+
750
+ if pre_quant:
751
+ if load_8bit:
752
+ return (
753
+ self._quantized_8bit_generator(
754
+ hf_weights_files, use_safetensors, quant_state_dict
755
+ ),
756
+ quant_state_dict,
757
+ )
758
+ else:
759
+ return (
760
+ self._quantized_4bit_generator(
761
+ hf_weights_files, use_safetensors, quant_state_dict
762
+ ),
763
+ quant_state_dict,
764
+ )
765
+
766
+ return (
767
+ self._unquantized_generator(
768
+ hf_weights_files, use_safetensors, quant_state_dict
769
+ ),
770
+ quant_state_dict,
771
+ )
772
+
773
+ def _quantized_8bit_generator(
774
+ self, hf_weights_files, use_safetensors, quant_state_dict
775
+ ) -> Generator:
776
+ for weight_name, weight_tensor in self._hf_weight_iter(
777
+ hf_weights_files, use_safetensors
778
+ ):
779
+ if not weight_name.lower().endswith(".scb"):
780
+ continue
781
+
782
+ weight_key = weight_name.lower().replace(".scb", ".qweight")
783
+ quant_state_dict[weight_key] = weight_tensor
784
+
785
+ for weight_name, weight_tensor in self._hf_weight_iter(
786
+ hf_weights_files, use_safetensors
787
+ ):
788
+
789
+ if not weight_name.endswith((".weight", ".bias")):
790
+ continue
791
+
792
+ qweight_name = weight_name.replace(".weight", ".qweight")
793
+
794
+ if qweight_name in quant_state_dict:
795
+ set_weight_attrs(weight_tensor, {"load_in_8bit": True})
796
+ yield qweight_name, weight_tensor
797
+ else:
798
+ yield weight_name, weight_tensor
799
+
800
+ def _quantized_4bit_generator(
801
+ self, hf_weights_files, use_safetensors, quant_state_dict
802
+ ) -> Generator:
803
+ from bitsandbytes.functional import QuantState
804
+
805
+ # First iterate over all quant state weights
806
+ weight_iterator = self._hf_weight_iter(hf_weights_files, use_safetensors)
807
+ temp_state_dict = {}
808
+ for weight_name, weight_tensor in weight_iterator:
809
+ if weight_name.endswith((".weight", ".bias")):
810
+ continue
811
+ # bitsandbytes library requires
812
+ # weight.quant_state.bitsandbytes__* in CPU
813
+ if "quant_state.bitsandbytes" in weight_name:
814
+ temp_state_dict[weight_name] = weight_tensor.cpu().data
815
+ else:
816
+ temp_state_dict[weight_name] = weight_tensor
817
+
818
+ # Closure to parse quant_state for each prequant weight
819
+ def _parse_quant_state(param_name: str, temp_state_dict: Dict) -> QuantState:
820
+ quant_state = {}
821
+ for k in temp_state_dict:
822
+ if param_name + "." in k:
823
+ quant_state[k] = temp_state_dict[k]
824
+
825
+ return QuantState.from_dict(quant_state, device="cuda")
826
+
827
+ # Second iterate over all prequant and normal weights
828
+ # pre quantized weights would have a quant_state
829
+ for weight_name, weight_tensor in self._hf_weight_iter(
830
+ hf_weights_files, use_safetensors
831
+ ):
832
+
833
+ if not weight_name.endswith((".weight", ".bias")):
834
+ continue
835
+
836
+ if (f"{weight_name}.quant_state.bitsandbytes__nf4" in temp_state_dict) or (
837
+ f"{weight_name}.quant_state.bitsandbytes__fp4" in temp_state_dict
838
+ ):
839
+ quant_state = _parse_quant_state(weight_name, temp_state_dict)
840
+ weight_name = weight_name.replace(".weight", ".qweight")
841
+ quant_state_dict[weight_name] = quant_state
842
+ yield weight_name.replace(".weight", ".qweight"), weight_tensor
843
+ else:
844
+ yield weight_name, weight_tensor
845
+
846
+ def _unquantized_generator(
847
+ self, hf_weights_files, use_safetensors, quant_state_dict
848
+ ) -> Generator:
849
+ from bitsandbytes.functional import quantize_4bit
850
+
851
+ tp_size = get_tensor_model_parallel_world_size()
852
+ tp_rank = get_tensor_model_parallel_rank()
853
+
854
+ for weight_name, weight_tensor in self._hf_weight_iter(
855
+ hf_weights_files, use_safetensors
856
+ ):
857
+
858
+ if any(
859
+ target_module in weight_name for target_module in self.target_modules
860
+ ) and weight_name.endswith(".weight"):
861
+ weight_name = weight_name.replace(".weight", ".qweight")
862
+
863
+ if any(
864
+ module in weight_name
865
+ for module in self.column_parallel_weights_modules
866
+ ):
867
+
868
+ total_size = weight_tensor.size(-1)
869
+ start_index = total_size // tp_size * tp_rank
870
+ end_index = total_size // tp_size * (tp_rank + 1)
871
+ weight_sub_tensor = weight_tensor[..., start_index:end_index]
872
+
873
+ else:
874
+ total_size = weight_tensor.size(0)
875
+ start_index = total_size // tp_size * tp_rank
876
+ end_index = total_size // tp_size * (tp_rank + 1)
877
+ weight_sub_tensor = weight_tensor[start_index:end_index, ...]
878
+
879
+ # bitsandbytes requires data in GPU
880
+ if weight_sub_tensor.is_cuda:
881
+ loaded_weight = weight_sub_tensor
882
+ else:
883
+ loaded_weight = weight_sub_tensor.cuda()
884
+
885
+ # remove the following after the issue is fixed:
886
+ # https://github.com/bitsandbytes-foundation/bitsandbytes/issues/1342
887
+ if loaded_weight.is_contiguous() is False:
888
+ loaded_weight = loaded_weight.contiguous()
889
+
890
+ with set_default_torch_dtype(torch.float32):
891
+ processed_weight, quant_state = quantize_4bit(
892
+ loaded_weight, compress_statistics=True, quant_type="nf4"
893
+ )
894
+
895
+ quant_state_dict[weight_name] = quant_state
896
+ else:
897
+ processed_weight = weight_tensor
898
+
899
+ yield weight_name, processed_weight
900
+
901
+ def _load_weights(self, model_config: ModelConfig, model: nn.Module) -> None:
902
+ if not hasattr(model, "load_weights"):
903
+ raise AttributeError(
904
+ "The required method 'load_weights' is not defined in class"
905
+ f" {type(model).__name__}."
906
+ )
907
+
908
+ if not hasattr(model, "bitsandbytes_stacked_params_mapping"):
909
+ raise AttributeError(
910
+ f"Model {type(model).__name__} does not support BitsAndBytes "
911
+ "quantization yet."
912
+ )
913
+
914
+ if len(self.target_modules) == 0:
915
+ if hasattr(model, "default_bitsandbytes_target_modules"):
916
+ self.target_modules = model.default_bitsandbytes_target_modules
917
+ else:
918
+ self.target_modules = self.default_target_modules
919
+
920
+ if hasattr(model, "column_parallel_weights_modules"):
921
+ self.column_parallel_weights_modules = model.column_parallel_weights_modules
922
+ else:
923
+ self.column_parallel_weights_modules = []
924
+
925
+ self.model_type = type(model).__name__
926
+
927
+ logger.info(
928
+ "Loading weights with BitsAndBytes quantization. " " May take a while ..."
929
+ )
930
+
931
+ quant_config = getattr(model_config.hf_config, "quantization_config", None)
932
+
933
+ pre_quant = False
934
+ if quant_config is not None:
935
+ quant_method = quant_config.get("quant_method")
936
+ if quant_method == "bitsandbytes":
937
+ pre_quant = True
938
+ else:
939
+ raise ValueError(
940
+ f"BitsAndBytes loader does not support {quant_method} "
941
+ "quantization"
942
+ )
943
+
944
+ # The quant_states in pre_quantized models cannot work with a split
945
+ # weight tensor. So TP does not work with pre_quantized bnb models.
946
+ if pre_quant and get_tensor_model_parallel_world_size() > 1:
947
+ raise ValueError(
948
+ "Prequant BitsAndBytes models with TP is not supported."
949
+ "Please try with PP."
950
+ )
951
+
952
+ load_8bit = False
953
+ if pre_quant:
954
+ load_8bit = quant_config.get("load_in_8bit", False)
955
+
956
+ qweight_iterator, quant_state_dict = self._get_quantized_weights_iterator(
957
+ model_config.model_path, model_config.revision, pre_quant, load_8bit
958
+ )
959
+
960
+ model.load_weights(qweight_iterator)
961
+
962
+ torch.cuda.empty_cache()
963
+
964
+ param_dict = dict(model.named_parameters())
965
+ stacked_quant_state_dict: Dict[str, Dict[int, Any]] = {}
966
+ for quant_param_name in quant_state_dict:
967
+ non_stacked_param_name = quant_param_name
968
+
969
+ shard_index = 0
970
+ for shard_name, (
971
+ weight_name,
972
+ index,
973
+ ) in model.bitsandbytes_stacked_params_mapping.items():
974
+ if shard_name in quant_param_name:
975
+ shard_index = index
976
+ quant_param_name = quant_param_name.replace(shard_name, weight_name)
977
+ break
978
+
979
+ if quant_param_name not in param_dict:
980
+ raise ValueError(
981
+ f"Parameter {quant_param_name} not found in the model."
982
+ )
983
+
984
+ if quant_param_name not in stacked_quant_state_dict:
985
+ stacked_quant_state_dict[quant_param_name] = {}
986
+
987
+ stacked_quant_state_dict[quant_param_name][shard_index] = quant_state_dict[
988
+ non_stacked_param_name
989
+ ]
990
+
991
+ # save quant_states and offsets as the attributes of the parameters
992
+ for param_name, param in param_dict.items():
993
+ if param_name in stacked_quant_state_dict:
994
+ quant_states = stacked_quant_state_dict[param_name]
995
+ set_weight_attrs(param, {"bnb_quant_state": quant_states})
996
+
997
+ pack_ratio = getattr(param, "pack_factor", -1)
998
+ if pack_ratio == -1:
999
+ raise ValueError(f"pack_factor not set for parameter {param_name}.")
1000
+
1001
+ num_elements = [0] * len(quant_states)
1002
+ for seq, quant_state in quant_states.items():
1003
+ num_elements[seq] = math.prod(quant_state.shape) // pack_ratio
1004
+
1005
+ offsets = np.concatenate(([0], np.cumsum(num_elements)))
1006
+ set_weight_attrs(param, {"bnb_shard_offsets": offsets})
1007
+
1008
+ if load_8bit:
1009
+ set_weight_attrs(
1010
+ param, {"matmul_state": [None] * len(quant_states)}
1011
+ )
1012
+
1013
+ def download_model(self, model_config: ModelConfig) -> None:
1014
+ self._prepare_weights(model_config.model_path, model_config.revision)
1015
+
1016
+ def load_model(
1017
+ self,
1018
+ *,
1019
+ model_config: ModelConfig,
1020
+ device_config: DeviceConfig,
1021
+ ) -> nn.Module:
1022
+ with set_default_torch_dtype(model_config.dtype):
1023
+ with torch.device(device_config.device):
1024
+ model = _initialize_model(
1025
+ model_config,
1026
+ self.load_config,
1027
+ )
1028
+
1029
+ self._load_weights(model_config, model)
1030
+
1031
+ return model.eval()
1032
+
1033
+
1034
+ class GGUFModelLoader(BaseModelLoader):
1035
+ """
1036
+ Model loader that can load GGUF files. This is useful for loading models
1037
+ that are quantized with GGUF and saved in the GGUF format. This loader
1038
+ supports loading both full models and sharded models.
1039
+ """
1040
+
1041
+ def __init__(self, load_config: LoadConfig):
1042
+ super().__init__(load_config)
1043
+ if load_config.model_loader_extra_config:
1044
+ raise ValueError(
1045
+ f"Model loader extra config is not supported for "
1046
+ f"load format {load_config.load_format}"
1047
+ )
1048
+
1049
+ def _prepare_weights(self, model_name_or_path: str):
1050
+ if os.path.isfile(model_name_or_path):
1051
+ return model_name_or_path
1052
+ else:
1053
+ raise ValueError(f"{model_name_or_path} is not a file.")
1054
+
1055
+ def _get_gguf_weights_map(self, model_config: ModelConfig):
1056
+ """
1057
+ GGUF uses this naming convention for their tensors from HF checkpoint:
1058
+ `blk.N.BB.weight` and `blk.N.BB.bias`
1059
+ where N signifies the block number of a layer, and BB signifies the
1060
+ attention/mlp layer components.
1061
+ See "Standardized tensor names" in
1062
+ https://github.com/ggerganov/ggml/blob/master/docs/gguf.md for details.
1063
+ """
1064
+ config = model_config.hf_config
1065
+ model_type = config.model_type
1066
+ # hack: ggufs have a different name than transformers
1067
+ if model_type == "cohere":
1068
+ model_type = "command-r"
1069
+ arch = None
1070
+ for key, value in gguf.MODEL_ARCH_NAMES.items():
1071
+ if value == model_type:
1072
+ arch = key
1073
+ break
1074
+ if arch is None:
1075
+ raise RuntimeError(f"Unknown gguf model_type: {model_type}")
1076
+ num_layers = config.num_hidden_layers
1077
+ name_map = gguf.get_tensor_name_map(arch, num_layers)
1078
+ with torch.device("meta"):
1079
+ dummy_model = AutoModelForCausalLM.from_config(config)
1080
+ state_dict = dummy_model.state_dict()
1081
+
1082
+ gguf_to_hf_name_map = {}
1083
+ for hf_name in state_dict:
1084
+ name, suffix = hf_name.rsplit(".", 1)
1085
+ gguf_name = name_map.get_name(name)
1086
+ gguf_to_hf_name_map[f"{gguf_name}.{suffix}"] = hf_name
1087
+ return gguf_to_hf_name_map
1088
+
1089
+ def _get_weights_iterator(
1090
+ self, model_name_or_path: str, gguf_to_hf_name_map: Dict[str, str]
1091
+ ) -> Generator[Tuple[str, torch.Tensor], None, None]:
1092
+ return gguf_quant_weights_iterator(model_name_or_path, gguf_to_hf_name_map)
1093
+
1094
+ def download_model(self, model_config: ModelConfig) -> None:
1095
+ self._prepare_weights(model_config.model_path)
1096
+
1097
+ def load_model(
1098
+ self,
1099
+ *,
1100
+ model_config: ModelConfig,
1101
+ device_config: DeviceConfig,
1102
+ ) -> nn.Module:
1103
+
1104
+ local_model_path = self._prepare_weights(model_config.model_path)
1105
+ gguf_weights_map = self._get_gguf_weights_map(model_config)
1106
+ # we can only know if tie word embeddings after mapping weights
1107
+ if "lm_head.weight" in get_gguf_extra_tensor_names(
1108
+ local_model_path, gguf_weights_map
1109
+ ):
1110
+ model_config.hf_config.update({"tie_word_embeddings": True})
1111
+
1112
+ with set_default_torch_dtype(model_config.dtype):
1113
+ with torch.device(device_config.device):
1114
+ model = _initialize_model(model_config, self.load_config)
1115
+ model.load_weights(
1116
+ self._get_weights_iterator(local_model_path, gguf_weights_map)
1117
+ )
1118
+ return model
1119
+
1120
+
1121
+ def get_model_loader(load_config: LoadConfig) -> BaseModelLoader:
1122
+ """Get a model loader based on the load format."""
1123
+
1124
+ if isinstance(load_config.load_format, type):
1125
+ return load_config.load_format(load_config)
1126
+
1127
+ if load_config.load_format == LoadFormat.DUMMY:
1128
+ return DummyModelLoader(load_config)
1129
+
1130
+ if load_config.load_format == LoadFormat.SHARDED_STATE:
1131
+ return ShardedStateLoader(load_config)
1132
+
1133
+ if load_config.load_format == LoadFormat.BITSANDBYTES:
1134
+ return BitsAndBytesModelLoader(load_config)
1135
+
1136
+ if load_config.load_format == LoadFormat.GGUF:
1137
+ return GGUFModelLoader(load_config)
1138
+
1139
+ return DefaultModelLoader(load_config)
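ShardedStateLoader is the one loader in this file with a matching save path: its static save_model writes each tensor-parallel rank's filtered state dict into model-rank-{rank}-part-{part}.safetensors parts, and its load_model only accepts checkpoints already laid out that way. A hypothetical round trip, reusing the assumed model_config/device_config objects from the sketch near the top of this page and a model already loaded on the current rank:

import os
from sglang.srt.configs.load_config import LoadConfig, LoadFormat
from sglang.srt.model_loader.loader import ShardedStateLoader, get_model_loader

shard_dir = "/tmp/llama3-sharded"          # hypothetical path
os.makedirs(shard_dir, exist_ok=True)

# Write this rank's shard(s); max_size caps each part file at roughly 4 GiB.
ShardedStateLoader.save_model(model, path=shard_dir, max_size=4 << 30)
# -> /tmp/llama3-sharded/model-rank-0-part-0.safetensors, ...

# Reload directly from the shards. model_config.model_path must point at
# shard_dir, otherwise _prepare_weights will try to download from the Hub.
sharded_loader = get_model_loader(LoadConfig(load_format=LoadFormat.SHARDED_STATE))  # assumed kwarg
model = sharded_loader.load_model(model_config=model_config, device_config=device_config)

The BitsAndBytes and GGUF loaders follow the same load_model contract; the former additionally reads qlora_adapter_name_or_path from LoadConfig.model_loader_extra_config and requires bitsandbytes>=0.44.0, while the latter expects model_path to be a local GGUF file.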