sglang 0.3.6.post2__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff compares the contents of two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their public registry.
Files changed (110)
  1. sglang/bench_offline_throughput.py +55 -2
  2. sglang/bench_one_batch.py +7 -6
  3. sglang/bench_one_batch_server.py +4 -3
  4. sglang/bench_serving.py +13 -0
  5. sglang/check_env.py +1 -1
  6. sglang/launch_server.py +3 -2
  7. sglang/srt/_custom_ops.py +118 -0
  8. sglang/srt/configs/device_config.py +17 -0
  9. sglang/srt/configs/load_config.py +84 -0
  10. sglang/srt/configs/model_config.py +161 -4
  11. sglang/srt/configs/qwen2vl.py +5 -8
  12. sglang/srt/constrained/outlines_backend.py +6 -1
  13. sglang/srt/constrained/outlines_jump_forward.py +8 -1
  14. sglang/srt/distributed/__init__.py +3 -0
  15. sglang/srt/distributed/communication_op.py +34 -0
  16. sglang/srt/distributed/device_communicators/__init__.py +0 -0
  17. sglang/srt/distributed/device_communicators/cuda_wrapper.py +182 -0
  18. sglang/srt/distributed/device_communicators/custom_all_reduce.py +352 -0
  19. sglang/srt/distributed/device_communicators/custom_all_reduce_utils.py +291 -0
  20. sglang/srt/distributed/device_communicators/hpu_communicator.py +48 -0
  21. sglang/srt/distributed/device_communicators/pynccl.py +204 -0
  22. sglang/srt/distributed/device_communicators/pynccl_wrapper.py +362 -0
  23. sglang/srt/distributed/device_communicators/shm_broadcast.py +568 -0
  24. sglang/srt/distributed/device_communicators/xpu_communicator.py +47 -0
  25. sglang/srt/distributed/parallel_state.py +1275 -0
  26. sglang/srt/distributed/utils.py +223 -0
  27. sglang/srt/hf_transformers_utils.py +37 -1
  28. sglang/srt/layers/attention/flashinfer_backend.py +13 -15
  29. sglang/srt/layers/attention/torch_native_backend.py +285 -0
  30. sglang/srt/layers/fused_moe_patch.py +20 -11
  31. sglang/srt/layers/linear.py +1 -0
  32. sglang/srt/layers/logits_processor.py +17 -3
  33. sglang/srt/layers/quantization/__init__.py +34 -0
  34. sglang/srt/layers/vocab_parallel_embedding.py +1 -0
  35. sglang/srt/lora/lora.py +1 -1
  36. sglang/srt/managers/data_parallel_controller.py +7 -11
  37. sglang/srt/managers/detokenizer_manager.py +7 -4
  38. sglang/srt/managers/image_processor.py +1 -1
  39. sglang/srt/managers/io_struct.py +48 -12
  40. sglang/srt/managers/schedule_batch.py +42 -36
  41. sglang/srt/managers/schedule_policy.py +7 -4
  42. sglang/srt/managers/scheduler.py +111 -46
  43. sglang/srt/managers/session_controller.py +0 -3
  44. sglang/srt/managers/tokenizer_manager.py +169 -100
  45. sglang/srt/managers/tp_worker.py +36 -3
  46. sglang/srt/managers/tp_worker_overlap_thread.py +32 -5
  47. sglang/srt/model_executor/cuda_graph_runner.py +16 -7
  48. sglang/srt/model_executor/forward_batch_info.py +9 -4
  49. sglang/srt/model_executor/model_runner.py +136 -150
  50. sglang/srt/model_loader/__init__.py +34 -0
  51. sglang/srt/model_loader/loader.py +1139 -0
  52. sglang/srt/model_loader/utils.py +41 -0
  53. sglang/srt/model_loader/weight_utils.py +640 -0
  54. sglang/srt/models/baichuan.py +9 -10
  55. sglang/srt/models/chatglm.py +6 -15
  56. sglang/srt/models/commandr.py +2 -3
  57. sglang/srt/models/dbrx.py +2 -3
  58. sglang/srt/models/deepseek.py +4 -11
  59. sglang/srt/models/deepseek_v2.py +3 -11
  60. sglang/srt/models/exaone.py +2 -3
  61. sglang/srt/models/gemma.py +2 -6
  62. sglang/srt/models/gemma2.py +3 -14
  63. sglang/srt/models/gemma2_reward.py +0 -1
  64. sglang/srt/models/gpt2.py +5 -12
  65. sglang/srt/models/gpt_bigcode.py +6 -22
  66. sglang/srt/models/grok.py +14 -51
  67. sglang/srt/models/internlm2.py +2 -3
  68. sglang/srt/models/internlm2_reward.py +0 -1
  69. sglang/srt/models/llama.py +97 -27
  70. sglang/srt/models/llama_classification.py +1 -2
  71. sglang/srt/models/llama_embedding.py +1 -2
  72. sglang/srt/models/llama_reward.py +2 -3
  73. sglang/srt/models/llava.py +10 -12
  74. sglang/srt/models/llavavid.py +1 -2
  75. sglang/srt/models/minicpm.py +4 -7
  76. sglang/srt/models/minicpm3.py +6 -19
  77. sglang/srt/models/mixtral.py +12 -5
  78. sglang/srt/models/mixtral_quant.py +2 -3
  79. sglang/srt/models/mllama.py +3 -7
  80. sglang/srt/models/olmo.py +2 -8
  81. sglang/srt/models/olmo2.py +391 -0
  82. sglang/srt/models/olmoe.py +3 -5
  83. sglang/srt/models/phi3_small.py +8 -8
  84. sglang/srt/models/qwen.py +2 -3
  85. sglang/srt/models/qwen2.py +10 -9
  86. sglang/srt/models/qwen2_moe.py +4 -11
  87. sglang/srt/models/qwen2_vl.py +12 -9
  88. sglang/srt/models/registry.py +99 -0
  89. sglang/srt/models/stablelm.py +2 -3
  90. sglang/srt/models/torch_native_llama.py +6 -12
  91. sglang/srt/models/xverse.py +2 -4
  92. sglang/srt/models/xverse_moe.py +4 -11
  93. sglang/srt/models/yivl.py +2 -3
  94. sglang/srt/openai_api/adapter.py +10 -6
  95. sglang/srt/openai_api/protocol.py +1 -0
  96. sglang/srt/server.py +303 -204
  97. sglang/srt/server_args.py +65 -31
  98. sglang/srt/utils.py +253 -48
  99. sglang/test/test_utils.py +27 -7
  100. sglang/utils.py +2 -2
  101. sglang/version.py +1 -1
  102. {sglang-0.3.6.post2.dist-info → sglang-0.4.0.dist-info}/METADATA +2 -1
  103. sglang-0.4.0.dist-info/RECORD +184 -0
  104. sglang/srt/layers/fused_moe_grok/__init__.py +0 -1
  105. sglang/srt/layers/fused_moe_grok/fused_moe.py +0 -692
  106. sglang/srt/layers/fused_moe_grok/layer.py +0 -630
  107. sglang-0.3.6.post2.dist-info/RECORD +0 -164
  108. {sglang-0.3.6.post2.dist-info → sglang-0.4.0.dist-info}/LICENSE +0 -0
  109. {sglang-0.3.6.post2.dist-info → sglang-0.4.0.dist-info}/WHEEL +0 -0
  110. {sglang-0.3.6.post2.dist-info → sglang-0.4.0.dist-info}/top_level.txt +0 -0
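
The version bump itself is the one-line change to sglang/version.py (item 101), and the wheel is renamed accordingly. A minimal check, assuming the 0.4.0 wheel is installed in the current environment:

import sglang

# sglang re-exports __version__ from sglang/version.py,
# so the bump recorded in this diff is visible on the package itself.
print(sglang.__version__)  # expected: 0.4.0

The two hunks below show the contents of the new sglang/srt/model_loader package (items 52 and 53 in the list above).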

sglang/srt/model_loader/utils.py (new file)
@@ -0,0 +1,41 @@
+# Adapted from https://github.com/vllm-project/vllm/blob/v0.6.4.post1/vllm/model_executor/model_loader/utils.py
+
+"""Utilities for selecting and loading models."""
+import contextlib
+from typing import Tuple, Type
+
+import torch
+from torch import nn
+
+from sglang.srt.configs.model_config import ModelConfig
+
+
+@contextlib.contextmanager
+def set_default_torch_dtype(dtype: torch.dtype):
+    """Sets the default torch dtype to the given dtype."""
+    old_dtype = torch.get_default_dtype()
+    torch.set_default_dtype(dtype)
+    yield
+    torch.set_default_dtype(old_dtype)
+
+
+def get_model_architecture(model_config: ModelConfig) -> Tuple[Type[nn.Module], str]:
+    from sglang.srt.models.registry import ModelRegistry
+
+    architectures = getattr(model_config.hf_config, "architectures", [])
+    # Special handling for quantized Mixtral.
+    # FIXME(woosuk): This is a temporary hack.
+    mixtral_supported = ["fp8", "compressed-tensors", "gptq_marlin", "awq_marlin"]
+
+    if (
+        model_config.quantization is not None
+        and model_config.quantization not in mixtral_supported
+        and "MixtralForCausalLM" in architectures
+    ):
+        architectures = ["QuantMixtralForCausalLM"]
+
+    return ModelRegistry.resolve_model_cls(architectures)
+
+
+def get_architecture_class_name(model_config: ModelConfig) -> str:
+    return get_model_architecture(model_config)[1]
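
As orientation for readers of the hunk above (not part of the package diff), here is a minimal sketch of how the new set_default_torch_dtype context manager behaves, assuming the 0.4.0 wheel is installed:

import torch

from sglang.srt.model_loader.utils import set_default_torch_dtype

# Tensors created inside the context use the requested default dtype;
# the previous default (float32 on a fresh interpreter) is restored on exit.
with set_default_torch_dtype(torch.float16):
    x = torch.empty(2, 2)
    assert x.dtype == torch.float16
assert torch.get_default_dtype() == torch.float32

Model loading code typically wraps weight allocation in this context so that parameters are created directly in the serving dtype.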

sglang/srt/model_loader/weight_utils.py (new file)
@@ -0,0 +1,640 @@
+# Adapted from https://github.com/vllm-project/vllm/blob/v0.6.4.post1/vllm/model_executor/model_loader/weight_utils.py
+
+"""Utilities for downloading and initializing model weights."""
+import fnmatch
+import glob
+import hashlib
+import json
+import logging
+import os
+import tempfile
+from collections import defaultdict
+from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union
+
+import filelock
+import gguf
+import huggingface_hub.constants
+import numpy as np
+import torch
+from huggingface_hub import HfFileSystem, hf_hub_download, snapshot_download
+from safetensors.torch import load_file, safe_open, save_file
+from tqdm.auto import tqdm
+from vllm.distributed import get_tensor_model_parallel_rank
+
+from sglang.srt.configs.load_config import LoadConfig
+from sglang.srt.configs.model_config import ModelConfig
+from sglang.srt.layers.quantization import QuantizationConfig, get_quantization_config
+from sglang.srt.utils import print_warning_once
+
+logger = logging.getLogger(__name__)
+
+# use system-level temp directory for file locks, so that multiple users
+# can share the same lock without error.
+# lock files in the temp directory will be automatically deleted when the
+# system reboots, so users will not complain about annoying lock files
+temp_dir = tempfile.gettempdir()
+
+
+def enable_hf_transfer():
+    """automatically activates hf_transfer"""
+    if "HF_HUB_ENABLE_HF_TRANSFER" not in os.environ:
+        try:
+            # enable hf hub transfer if available
+            import hf_transfer  # type: ignore # noqa
+
+            huggingface_hub.constants.HF_HUB_ENABLE_HF_TRANSFER = True
+        except ImportError:
+            pass
+
+
+enable_hf_transfer()
+
+
+class DisabledTqdm(tqdm):
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs, disable=True)
+
+
+def get_lock(model_name_or_path: str, cache_dir: Optional[str] = None):
+    lock_dir = cache_dir or temp_dir
+    os.makedirs(os.path.dirname(lock_dir), exist_ok=True)
+    model_name = model_name_or_path.replace("/", "-")
+    hash_name = hashlib.sha256(model_name.encode()).hexdigest()
+    # add hash to avoid conflict with old users' lock files
+    lock_file_name = hash_name + model_name + ".lock"
+    # mode 0o666 is required for the filelock to be shared across users
+    lock = filelock.FileLock(os.path.join(lock_dir, lock_file_name), mode=0o666)
+    return lock
+
+
+def _shared_pointers(tensors):
+    ptrs = defaultdict(list)
+    for k, v in tensors.items():
+        ptrs[v.data_ptr()].append(k)
+    failing = []
+    for _, names in ptrs.items():
+        if len(names) > 1:
+            failing.append(names)
+    return failing
+
+
+def convert_bin_to_safetensor_file(
+    pt_filename: str,
+    sf_filename: str,
+) -> None:
+    loaded = torch.load(pt_filename, map_location="cpu")
+    if "state_dict" in loaded:
+        loaded = loaded["state_dict"]
+    shared = _shared_pointers(loaded)
+    for shared_weights in shared:
+        for name in shared_weights[1:]:
+            loaded.pop(name)
+
+    # For tensors to be contiguous
+    loaded = {k: v.contiguous() for k, v in loaded.items()}
+
+    dirname = os.path.dirname(sf_filename)
+    os.makedirs(dirname, exist_ok=True)
+    save_file(loaded, sf_filename, metadata={"format": "pt"})
+
+    # check file size
+    sf_size = os.stat(sf_filename).st_size
+    pt_size = os.stat(pt_filename).st_size
+    if (sf_size - pt_size) / pt_size > 0.01:
+        raise RuntimeError(
+            f"""The file size different is more than 1%:
+         - {sf_filename}: {sf_size}
+         - {pt_filename}: {pt_size}
+         """
+        )
+
+    # check if the tensors are the same
+    reloaded = load_file(sf_filename)
+    for k in loaded:
+        pt_tensor = loaded[k]
+        sf_tensor = reloaded[k]
+        if not torch.equal(pt_tensor, sf_tensor):
+            raise RuntimeError(f"The output tensors do not match for key {k}")
+
+
+# TODO(woosuk): Move this to other place.
+def get_quant_config(
+    model_config: ModelConfig, load_config: LoadConfig
+) -> QuantizationConfig:
+
+    quant_cls = get_quantization_config(model_config.quantization)
+
+    # GGUF doesn't have config file
+    if model_config.quantization == "gguf":
+        return quant_cls.from_config({})
+
+    # Read the quantization config from the HF model config, if available.
+    hf_quant_config = getattr(model_config.hf_config, "quantization_config", None)
+    # some vision model may keep quantization_config in their text_config
+    hf_text_config = getattr(model_config.hf_config, "text_config", None)
+    if hf_quant_config is None and hf_text_config is not None:
+        hf_quant_config = getattr(hf_text_config, "quantization_config", None)
+    if hf_quant_config is None:
+        # compressed-tensors uses a compressions_config
+        hf_quant_config = getattr(model_config.hf_config, "compression_config", None)
+    if hf_quant_config is not None:
+        return quant_cls.from_config(hf_quant_config)
+    # In case of bitsandbytes/QLoRA, get quant config from the adapter model.
+    if model_config.quantization == "bitsandbytes":
+        if (
+            not load_config.model_loader_extra_config
+            or "qlora_adapter_name_or_path" not in load_config.model_loader_extra_config
+        ):
+            return quant_cls.from_config({"adapter_name_or_path": ""})
+        model_name_or_path = load_config.model_loader_extra_config[
+            "qlora_adapter_name_or_path"
+        ]
+
+    else:
+        model_name_or_path = model_config.model_path
+    is_local = os.path.isdir(model_name_or_path)
+    if not is_local:
+        # Download the config files.
+        with get_lock(model_name_or_path, load_config.download_dir):
+            hf_folder = snapshot_download(
+                model_name_or_path,
+                revision=model_config.revision,
+                allow_patterns="*.json",
+                cache_dir=load_config.download_dir,
+                local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
+                tqdm_class=DisabledTqdm,
+            )
+    else:
+        hf_folder = model_name_or_path
+
+    possible_config_filenames = quant_cls.get_config_filenames()
+
+    # If the quantization config is not found, use the default config.
+    if not possible_config_filenames:
+        return quant_cls()
+
+    config_files = glob.glob(os.path.join(hf_folder, "*.json"))
+
+    quant_config_files = [
+        f for f in config_files if any(f.endswith(x) for x in possible_config_filenames)
+    ]
+    if len(quant_config_files) == 0:
+        raise ValueError(f"Cannot find the config file for {model_config.quantization}")
+    if len(quant_config_files) > 1:
+        raise ValueError(
+            f"Found multiple config files for {model_config.quantization}: "
+            f"{quant_config_files}"
+        )
+
+    quant_config_file = quant_config_files[0]
+    with open(quant_config_file) as f:
+        config = json.load(f)
+
+        if model_config.quantization == "bitsandbytes":
+            config["adapter_name_or_path"] = model_name_or_path
+        elif model_config.quantization == "modelopt":
+            if config["producer"]["name"] == "modelopt":
+                return quant_cls.from_config(config)
+            else:
+                raise ValueError(
+                    f"Unsupported quantization config"
+                    f" found for {model_config.quantization} in {f}."
+                )
+
+    return quant_cls.from_config(config)
+
+
+def download_weights_from_hf(
+    model_name_or_path: str,
+    cache_dir: Optional[str],
+    allow_patterns: List[str],
+    revision: Optional[str] = None,
+    ignore_patterns: Optional[Union[str, List[str]]] = None,
+) -> str:
+    """Download model weights from Hugging Face Hub.
+
+    Args:
+        model_name_or_path (str): The model name or path.
+        cache_dir (Optional[str]): The cache directory to store the model
+            weights. If None, will use HF defaults.
+        allow_patterns (List[str]): The allowed patterns for the
+            weight files. Files matched by any of the patterns will be
+            downloaded.
+        revision (Optional[str]): The revision of the model.
+        ignore_patterns (Optional[Union[str, List[str]]]): The patterns to
+            filter out the weight files. Files matched by any of the patterns
+            will be ignored.
+
+    Returns:
+        str: The path to the downloaded model weights.
+    """
+    if not huggingface_hub.constants.HF_HUB_OFFLINE:
+        # Before we download we look at that is available:
+        fs = HfFileSystem()
+        file_list = fs.ls(model_name_or_path, detail=False, revision=revision)
+
+        # depending on what is available we download different things
+        for pattern in allow_patterns:
+            matching = fnmatch.filter(file_list, pattern)
+            if len(matching) > 0:
+                allow_patterns = [pattern]
+                break
+
+    logger.info("Using model weights format %s", allow_patterns)
+    # Use file lock to prevent multiple processes from
+    # downloading the same model weights at the same time.
+    with get_lock(model_name_or_path, cache_dir):
+        hf_folder = snapshot_download(
+            model_name_or_path,
+            allow_patterns=allow_patterns,
+            ignore_patterns=ignore_patterns,
+            cache_dir=cache_dir,
+            tqdm_class=DisabledTqdm,
+            revision=revision,
+            local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
+        )
+    return hf_folder
+
+
+def download_safetensors_index_file_from_hf(
+    model_name_or_path: str,
+    index_file: str,
+    cache_dir: Optional[str],
+    revision: Optional[str] = None,
+) -> None:
+    """Download hf safetensors index file from Hugging Face Hub.
+
+    Args:
+        model_name_or_path (str): The model name or path.
+        cache_dir (Optional[str]): The cache directory to store the model
+            weights. If None, will use HF defaults.
+        revision (Optional[str]): The revision of the model.
+    """
+    # Use file lock to prevent multiple processes from
+    # downloading the same model weights at the same time.
+    with get_lock(model_name_or_path, cache_dir):
+        try:
+            # Download the safetensors index file.
+            hf_hub_download(
+                repo_id=model_name_or_path,
+                filename=index_file,
+                cache_dir=cache_dir,
+                revision=revision,
+                local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
+            )
+        # If file not found on remote or locally, we should not fail since
+        # only some models will have index_file.
+        except huggingface_hub.utils.EntryNotFoundError:
+            logger.info("No %s found in remote.", index_file)
+        except huggingface_hub.utils.LocalEntryNotFoundError:
+            logger.info("No %s found in local cache.", index_file)
+
+
+# For models like Mistral-7B-v0.3, there are both sharded
+# safetensors files and a consolidated safetensors file.
+# Passing both of these to the weight loader functionality breaks.
+# So, we use the index_file to
+# look up which safetensors files should be used.
+def filter_duplicate_safetensors_files(
+    hf_weights_files: List[str], hf_folder: str, index_file: str
+) -> List[str]:
+    # model.safetensors.index.json is a mapping from keys in the
+    # torch state_dict to safetensors file holding that weight.
+    index_file_name = os.path.join(hf_folder, index_file)
+    if not os.path.isfile(index_file_name):
+        return hf_weights_files
+
+    # Iterate through the weight_map (weight_name: safetensors files)
+    # to identify weights that we should use.
+    with open(index_file_name) as f:
+        weight_map = json.load(f)["weight_map"]
+    weight_files_in_index = set()
+    for weight_name in weight_map:
+        weight_files_in_index.add(os.path.join(hf_folder, weight_map[weight_name]))
+    # Filter out any fields that are not found in the index file.
+    hf_weights_files = [f for f in hf_weights_files if f in weight_files_in_index]
+    return hf_weights_files
+
+
+def filter_files_not_needed_for_inference(hf_weights_files: List[str]) -> List[str]:
+    """
+    Exclude files that are not needed for inference.
+
+    See https://github.com/huggingface/transformers/blob/v4.34.0/src/transformers/trainer.py#L227-L233
+    """
+    blacklist = [
+        "training_args.bin",
+        "optimizer.bin",
+        "optimizer.pt",
+        "scheduler.pt",
+        "scaler.pt",
+    ]
+    hf_weights_files = [
+        f for f in hf_weights_files if not any(f.endswith(x) for x in blacklist)
+    ]
+    return hf_weights_files
+
+
+# explicitly use pure text format, with a newline at the end
+# this makes it impossible to see the animation in the progress bar
+# but will avoid messing up with ray or multiprocessing, which wraps
+# each line of output with some prefix.
+_BAR_FORMAT = "{desc}: {percentage:3.0f}% Completed | {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]\n"  # noqa: E501
+
+
+def np_cache_weights_iterator(
+    model_name_or_path: str,
+    cache_dir: Optional[str],
+    hf_folder: str,
+    hf_weights_files: List[str],
+) -> Generator[Tuple[str, torch.Tensor], None, None]:
+    """Iterate over the weights in the model np files.
+
+    Will dump the model weights to numpy files if they are not already dumped.
+    """
+    enable_tqdm = (
+        not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0
+    )
+    # Convert the model weights from torch tensors to numpy arrays for
+    # faster loading.
+    np_folder = os.path.join(hf_folder, "np")
+    os.makedirs(np_folder, exist_ok=True)
+    weight_names_file = os.path.join(np_folder, "weight_names.json")
+    # Use file lock to prevent multiple processes from
+    # dumping the same model weights to numpy at the same time.
+    with get_lock(model_name_or_path, cache_dir):
+        if not os.path.exists(weight_names_file):
+            weight_names: List[str] = []
+            for bin_file in tqdm(
+                hf_weights_files,
+                desc="Loading np_cache checkpoint shards",
+                disable=not enable_tqdm,
+                bar_format=_BAR_FORMAT,
+            ):
+                state = torch.load(bin_file, map_location="cpu")
+                for name, param in state.items():
+                    param_path = os.path.join(np_folder, name)
+                    with open(param_path, "wb") as f:
+                        np.save(f, param.cpu().detach().numpy())
+                    weight_names.append(name)
+            with open(weight_names_file, "w") as f:
+                json.dump(weight_names, f)
+
+    with open(weight_names_file) as f:
+        weight_names = json.load(f)
+
+    for name in weight_names:
+        param_path = os.path.join(np_folder, name)
+        with open(param_path, "rb") as f:
+            param = np.load(f)
+        yield name, torch.from_numpy(param)
+
+
+def safetensors_weights_iterator(
+    hf_weights_files: List[str],
+) -> Generator[Tuple[str, torch.Tensor], None, None]:
+    """Iterate over the weights in the model safetensor files."""
+    enable_tqdm = (
+        not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0
+    )
+    for st_file in tqdm(
+        hf_weights_files,
+        desc="Loading safetensors checkpoint shards",
+        disable=not enable_tqdm,
+        bar_format=_BAR_FORMAT,
+    ):
+        with safe_open(st_file, framework="pt") as f:
+            for name in f.keys():  # noqa: SIM118
+                param = f.get_tensor(name)
+                yield name, param
+
+
+def pt_weights_iterator(
+    hf_weights_files: List[str],
+) -> Generator[Tuple[str, torch.Tensor], None, None]:
+    """Iterate over the weights in the model bin/pt files."""
+    enable_tqdm = (
+        not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0
+    )
+    for bin_file in tqdm(
+        hf_weights_files,
+        desc="Loading pt checkpoint shards",
+        disable=not enable_tqdm,
+        bar_format=_BAR_FORMAT,
+    ):
+        state = torch.load(bin_file, map_location="cpu")
+        yield from state.items()
+        del state
+        torch.cuda.empty_cache()
+
+
+def get_gguf_extra_tensor_names(
+    gguf_file: str, gguf_to_hf_name_map: Dict[str, str]
+) -> List[str]:
+    reader = gguf.GGUFReader(gguf_file)
+    expected_gguf_keys = set(gguf_to_hf_name_map.keys())
+    exact_gguf_keys = set([tensor.name for tensor in reader.tensors])
+    extra_keys = expected_gguf_keys - exact_gguf_keys
+    return [gguf_to_hf_name_map[key] for key in extra_keys]
+
+
+def gguf_quant_weights_iterator(
+    gguf_file: str, gguf_to_hf_name_map: Dict[str, str]
+) -> Generator[Tuple[str, torch.Tensor], None, None]:
+    """
+    Iterate over the quant weights in the model gguf files and convert
+    them to torch tensors
+    """
+
+    reader = gguf.GGUFReader(gguf_file)
+
+    for tensor in reader.tensors:
+        if tensor.name in gguf_to_hf_name_map:
+            weight_type = tensor.tensor_type
+            name = gguf_to_hf_name_map[tensor.name]
+
+            if weight_type.name != "F32":
+                weight_type_name = name.replace("weight", "qweight_type")
+                weight_type = torch.tensor(weight_type)
+                yield weight_type_name, weight_type
+
+    for tensor in reader.tensors:
+        if tensor.name in gguf_to_hf_name_map:
+            weight = tensor.data
+            weight_type = tensor.tensor_type
+            name = gguf_to_hf_name_map[tensor.name]
+
+            if weight_type.name != "F32":
+                name = name.replace("weight", "qweight")
+            param = torch.tensor(weight)
+            yield name, param
+
+
+def convert_pyslice_to_tensor(x: Any) -> torch.Tensor:
+    """convert PySafeSlice object from safetensors to torch.Tensor
+
+    PySafeSlice object supports indexing, which is done before loading the
+    actual tensor and can reduce the amount of memory being read into the
+    memory. However, it does not support more advanced functionalities
+    like `.view()` or `.t()`. Therefore, if we need to modify the loaded
+    tensor with these more complicated operators, we need to convert to
+    tensor first.
+    """
+    if not isinstance(x, torch.Tensor):
+        x = x[:]
+    return x
+
+
+def default_weight_loader(param: torch.Tensor, loaded_weight: torch.Tensor) -> None:
+    """Default weight loader."""
+    try:
+        if param.numel() == 1 and loaded_weight.numel() == 1:
+            # Sometimes scalar values aren't considered tensors with shapes
+            # so if both param and loaded_weight are a scalar,
+            # "broadcast" instead of copy
+            param.data.fill_(loaded_weight.item())
+        else:
+            assert param.size() == loaded_weight.size(), (
+                f"Attempted to load weight ({loaded_weight.size()}) "
+                f"into parameter ({param.size()})"
+            )

+            param.data.copy_(loaded_weight)
+    except Exception:
+        # NOTE: This exception is added for the purpose of setting breakpoint to
+        # debug weight loading issues.
+        raise
+
+
+def row_parallel_weight_loader(
+    param: torch.Tensor, loaded_weight: torch.Tensor
+) -> None:
+    """Load weights that are row-parallelized."""
+    tp_rank = get_tensor_model_parallel_rank()
+    shard_dim = 0 if param.dim() != 1 else None
+
+    if shard_dim is not None:
+        shard_size = param.data.shape[shard_dim]
+        start_idx = tp_rank * shard_size
+        loaded_weight = loaded_weight.narrow(shard_dim, start_idx, shard_size)
+
+    return default_weight_loader(param, loaded_weight)
+
+
+LoaderFunction = Callable[[torch.Tensor, torch.Tensor], torch.Tensor]
+
+
+def sharded_weight_loader(shard_axis: int) -> LoaderFunction:
+    """Create a weight loader that shards the weights along the given axis"""
+
+    def loader(param: torch.Tensor, loaded_weight: torch.Tensor) -> None:
+        tp_rank = get_tensor_model_parallel_rank()
+
+        shard_size = param.data.shape[shard_axis]
+        start_idx = tp_rank * shard_size
+        loaded_weight = loaded_weight.narrow(shard_axis, start_idx, shard_size)
+
+        return default_weight_loader(param, loaded_weight)
+
+    return loader
+
+
+def composed_weight_loader(
+    loader: LoaderFunction, fn: Callable[[torch.Tensor], torch.Tensor]
+) -> LoaderFunction:
+    """Create a weight loader that post-processes the weights after loading"""
+
+    def composed_loader(param: torch.Tensor, loaded_weight: torch.Tensor) -> None:
+        loader(param, loaded_weight)
+        param.data.copy_(fn(param))
+        return
+
+    return composed_loader
+
+
+def initialize_dummy_weights(
+    model: torch.nn.Module,
+    low: float = -1e-3,
+    high: float = 1e-3,
+    seed: int = 1234,
+) -> None:
+    """Initialize model weights with random values.
+
+    The model weights must be randomly initialized for accurate performance
+    measurements. Additionally, the model weights should not cause NaNs in the
+    forward pass. We empirically found that initializing the weights with
+    values between -1e-3 and 1e-3 works well for most models.
+
+    We use per-parameter random seed, so that dummy weights are consistent,
+    even if the model is partitioned across multiple devices. When the seed
+    is fixed, the random values generated by this function only depends on
+    the parameter's number of elements and its data type.
+    """
+    for param in model.state_dict().values():
+        if torch.is_floating_point(param):
+            generator = torch.Generator(device=param.data.device)
+            generator.manual_seed(seed)
+            if torch.finfo(param.data.dtype).bits < 16:
+                # uniform_ doesn't support < 16-bit datatypes (FP8)
+                dtype = param.data.dtype
+                tmp_param = param.data.to(torch.float16)
+                tmp_param = tmp_param.uniform_(low, high, generator=generator).to(dtype)
+                param.data.copy_(tmp_param)
+            else:
+                param.uniform_(low, high, generator=generator)
+
+
+def maybe_remap_kv_scale_name(name: str, params_dict: dict) -> Optional[str]:
+    """Remap the name of FP8 k/v_scale parameters.
+
+    This function handles the remapping of FP8 k/v_scale parameter names.
+    It detects if the given name ends with a suffix and attempts to remap
+    it to the expected name format in the model. If the remapped name is not
+    found in the params_dict, a warning is printed and None is returned.
+
+    Args:
+        name (str): The original loaded checkpoint parameter name.
+        params_dict (dict): Dictionary containing the model's named parameters.
+
+    Returns:
+        str: The remapped parameter name if successful, or the original name
+            if no remapping is needed.
+        None: If the remapped name is not found in params_dict.
+    """
+    if name.endswith(".kv_scale"):
+        print_warning_once(
+            "DEPRECATED. Found kv_scale in the checkpoint. "
+            "This format is deprecated in favor of separate k_scale and "
+            "v_scale tensors and will be removed in a future release. "
+            "Functionally, we will remap kv_scale to k_scale and duplicate "
+            "k_scale to v_scale"
+        )
+        # NOTE: we remap the deprecated kv_scale to k_scale
+        remapped_name = name.replace(".kv_scale", ".attn.k_scale")
+        if remapped_name not in params_dict:
+            print_warning_once(
+                f"Found kv_scale in the checkpoint (e.g. {name}), "
+                "but not found the expected name in the model "
+                f"(e.g. {remapped_name}). kv_scale is "
+                "not loaded."
+            )
+            return None
+        return remapped_name
+
+    possible_scale_names = [".k_scale", ".v_scale"]
+    for scale_name in possible_scale_names:
+        if name.endswith(scale_name):
+            remapped_name = name.replace(scale_name, f".attn{scale_name}")
+            if remapped_name not in params_dict:
+                print_warning_once(
+                    f"Found {scale_name} in the checkpoint (e.g. {name}), "
+                    "but not found the expected name in the model "
+                    f"(e.g. {remapped_name}). {scale_name} is "
+                    "not loaded."
+                )
+                return None
+            return remapped_name
+
+    # If there were no matches, return the untouched param name
+    return name
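
The weight iterators above can be exercised standalone. A minimal sketch (not part of the package diff), assuming the 0.4.0 wheel and its dependencies (safetensors, gguf, vllm) are installed and /tmp is writable:

import torch
from safetensors.torch import save_file

from sglang.srt.model_loader.weight_utils import (
    default_weight_loader,
    safetensors_weights_iterator,
)

# Write a tiny one-tensor checkpoint, then stream it back shard by shard.
save_file({"layer.weight": torch.ones(2, 3)}, "/tmp/demo.safetensors")
param = torch.nn.Parameter(torch.zeros(2, 3))
for name, loaded in safetensors_weights_iterator(["/tmp/demo.safetensors"]):
    print(name, tuple(loaded.shape))      # layer.weight (2, 3)
    default_weight_loader(param, loaded)  # shape-checked copy into the parameter

This is the same iterate-then-load pattern the new loader module applies per model: an iterator yields (name, tensor) pairs from checkpoint files, and a per-parameter weight loader copies each tensor into place.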