vllm-cpu 0.9.2.post2 (cp311-cp311-manylinux_2_17_aarch64.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
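For context, the per-file listing below enumerates the members of the wheel archive for this release. A wheel is an ordinary zip file, so a comparable listing can be reproduced locally with Python's standard zipfile module. The sketch below is only an illustration of that idea, not the registry's diff tooling: the wheel filename is an assumption based on the header above, and it reports member byte sizes rather than the added/removed line counts (+N -0) shown in the listing.

    # Minimal sketch: list the files contained in a locally downloaded wheel.
    # Assumed filename; adjust to wherever the wheel was saved.
    import zipfile

    WHEEL = "vllm_cpu-0.9.2.post2-cp311-cp311-manylinux_2_17_aarch64.whl"

    with zipfile.ZipFile(WHEEL) as wf:
        members = wf.infolist()
        print(f"Files ({len(members)})")
        for i, info in enumerate(members, start=1):
            # Each archive member corresponds to one packaged file.
            print(f"{i}. {info.filename} ({info.file_size} bytes)")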
Files changed (1236)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +214 -0
  3. vllm/_custom_ops.py +1915 -0
  4. vllm/_ipex_ops.py +350 -0
  5. vllm/_version.py +34 -0
  6. vllm/adapter_commons/__init__.py +0 -0
  7. vllm/adapter_commons/layers.py +16 -0
  8. vllm/adapter_commons/models.py +106 -0
  9. vllm/adapter_commons/request.py +26 -0
  10. vllm/adapter_commons/utils.py +93 -0
  11. vllm/adapter_commons/worker_manager.py +39 -0
  12. vllm/assets/__init__.py +0 -0
  13. vllm/assets/audio.py +45 -0
  14. vllm/assets/base.py +41 -0
  15. vllm/assets/image.py +34 -0
  16. vllm/assets/video.py +139 -0
  17. vllm/attention/__init__.py +20 -0
  18. vllm/attention/backends/__init__.py +0 -0
  19. vllm/attention/backends/abstract.py +325 -0
  20. vllm/attention/backends/blocksparse_attn.py +465 -0
  21. vllm/attention/backends/cpu_mla.py +307 -0
  22. vllm/attention/backends/dual_chunk_flash_attn.py +1506 -0
  23. vllm/attention/backends/flash_attn.py +1008 -0
  24. vllm/attention/backends/flashinfer.py +1107 -0
  25. vllm/attention/backends/flashmla.py +244 -0
  26. vllm/attention/backends/hpu_attn.py +318 -0
  27. vllm/attention/backends/ipex_attn.py +403 -0
  28. vllm/attention/backends/mla/__init__.py +0 -0
  29. vllm/attention/backends/mla/common.py +1391 -0
  30. vllm/attention/backends/pallas.py +356 -0
  31. vllm/attention/backends/placeholder_attn.py +400 -0
  32. vllm/attention/backends/rocm_aiter_mla.py +435 -0
  33. vllm/attention/backends/rocm_flash_attn.py +1015 -0
  34. vllm/attention/backends/torch_sdpa.py +707 -0
  35. vllm/attention/backends/triton_mla.py +115 -0
  36. vllm/attention/backends/utils.py +610 -0
  37. vllm/attention/backends/xformers.py +807 -0
  38. vllm/attention/layer.py +481 -0
  39. vllm/attention/ops/__init__.py +0 -0
  40. vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
  41. vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +433 -0
  42. vllm/attention/ops/blocksparse_attention/interface.py +239 -0
  43. vllm/attention/ops/blocksparse_attention/utils.py +246 -0
  44. vllm/attention/ops/chunked_prefill_paged_decode.py +368 -0
  45. vllm/attention/ops/flashmla.py +116 -0
  46. vllm/attention/ops/hpu_paged_attn.py +88 -0
  47. vllm/attention/ops/ipex_attn.py +195 -0
  48. vllm/attention/ops/merge_attn_states.py +43 -0
  49. vllm/attention/ops/nki_flash_attn.py +903 -0
  50. vllm/attention/ops/paged_attn.py +256 -0
  51. vllm/attention/ops/pallas_kv_cache_update.py +120 -0
  52. vllm/attention/ops/prefix_prefill.py +902 -0
  53. vllm/attention/ops/rocm_aiter_mla.py +100 -0
  54. vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
  55. vllm/attention/ops/triton_decode_attention.py +674 -0
  56. vllm/attention/ops/triton_flash_attention.py +984 -0
  57. vllm/attention/ops/triton_merge_attn_states.py +97 -0
  58. vllm/attention/ops/triton_unified_attention.py +738 -0
  59. vllm/attention/selector.py +214 -0
  60. vllm/attention/utils/fa_utils.py +72 -0
  61. vllm/beam_search.py +87 -0
  62. vllm/benchmarks/__init__.py +0 -0
  63. vllm/benchmarks/datasets.py +1441 -0
  64. vllm/benchmarks/endpoint_request_func.py +393 -0
  65. vllm/benchmarks/latency.py +168 -0
  66. vllm/benchmarks/serve.py +1063 -0
  67. vllm/benchmarks/throughput.py +609 -0
  68. vllm/benchmarks/utils.py +70 -0
  69. vllm/collect_env.py +820 -0
  70. vllm/compilation/__init__.py +0 -0
  71. vllm/compilation/activation_quant_fusion.py +89 -0
  72. vllm/compilation/backends.py +610 -0
  73. vllm/compilation/base_piecewise_backend.py +72 -0
  74. vllm/compilation/collective_fusion.py +127 -0
  75. vllm/compilation/compiler_interface.py +564 -0
  76. vllm/compilation/counter.py +41 -0
  77. vllm/compilation/cuda_piecewise_backend.py +218 -0
  78. vllm/compilation/decorators.py +250 -0
  79. vllm/compilation/fix_functionalization.py +191 -0
  80. vllm/compilation/fusion.py +645 -0
  81. vllm/compilation/fusion_attn.py +166 -0
  82. vllm/compilation/fx_utils.py +84 -0
  83. vllm/compilation/inductor_pass.py +115 -0
  84. vllm/compilation/monitor.py +39 -0
  85. vllm/compilation/multi_output_match.py +109 -0
  86. vllm/compilation/noop_elimination.py +165 -0
  87. vllm/compilation/pass_manager.py +82 -0
  88. vllm/compilation/sequence_parallelism.py +482 -0
  89. vllm/compilation/torch25_custom_graph_pass.py +42 -0
  90. vllm/compilation/vllm_inductor_pass.py +70 -0
  91. vllm/compilation/wrapper.py +135 -0
  92. vllm/config.py +4913 -0
  93. vllm/connections.py +174 -0
  94. vllm/core/__init__.py +0 -0
  95. vllm/core/block/__init__.py +0 -0
  96. vllm/core/block/block_table.py +399 -0
  97. vllm/core/block/common.py +371 -0
  98. vllm/core/block/cpu_gpu_block_allocator.py +441 -0
  99. vllm/core/block/interfaces.py +319 -0
  100. vllm/core/block/naive_block.py +466 -0
  101. vllm/core/block/prefix_caching_block.py +1135 -0
  102. vllm/core/block/utils.py +28 -0
  103. vllm/core/block_manager.py +525 -0
  104. vllm/core/evictor.py +157 -0
  105. vllm/core/interfaces.py +139 -0
  106. vllm/core/placeholder_block_space_manager.py +103 -0
  107. vllm/core/scheduler.py +2126 -0
  108. vllm/device_allocator/__init__.py +0 -0
  109. vllm/device_allocator/cumem.py +281 -0
  110. vllm/distributed/__init__.py +6 -0
  111. vllm/distributed/communication_op.py +41 -0
  112. vllm/distributed/device_communicators/__init__.py +0 -0
  113. vllm/distributed/device_communicators/all2all.py +264 -0
  114. vllm/distributed/device_communicators/base_device_communicator.py +260 -0
  115. vllm/distributed/device_communicators/cpu_communicator.py +145 -0
  116. vllm/distributed/device_communicators/cuda_communicator.py +194 -0
  117. vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
  118. vllm/distributed/device_communicators/custom_all_reduce.py +304 -0
  119. vllm/distributed/device_communicators/custom_all_reduce_utils.py +259 -0
  120. vllm/distributed/device_communicators/hpu_communicator.py +46 -0
  121. vllm/distributed/device_communicators/neuron_communicator.py +20 -0
  122. vllm/distributed/device_communicators/pynccl.py +218 -0
  123. vllm/distributed/device_communicators/pynccl_wrapper.py +349 -0
  124. vllm/distributed/device_communicators/quick_all_reduce.py +278 -0
  125. vllm/distributed/device_communicators/shm_broadcast.py +585 -0
  126. vllm/distributed/device_communicators/tpu_communicator.py +103 -0
  127. vllm/distributed/device_communicators/xpu_communicator.py +55 -0
  128. vllm/distributed/eplb/__init__.py +8 -0
  129. vllm/distributed/eplb/eplb_state.py +432 -0
  130. vllm/distributed/eplb/rebalance_algo.py +234 -0
  131. vllm/distributed/eplb/rebalance_execute.py +307 -0
  132. vllm/distributed/kv_events.py +356 -0
  133. vllm/distributed/kv_transfer/README.md +29 -0
  134. vllm/distributed/kv_transfer/__init__.py +12 -0
  135. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  136. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  137. vllm/distributed/kv_transfer/kv_connector/base.py +128 -0
  138. vllm/distributed/kv_transfer/kv_connector/factory.py +133 -0
  139. vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +99 -0
  140. vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +203 -0
  141. vllm/distributed/kv_transfer/kv_connector/simple_connector.py +329 -0
  142. vllm/distributed/kv_transfer/kv_connector/utils.py +109 -0
  143. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
  144. vllm/distributed/kv_transfer/kv_connector/v1/base.py +283 -0
  145. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +167 -0
  146. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +201 -0
  147. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1103 -0
  148. vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
  149. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +485 -0
  150. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +533 -0
  151. vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +265 -0
  152. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +389 -0
  153. vllm/distributed/kv_transfer/kv_connector_agent.py +77 -0
  154. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  155. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
  156. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
  157. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
  158. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  159. vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
  160. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +290 -0
  161. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
  162. vllm/distributed/kv_transfer/kv_transfer_state.py +71 -0
  163. vllm/distributed/parallel_state.py +1385 -0
  164. vllm/distributed/tpu_distributed_utils.py +178 -0
  165. vllm/distributed/utils.py +536 -0
  166. vllm/engine/__init__.py +0 -0
  167. vllm/engine/arg_utils.py +1801 -0
  168. vllm/engine/async_llm_engine.py +1200 -0
  169. vllm/engine/async_timeout.py +173 -0
  170. vllm/engine/llm_engine.py +2101 -0
  171. vllm/engine/metrics.py +629 -0
  172. vllm/engine/metrics_types.py +94 -0
  173. vllm/engine/multiprocessing/__init__.py +148 -0
  174. vllm/engine/multiprocessing/client.py +681 -0
  175. vllm/engine/multiprocessing/engine.py +460 -0
  176. vllm/engine/output_processor/__init__.py +0 -0
  177. vllm/engine/output_processor/interfaces.py +75 -0
  178. vllm/engine/output_processor/multi_step.py +216 -0
  179. vllm/engine/output_processor/single_step.py +145 -0
  180. vllm/engine/output_processor/stop_checker.py +131 -0
  181. vllm/engine/output_processor/util.py +28 -0
  182. vllm/engine/protocol.py +326 -0
  183. vllm/entrypoints/__init__.py +0 -0
  184. vllm/entrypoints/api_server.py +178 -0
  185. vllm/entrypoints/chat_utils.py +1278 -0
  186. vllm/entrypoints/cli/__init__.py +12 -0
  187. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  188. vllm/entrypoints/cli/benchmark/base.py +25 -0
  189. vllm/entrypoints/cli/benchmark/latency.py +21 -0
  190. vllm/entrypoints/cli/benchmark/main.py +58 -0
  191. vllm/entrypoints/cli/benchmark/serve.py +21 -0
  192. vllm/entrypoints/cli/benchmark/throughput.py +21 -0
  193. vllm/entrypoints/cli/collect_env.py +36 -0
  194. vllm/entrypoints/cli/main.py +71 -0
  195. vllm/entrypoints/cli/openai.py +201 -0
  196. vllm/entrypoints/cli/run_batch.py +69 -0
  197. vllm/entrypoints/cli/serve.py +265 -0
  198. vllm/entrypoints/cli/types.py +29 -0
  199. vllm/entrypoints/launcher.py +147 -0
  200. vllm/entrypoints/llm.py +1599 -0
  201. vllm/entrypoints/logger.py +50 -0
  202. vllm/entrypoints/openai/__init__.py +0 -0
  203. vllm/entrypoints/openai/api_server.py +1495 -0
  204. vllm/entrypoints/openai/cli_args.py +331 -0
  205. vllm/entrypoints/openai/logits_processors.py +90 -0
  206. vllm/entrypoints/openai/protocol.py +2096 -0
  207. vllm/entrypoints/openai/run_batch.py +473 -0
  208. vllm/entrypoints/openai/serving_chat.py +1258 -0
  209. vllm/entrypoints/openai/serving_classification.py +160 -0
  210. vllm/entrypoints/openai/serving_completion.py +618 -0
  211. vllm/entrypoints/openai/serving_embedding.py +201 -0
  212. vllm/entrypoints/openai/serving_engine.py +988 -0
  213. vllm/entrypoints/openai/serving_models.py +315 -0
  214. vllm/entrypoints/openai/serving_pooling.py +234 -0
  215. vllm/entrypoints/openai/serving_score.py +431 -0
  216. vllm/entrypoints/openai/serving_tokenization.py +157 -0
  217. vllm/entrypoints/openai/serving_transcription.py +132 -0
  218. vllm/entrypoints/openai/speech_to_text.py +395 -0
  219. vllm/entrypoints/openai/tool_parsers/__init__.py +25 -0
  220. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
  221. vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
  222. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
  223. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
  224. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +371 -0
  225. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
  226. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
  227. vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
  228. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +267 -0
  229. vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +369 -0
  230. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
  231. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
  232. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
  233. vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
  234. vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +466 -0
  235. vllm/entrypoints/score_utils.py +50 -0
  236. vllm/entrypoints/ssl.py +75 -0
  237. vllm/entrypoints/utils.py +262 -0
  238. vllm/env_override.py +41 -0
  239. vllm/envs.py +1029 -0
  240. vllm/executor/__init__.py +0 -0
  241. vllm/executor/executor_base.py +401 -0
  242. vllm/executor/mp_distributed_executor.py +244 -0
  243. vllm/executor/msgspec_utils.py +30 -0
  244. vllm/executor/multiproc_worker_utils.py +313 -0
  245. vllm/executor/ray_distributed_executor.py +701 -0
  246. vllm/executor/ray_utils.py +399 -0
  247. vllm/executor/uniproc_executor.py +139 -0
  248. vllm/forward_context.py +185 -0
  249. vllm/inputs/__init__.py +41 -0
  250. vllm/inputs/data.py +331 -0
  251. vllm/inputs/parse.py +151 -0
  252. vllm/inputs/preprocess.py +924 -0
  253. vllm/inputs/registry.py +245 -0
  254. vllm/jsontree.py +80 -0
  255. vllm/logger.py +212 -0
  256. vllm/logging_utils/__init__.py +8 -0
  257. vllm/logging_utils/dump_input.py +81 -0
  258. vllm/logging_utils/formatter.py +18 -0
  259. vllm/logits_process.py +119 -0
  260. vllm/lora/__init__.py +0 -0
  261. vllm/lora/fully_sharded_layers.py +355 -0
  262. vllm/lora/layers.py +1285 -0
  263. vllm/lora/lora.py +199 -0
  264. vllm/lora/models.py +818 -0
  265. vllm/lora/ops/__init__.py +0 -0
  266. vllm/lora/ops/torch_ops/__init__.py +16 -0
  267. vllm/lora/ops/torch_ops/lora_ops.py +119 -0
  268. vllm/lora/ops/triton_ops/__init__.py +12 -0
  269. vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
  270. vllm/lora/ops/triton_ops/lora_expand_op.py +290 -0
  271. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
  272. vllm/lora/ops/triton_ops/lora_shrink_op.py +244 -0
  273. vllm/lora/ops/triton_ops/utils.py +120 -0
  274. vllm/lora/ops/xla_ops/__init__.py +7 -0
  275. vllm/lora/ops/xla_ops/lora_ops.py +145 -0
  276. vllm/lora/peft_helper.py +136 -0
  277. vllm/lora/punica_wrapper/__init__.py +10 -0
  278. vllm/lora/punica_wrapper/punica_base.py +485 -0
  279. vllm/lora/punica_wrapper/punica_cpu.py +349 -0
  280. vllm/lora/punica_wrapper/punica_gpu.py +290 -0
  281. vllm/lora/punica_wrapper/punica_hpu.py +145 -0
  282. vllm/lora/punica_wrapper/punica_selector.py +20 -0
  283. vllm/lora/punica_wrapper/punica_tpu.py +405 -0
  284. vllm/lora/punica_wrapper/utils.py +164 -0
  285. vllm/lora/request.py +99 -0
  286. vllm/lora/resolver.py +85 -0
  287. vllm/lora/utils.py +240 -0
  288. vllm/lora/worker_manager.py +256 -0
  289. vllm/model_executor/__init__.py +16 -0
  290. vllm/model_executor/custom_op.py +208 -0
  291. vllm/model_executor/guided_decoding/__init__.py +181 -0
  292. vllm/model_executor/guided_decoding/guidance_decoding.py +63 -0
  293. vllm/model_executor/guided_decoding/guidance_logits_processors.py +104 -0
  294. vllm/model_executor/guided_decoding/guided_fields.py +41 -0
  295. vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +67 -0
  296. vllm/model_executor/guided_decoding/outlines_decoding.py +155 -0
  297. vllm/model_executor/guided_decoding/outlines_logits_processors.py +284 -0
  298. vllm/model_executor/guided_decoding/utils.py +242 -0
  299. vllm/model_executor/guided_decoding/xgrammar_decoding.py +426 -0
  300. vllm/model_executor/layers/__init__.py +0 -0
  301. vllm/model_executor/layers/activation.py +420 -0
  302. vllm/model_executor/layers/fused_moe/__init__.py +78 -0
  303. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +298 -0
  304. vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +140 -0
  305. vllm/model_executor/layers/fused_moe/config.py +456 -0
  306. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  307. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  308. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  309. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  310. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  311. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  312. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  313. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  314. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  315. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  316. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  317. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  318. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  319. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  320. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  321. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  322. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  323. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  324. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  325. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  326. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  327. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  328. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  329. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  330. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  331. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  332. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  333. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  334. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  335. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  336. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  337. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  338. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  339. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  454. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  455. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  456. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  457. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  458. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  459. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  460. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  461. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  462. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  463. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  464. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  465. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  466. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  467. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  468. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  469. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  470. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  471. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  472. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  473. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  474. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  475. vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +215 -0
  476. vllm/model_executor/layers/fused_moe/cutlass_moe.py +645 -0
  477. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +250 -0
  478. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +231 -0
  479. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +183 -0
  480. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1021 -0
  481. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +234 -0
  482. vllm/model_executor/layers/fused_moe/fused_moe.py +1734 -0
  483. vllm/model_executor/layers/fused_moe/layer.py +1528 -0
  484. vllm/model_executor/layers/fused_moe/modular_kernel.py +598 -0
  485. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +224 -0
  486. vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
  487. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +190 -0
  488. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  489. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +233 -0
  490. vllm/model_executor/layers/fused_moe/prepare_finalize.py +66 -0
  491. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +429 -0
  492. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +136 -0
  493. vllm/model_executor/layers/fused_moe/utils.py +144 -0
  494. vllm/model_executor/layers/layernorm.py +287 -0
  495. vllm/model_executor/layers/lightning_attn.py +652 -0
  496. vllm/model_executor/layers/linear.py +1547 -0
  497. vllm/model_executor/layers/logits_processor.py +197 -0
  498. vllm/model_executor/layers/mamba/__init__.py +0 -0
  499. vllm/model_executor/layers/mamba/mamba2_metadata.py +125 -0
  500. vllm/model_executor/layers/mamba/mamba_mixer.py +245 -0
  501. vllm/model_executor/layers/mamba/mamba_mixer2.py +731 -0
  502. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  503. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +105 -0
  504. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
  505. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +262 -0
  506. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +589 -0
  507. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +751 -0
  508. vllm/model_executor/layers/mamba/ops/ssd_combined.py +232 -0
  509. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +206 -0
  510. vllm/model_executor/layers/pooler.py +473 -0
  511. vllm/model_executor/layers/quantization/__init__.py +160 -0
  512. vllm/model_executor/layers/quantization/aqlm.py +376 -0
  513. vllm/model_executor/layers/quantization/auto_round.py +310 -0
  514. vllm/model_executor/layers/quantization/awq.py +228 -0
  515. vllm/model_executor/layers/quantization/awq_marlin.py +523 -0
  516. vllm/model_executor/layers/quantization/awq_triton.py +320 -0
  517. vllm/model_executor/layers/quantization/base_config.py +164 -0
  518. vllm/model_executor/layers/quantization/bitblas.py +462 -0
  519. vllm/model_executor/layers/quantization/bitsandbytes.py +396 -0
  520. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  521. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +694 -0
  522. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1613 -0
  523. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +24 -0
  524. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +358 -0
  525. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  526. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
  527. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +105 -0
  528. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +149 -0
  529. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
  530. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +150 -0
  531. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
  532. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
  533. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
  534. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  535. vllm/model_executor/layers/quantization/deepgemm.py +83 -0
  536. vllm/model_executor/layers/quantization/deepspeedfp.py +195 -0
  537. vllm/model_executor/layers/quantization/experts_int8.py +204 -0
  538. vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
  539. vllm/model_executor/layers/quantization/fp8.py +950 -0
  540. vllm/model_executor/layers/quantization/gguf.py +577 -0
  541. vllm/model_executor/layers/quantization/gptq.py +278 -0
  542. vllm/model_executor/layers/quantization/gptq_bitblas.py +446 -0
  543. vllm/model_executor/layers/quantization/gptq_marlin.py +679 -0
  544. vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
  545. vllm/model_executor/layers/quantization/hqq_marlin.py +332 -0
  546. vllm/model_executor/layers/quantization/ipex_quant.py +250 -0
  547. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  548. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +90 -0
  549. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +83 -0
  550. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
  551. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +300 -0
  552. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
  553. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +132 -0
  554. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +131 -0
  555. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
  556. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +87 -0
  557. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +120 -0
  558. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
  559. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
  560. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +105 -0
  561. vllm/model_executor/layers/quantization/kv_cache.py +139 -0
  562. vllm/model_executor/layers/quantization/marlin.py +263 -0
  563. vllm/model_executor/layers/quantization/modelopt.py +747 -0
  564. vllm/model_executor/layers/quantization/moe_wna16.py +457 -0
  565. vllm/model_executor/layers/quantization/neuron_quant.py +76 -0
  566. vllm/model_executor/layers/quantization/ptpc_fp8.py +127 -0
  567. vllm/model_executor/layers/quantization/qqq.py +275 -0
  568. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  569. vllm/model_executor/layers/quantization/quark/quark.py +437 -0
  570. vllm/model_executor/layers/quantization/quark/quark_moe.py +245 -0
  571. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  572. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  573. vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +126 -0
  574. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +157 -0
  575. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
  576. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  577. vllm/model_executor/layers/quantization/rtn.py +289 -0
  578. vllm/model_executor/layers/quantization/schema.py +86 -0
  579. vllm/model_executor/layers/quantization/torchao.py +212 -0
  580. vllm/model_executor/layers/quantization/tpu_int8.py +121 -0
  581. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  582. vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
  583. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +208 -0
  584. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  585. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  586. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  587. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  588. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  589. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  590. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  591. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  592. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  593. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  594. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  595. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  596. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  597. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  598. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  599. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  600. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  601. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  602. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  603. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  604. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  605. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  606. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  607. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  608. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  609. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  610. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  611. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  612. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  613. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  614. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  615. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  616. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  617. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  618. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  619. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  620. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  621. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  622. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  623. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  624. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  625. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  626. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  627. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  628. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  629. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  630. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  631. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  632. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  633. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  634. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  635. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  636. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  637. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  638. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  639. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  640. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  641. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  642. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  643. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  644. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  645. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  646. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  647. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  648. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  649. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  650. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  651. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  652. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  653. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  654. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  655. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  656. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  657. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  658. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  659. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  660. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  661. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  662. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  663. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  664. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  665. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  666. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  667. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  668. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  669. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  670. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  671. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  672. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  673. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  674. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  675. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  676. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  677. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  678. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  679. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  680. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  681. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  682. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  683. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  684. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  685. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  686. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  687. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  688. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  689. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  690. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  691. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  722. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  723. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  724. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  725. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  726. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  727. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  728. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  729. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  730. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  731. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  732. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  733. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  734. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  735. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  736. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  737. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  738. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  739. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  740. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  741. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  742. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  743. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  744. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  745. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  746. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  747. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  748. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  749. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  750. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  751. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  752. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  753. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  754. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  755. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  756. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  757. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  758. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  759. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  760. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  761. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  762. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  763. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  764. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  765. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  766. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  767. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  768. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  769. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  770. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  771. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  772. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  773. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  774. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  775. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  776. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  777. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  778. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  779. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  780. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  781. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  782. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  783. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  784. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  785. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  786. vllm/model_executor/layers/quantization/utils/fp8_utils.py +653 -0
  787. vllm/model_executor/layers/quantization/utils/gptq_utils.py +95 -0
  788. vllm/model_executor/layers/quantization/utils/int8_utils.py +485 -0
  789. vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
  790. vllm/model_executor/layers/quantization/utils/machete_utils.py +50 -0
  791. vllm/model_executor/layers/quantization/utils/marlin_utils.py +476 -0
  792. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +283 -0
  793. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +325 -0
  794. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
  795. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
  796. vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +126 -0
  797. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +45 -0
  798. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +146 -0
  799. vllm/model_executor/layers/quantization/utils/quant_utils.py +573 -0
  800. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +405 -0
  801. vllm/model_executor/layers/rejection_sampler.py +406 -0
  802. vllm/model_executor/layers/resampler.py +270 -0
  803. vllm/model_executor/layers/rotary_embedding.py +2025 -0
  804. vllm/model_executor/layers/sampler.py +1204 -0
  805. vllm/model_executor/layers/spec_decode_base_sampler.py +259 -0
  806. vllm/model_executor/layers/typical_acceptance_sampler.py +166 -0
  807. vllm/model_executor/layers/utils.py +116 -0
  808. vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
  809. vllm/model_executor/model_loader/__init__.py +77 -0
  810. vllm/model_executor/model_loader/base_loader.py +43 -0
  811. vllm/model_executor/model_loader/bitsandbytes_loader.py +613 -0
  812. vllm/model_executor/model_loader/default_loader.py +282 -0
  813. vllm/model_executor/model_loader/dummy_loader.py +27 -0
  814. vllm/model_executor/model_loader/gguf_loader.py +120 -0
  815. vllm/model_executor/model_loader/neuron.py +476 -0
  816. vllm/model_executor/model_loader/neuronx_distributed.py +685 -0
  817. vllm/model_executor/model_loader/runai_streamer_loader.py +109 -0
  818. vllm/model_executor/model_loader/sharded_state_loader.py +201 -0
  819. vllm/model_executor/model_loader/tensorizer.py +602 -0
  820. vllm/model_executor/model_loader/tensorizer_loader.py +127 -0
  821. vllm/model_executor/model_loader/tpu.py +113 -0
  822. vllm/model_executor/model_loader/utils.py +315 -0
  823. vllm/model_executor/model_loader/weight_utils.py +782 -0
  824. vllm/model_executor/models/__init__.py +30 -0
  825. vllm/model_executor/models/adapters.py +375 -0
  826. vllm/model_executor/models/aimv2.py +246 -0
  827. vllm/model_executor/models/arctic.py +559 -0
  828. vllm/model_executor/models/aria.py +670 -0
  829. vllm/model_executor/models/aya_vision.py +486 -0
  830. vllm/model_executor/models/baichuan.py +474 -0
  831. vllm/model_executor/models/bamba.py +558 -0
  832. vllm/model_executor/models/bart.py +938 -0
  833. vllm/model_executor/models/bert.py +513 -0
  834. vllm/model_executor/models/bert_with_rope.py +617 -0
  835. vllm/model_executor/models/blip.py +339 -0
  836. vllm/model_executor/models/blip2.py +728 -0
  837. vllm/model_executor/models/bloom.py +373 -0
  838. vllm/model_executor/models/chameleon.py +1146 -0
  839. vllm/model_executor/models/chatglm.py +478 -0
  840. vllm/model_executor/models/clip.py +407 -0
  841. vllm/model_executor/models/commandr.py +471 -0
  842. vllm/model_executor/models/config.py +200 -0
  843. vllm/model_executor/models/constant_size_cache.py +137 -0
  844. vllm/model_executor/models/dbrx.py +472 -0
  845. vllm/model_executor/models/deepseek.py +486 -0
  846. vllm/model_executor/models/deepseek_mtp.py +281 -0
  847. vllm/model_executor/models/deepseek_v2.py +935 -0
  848. vllm/model_executor/models/deepseek_vl2.py +660 -0
  849. vllm/model_executor/models/dots1.py +536 -0
  850. vllm/model_executor/models/eagle.py +261 -0
  851. vllm/model_executor/models/ernie45.py +43 -0
  852. vllm/model_executor/models/ernie45_moe.py +583 -0
  853. vllm/model_executor/models/exaone.py +551 -0
  854. vllm/model_executor/models/fairseq2_llama.py +154 -0
  855. vllm/model_executor/models/falcon.py +510 -0
  856. vllm/model_executor/models/falcon_h1.py +708 -0
  857. vllm/model_executor/models/florence2.py +1113 -0
  858. vllm/model_executor/models/fuyu.py +406 -0
  859. vllm/model_executor/models/gemma.py +427 -0
  860. vllm/model_executor/models/gemma2.py +427 -0
  861. vllm/model_executor/models/gemma3.py +535 -0
  862. vllm/model_executor/models/gemma3_mm.py +729 -0
  863. vllm/model_executor/models/gemma3n.py +811 -0
  864. vllm/model_executor/models/glm.py +23 -0
  865. vllm/model_executor/models/glm4.py +305 -0
  866. vllm/model_executor/models/glm4_1v.py +1590 -0
  867. vllm/model_executor/models/glm4v.py +657 -0
  868. vllm/model_executor/models/gpt2.py +382 -0
  869. vllm/model_executor/models/gpt_bigcode.py +335 -0
  870. vllm/model_executor/models/gpt_j.py +339 -0
  871. vllm/model_executor/models/gpt_neox.py +332 -0
  872. vllm/model_executor/models/granite.py +493 -0
  873. vllm/model_executor/models/granite_speech.py +790 -0
  874. vllm/model_executor/models/granitemoe.py +437 -0
  875. vllm/model_executor/models/granitemoehybrid.py +653 -0
  876. vllm/model_executor/models/granitemoeshared.py +341 -0
  877. vllm/model_executor/models/gritlm.py +224 -0
  878. vllm/model_executor/models/grok1.py +546 -0
  879. vllm/model_executor/models/h2ovl.py +549 -0
  880. vllm/model_executor/models/hunyuan_v1_moe.py +897 -0
  881. vllm/model_executor/models/idefics2_vision_model.py +389 -0
  882. vllm/model_executor/models/idefics3.py +786 -0
  883. vllm/model_executor/models/interfaces.py +681 -0
  884. vllm/model_executor/models/interfaces_base.py +164 -0
  885. vllm/model_executor/models/intern_vit.py +480 -0
  886. vllm/model_executor/models/internlm2.py +455 -0
  887. vllm/model_executor/models/internlm2_ve.py +147 -0
  888. vllm/model_executor/models/internvl.py +1432 -0
  889. vllm/model_executor/models/jais.py +373 -0
  890. vllm/model_executor/models/jamba.py +592 -0
  891. vllm/model_executor/models/keye.py +1736 -0
  892. vllm/model_executor/models/kimi_vl.py +585 -0
  893. vllm/model_executor/models/llama.py +644 -0
  894. vllm/model_executor/models/llama4.py +531 -0
  895. vllm/model_executor/models/llama_eagle.py +165 -0
  896. vllm/model_executor/models/llama_eagle3.py +263 -0
  897. vllm/model_executor/models/llava.py +887 -0
  898. vllm/model_executor/models/llava_next.py +604 -0
  899. vllm/model_executor/models/llava_next_video.py +492 -0
  900. vllm/model_executor/models/llava_onevision.py +985 -0
  901. vllm/model_executor/models/mamba.py +273 -0
  902. vllm/model_executor/models/mamba2.py +320 -0
  903. vllm/model_executor/models/mamba_cache.py +76 -0
  904. vllm/model_executor/models/medusa.py +219 -0
  905. vllm/model_executor/models/mimo.py +192 -0
  906. vllm/model_executor/models/mimo_mtp.py +285 -0
  907. vllm/model_executor/models/minicpm.py +592 -0
  908. vllm/model_executor/models/minicpm3.py +230 -0
  909. vllm/model_executor/models/minicpm_eagle.py +391 -0
  910. vllm/model_executor/models/minicpmo.py +772 -0
  911. vllm/model_executor/models/minicpmv.py +1307 -0
  912. vllm/model_executor/models/minimax_cache.py +36 -0
  913. vllm/model_executor/models/minimax_text_01.py +1301 -0
  914. vllm/model_executor/models/minimax_vl_01.py +374 -0
  915. vllm/model_executor/models/mistral3.py +624 -0
  916. vllm/model_executor/models/mixtral.py +488 -0
  917. vllm/model_executor/models/mixtral_quant.py +453 -0
  918. vllm/model_executor/models/mllama.py +1682 -0
  919. vllm/model_executor/models/mllama4.py +947 -0
  920. vllm/model_executor/models/mlp_speculator.py +206 -0
  921. vllm/model_executor/models/modernbert.py +339 -0
  922. vllm/model_executor/models/module_mapping.py +72 -0
  923. vllm/model_executor/models/molmo.py +1576 -0
  924. vllm/model_executor/models/moonvit.py +630 -0
  925. vllm/model_executor/models/mpt.py +331 -0
  926. vllm/model_executor/models/nemotron.py +508 -0
  927. vllm/model_executor/models/nemotron_h.py +588 -0
  928. vllm/model_executor/models/nemotron_nas.py +484 -0
  929. vllm/model_executor/models/nvlm_d.py +216 -0
  930. vllm/model_executor/models/olmo.py +389 -0
  931. vllm/model_executor/models/olmo2.py +414 -0
  932. vllm/model_executor/models/olmoe.py +468 -0
  933. vllm/model_executor/models/opt.py +412 -0
  934. vllm/model_executor/models/orion.py +349 -0
  935. vllm/model_executor/models/ovis.py +577 -0
  936. vllm/model_executor/models/paligemma.py +419 -0
  937. vllm/model_executor/models/persimmon.py +344 -0
  938. vllm/model_executor/models/phi.py +356 -0
  939. vllm/model_executor/models/phi3.py +19 -0
  940. vllm/model_executor/models/phi3_small.py +465 -0
  941. vllm/model_executor/models/phi3v.py +733 -0
  942. vllm/model_executor/models/phi4mm.py +1258 -0
  943. vllm/model_executor/models/phi4mm_audio.py +1233 -0
  944. vllm/model_executor/models/phi4mm_utils.py +1884 -0
  945. vllm/model_executor/models/phimoe.py +674 -0
  946. vllm/model_executor/models/pixtral.py +1329 -0
  947. vllm/model_executor/models/plamo2.py +738 -0
  948. vllm/model_executor/models/prithvi_geospatial_mae.py +240 -0
  949. vllm/model_executor/models/qwen.py +362 -0
  950. vllm/model_executor/models/qwen2.py +501 -0
  951. vllm/model_executor/models/qwen2_5_omni_thinker.py +923 -0
  952. vllm/model_executor/models/qwen2_5_vl.py +1175 -0
  953. vllm/model_executor/models/qwen2_audio.py +420 -0
  954. vllm/model_executor/models/qwen2_moe.py +540 -0
  955. vllm/model_executor/models/qwen2_rm.py +122 -0
  956. vllm/model_executor/models/qwen2_vl.py +1513 -0
  957. vllm/model_executor/models/qwen3.py +325 -0
  958. vllm/model_executor/models/qwen3_moe.py +541 -0
  959. vllm/model_executor/models/qwen_vl.py +796 -0
  960. vllm/model_executor/models/registry.py +634 -0
  961. vllm/model_executor/models/roberta.py +271 -0
  962. vllm/model_executor/models/siglip.py +524 -0
  963. vllm/model_executor/models/skyworkr1v.py +961 -0
  964. vllm/model_executor/models/smolvlm.py +52 -0
  965. vllm/model_executor/models/solar.py +506 -0
  966. vllm/model_executor/models/stablelm.py +343 -0
  967. vllm/model_executor/models/starcoder2.py +356 -0
  968. vllm/model_executor/models/tarsier.py +652 -0
  969. vllm/model_executor/models/telechat2.py +140 -0
  970. vllm/model_executor/models/teleflm.py +79 -0
  971. vllm/model_executor/models/transformers.py +509 -0
  972. vllm/model_executor/models/ultravox.py +670 -0
  973. vllm/model_executor/models/utils.py +744 -0
  974. vllm/model_executor/models/vision.py +147 -0
  975. vllm/model_executor/models/whisper.py +886 -0
  976. vllm/model_executor/models/zamba2.py +1036 -0
  977. vllm/model_executor/parameter.py +459 -0
  978. vllm/model_executor/pooling_metadata.py +72 -0
  979. vllm/model_executor/sampling_metadata.py +597 -0
  980. vllm/model_executor/utils.py +80 -0
  981. vllm/multimodal/__init__.py +33 -0
  982. vllm/multimodal/audio.py +116 -0
  983. vllm/multimodal/base.py +219 -0
  984. vllm/multimodal/hasher.py +91 -0
  985. vllm/multimodal/image.py +103 -0
  986. vllm/multimodal/inputs.py +878 -0
  987. vllm/multimodal/parse.py +499 -0
  988. vllm/multimodal/processing.py +1948 -0
  989. vllm/multimodal/profiling.py +283 -0
  990. vllm/multimodal/registry.py +331 -0
  991. vllm/multimodal/utils.py +492 -0
  992. vllm/multimodal/video.py +227 -0
  993. vllm/outputs.py +516 -0
  994. vllm/platforms/__init__.py +291 -0
  995. vllm/platforms/cpu.py +281 -0
  996. vllm/platforms/cuda.py +568 -0
  997. vllm/platforms/hpu.py +106 -0
  998. vllm/platforms/interface.py +551 -0
  999. vllm/platforms/neuron.py +150 -0
  1000. vllm/platforms/rocm.py +453 -0
  1001. vllm/platforms/tpu.py +206 -0
  1002. vllm/platforms/xpu.py +192 -0
  1003. vllm/plugins/__init__.py +94 -0
  1004. vllm/plugins/lora_resolvers/README.md +15 -0
  1005. vllm/plugins/lora_resolvers/__init__.py +0 -0
  1006. vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
  1007. vllm/pooling_params.py +64 -0
  1008. vllm/profiler/__init__.py +0 -0
  1009. vllm/profiler/layerwise_profile.py +375 -0
  1010. vllm/profiler/utils.py +148 -0
  1011. vllm/prompt_adapter/__init__.py +0 -0
  1012. vllm/prompt_adapter/layers.py +83 -0
  1013. vllm/prompt_adapter/models.py +358 -0
  1014. vllm/prompt_adapter/request.py +37 -0
  1015. vllm/prompt_adapter/utils.py +98 -0
  1016. vllm/prompt_adapter/worker_manager.py +179 -0
  1017. vllm/py.typed +2 -0
  1018. vllm/reasoning/__init__.py +15 -0
  1019. vllm/reasoning/abs_reasoning_parsers.py +192 -0
  1020. vllm/reasoning/deepseek_r1_reasoning_parser.py +173 -0
  1021. vllm/reasoning/granite_reasoning_parser.py +363 -0
  1022. vllm/reasoning/qwen3_reasoning_parser.py +151 -0
  1023. vllm/sampling_params.py +602 -0
  1024. vllm/scalar_type.py +347 -0
  1025. vllm/scripts.py +15 -0
  1026. vllm/sequence.py +1568 -0
  1027. vllm/spec_decode/__init__.py +0 -0
  1028. vllm/spec_decode/batch_expansion.py +506 -0
  1029. vllm/spec_decode/draft_model_runner.py +349 -0
  1030. vllm/spec_decode/interfaces.py +99 -0
  1031. vllm/spec_decode/medusa_worker.py +138 -0
  1032. vllm/spec_decode/metrics.py +213 -0
  1033. vllm/spec_decode/mlp_speculator_worker.py +94 -0
  1034. vllm/spec_decode/mqa_scorer.py +160 -0
  1035. vllm/spec_decode/multi_step_worker.py +423 -0
  1036. vllm/spec_decode/ngram_worker.py +196 -0
  1037. vllm/spec_decode/proposer_worker_base.py +59 -0
  1038. vllm/spec_decode/smaller_tp_proposer_worker.py +196 -0
  1039. vllm/spec_decode/spec_decode_worker.py +1326 -0
  1040. vllm/spec_decode/target_model_runner.py +45 -0
  1041. vllm/spec_decode/top1_proposer.py +275 -0
  1042. vllm/spec_decode/util.py +277 -0
  1043. vllm/test_utils.py +130 -0
  1044. vllm/third_party/__init__.py +0 -0
  1045. vllm/third_party/pynvml.py +6140 -0
  1046. vllm/tracing.py +131 -0
  1047. vllm/transformers_utils/__init__.py +24 -0
  1048. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1049. vllm/transformers_utils/chat_templates/registry.py +60 -0
  1050. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1051. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1052. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1053. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1054. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1055. vllm/transformers_utils/config.py +922 -0
  1056. vllm/transformers_utils/configs/__init__.py +57 -0
  1057. vllm/transformers_utils/configs/arctic.py +207 -0
  1058. vllm/transformers_utils/configs/chatglm.py +72 -0
  1059. vllm/transformers_utils/configs/cohere2.py +195 -0
  1060. vllm/transformers_utils/configs/dbrx.py +280 -0
  1061. vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
  1062. vllm/transformers_utils/configs/eagle.py +85 -0
  1063. vllm/transformers_utils/configs/exaone.py +190 -0
  1064. vllm/transformers_utils/configs/falcon.py +90 -0
  1065. vllm/transformers_utils/configs/jais.py +238 -0
  1066. vllm/transformers_utils/configs/kimi_vl.py +37 -0
  1067. vllm/transformers_utils/configs/medusa.py +63 -0
  1068. vllm/transformers_utils/configs/minimax_text_01.py +70 -0
  1069. vllm/transformers_utils/configs/minimax_vl_01.py +71 -0
  1070. vllm/transformers_utils/configs/mllama.py +31 -0
  1071. vllm/transformers_utils/configs/mlp_speculator.py +68 -0
  1072. vllm/transformers_utils/configs/moonvit.py +33 -0
  1073. vllm/transformers_utils/configs/mpt.py +180 -0
  1074. vllm/transformers_utils/configs/nemotron.py +205 -0
  1075. vllm/transformers_utils/configs/nemotron_h.py +259 -0
  1076. vllm/transformers_utils/configs/nvlm_d.py +31 -0
  1077. vllm/transformers_utils/configs/ovis.py +184 -0
  1078. vllm/transformers_utils/configs/skyworkr1v.py +54 -0
  1079. vllm/transformers_utils/configs/solar.py +247 -0
  1080. vllm/transformers_utils/configs/telechat2.py +64 -0
  1081. vllm/transformers_utils/configs/ultravox.py +108 -0
  1082. vllm/transformers_utils/detokenizer.py +168 -0
  1083. vllm/transformers_utils/detokenizer_utils.py +189 -0
  1084. vllm/transformers_utils/processor.py +221 -0
  1085. vllm/transformers_utils/processors/__init__.py +8 -0
  1086. vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
  1087. vllm/transformers_utils/processors/ovis.py +420 -0
  1088. vllm/transformers_utils/s3_utils.py +162 -0
  1089. vllm/transformers_utils/tokenizer.py +302 -0
  1090. vllm/transformers_utils/tokenizer_base.py +149 -0
  1091. vllm/transformers_utils/tokenizer_group.py +120 -0
  1092. vllm/transformers_utils/tokenizers/__init__.py +10 -0
  1093. vllm/transformers_utils/tokenizers/mistral.py +493 -0
  1094. vllm/transformers_utils/utils.py +99 -0
  1095. vllm/triton_utils/__init__.py +14 -0
  1096. vllm/triton_utils/importing.py +94 -0
  1097. vllm/usage/__init__.py +0 -0
  1098. vllm/usage/usage_lib.py +259 -0
  1099. vllm/utils/__init__.py +3008 -0
  1100. vllm/v1/__init__.py +0 -0
  1101. vllm/v1/attention/__init__.py +0 -0
  1102. vllm/v1/attention/backends/__init__.py +0 -0
  1103. vllm/v1/attention/backends/cpu_attn.py +184 -0
  1104. vllm/v1/attention/backends/flash_attn.py +757 -0
  1105. vllm/v1/attention/backends/flashinfer.py +680 -0
  1106. vllm/v1/attention/backends/flex_attention.py +491 -0
  1107. vllm/v1/attention/backends/mamba_attn.py +192 -0
  1108. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1109. vllm/v1/attention/backends/mla/common.py +978 -0
  1110. vllm/v1/attention/backends/mla/cutlass_mla.py +98 -0
  1111. vllm/v1/attention/backends/mla/flashmla.py +180 -0
  1112. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +241 -0
  1113. vllm/v1/attention/backends/mla/triton_mla.py +177 -0
  1114. vllm/v1/attention/backends/pallas.py +320 -0
  1115. vllm/v1/attention/backends/rocm_aiter_fa.py +609 -0
  1116. vllm/v1/attention/backends/triton_attn.py +449 -0
  1117. vllm/v1/attention/backends/utils.py +310 -0
  1118. vllm/v1/core/__init__.py +0 -0
  1119. vllm/v1/core/block_pool.py +349 -0
  1120. vllm/v1/core/encoder_cache_manager.py +254 -0
  1121. vllm/v1/core/kv_cache_coordinator.py +369 -0
  1122. vllm/v1/core/kv_cache_manager.py +398 -0
  1123. vllm/v1/core/kv_cache_utils.py +999 -0
  1124. vllm/v1/core/sched/__init__.py +0 -0
  1125. vllm/v1/core/sched/interface.py +150 -0
  1126. vllm/v1/core/sched/output.py +157 -0
  1127. vllm/v1/core/sched/request_queue.py +224 -0
  1128. vllm/v1/core/sched/scheduler.py +1115 -0
  1129. vllm/v1/core/sched/utils.py +36 -0
  1130. vllm/v1/core/single_type_kv_cache_manager.py +444 -0
  1131. vllm/v1/engine/__init__.py +179 -0
  1132. vllm/v1/engine/async_llm.py +626 -0
  1133. vllm/v1/engine/coordinator.py +278 -0
  1134. vllm/v1/engine/core.py +1046 -0
  1135. vllm/v1/engine/core_client.py +1049 -0
  1136. vllm/v1/engine/detokenizer.py +292 -0
  1137. vllm/v1/engine/exceptions.py +17 -0
  1138. vllm/v1/engine/llm_engine.py +322 -0
  1139. vllm/v1/engine/logprobs.py +200 -0
  1140. vllm/v1/engine/mm_input_cache.py +91 -0
  1141. vllm/v1/engine/output_processor.py +477 -0
  1142. vllm/v1/engine/parallel_sampling.py +133 -0
  1143. vllm/v1/engine/processor.py +422 -0
  1144. vllm/v1/engine/utils.py +546 -0
  1145. vllm/v1/executor/__init__.py +0 -0
  1146. vllm/v1/executor/abstract.py +113 -0
  1147. vllm/v1/executor/multiproc_executor.py +532 -0
  1148. vllm/v1/executor/ray_distributed_executor.py +62 -0
  1149. vllm/v1/kv_cache_interface.py +223 -0
  1150. vllm/v1/metrics/__init__.py +0 -0
  1151. vllm/v1/metrics/loggers.py +557 -0
  1152. vllm/v1/metrics/prometheus.py +82 -0
  1153. vllm/v1/metrics/ray_wrappers.py +131 -0
  1154. vllm/v1/metrics/reader.py +246 -0
  1155. vllm/v1/metrics/stats.py +240 -0
  1156. vllm/v1/outputs.py +124 -0
  1157. vllm/v1/pool/__init__.py +0 -0
  1158. vllm/v1/pool/metadata.py +17 -0
  1159. vllm/v1/request.py +229 -0
  1160. vllm/v1/sample/__init__.py +0 -0
  1161. vllm/v1/sample/logits_processor.py +517 -0
  1162. vllm/v1/sample/metadata.py +43 -0
  1163. vllm/v1/sample/ops/__init__.py +0 -0
  1164. vllm/v1/sample/ops/bad_words.py +39 -0
  1165. vllm/v1/sample/ops/penalties.py +43 -0
  1166. vllm/v1/sample/ops/topk_topp_sampler.py +296 -0
  1167. vllm/v1/sample/rejection_sampler.py +631 -0
  1168. vllm/v1/sample/sampler.py +226 -0
  1169. vllm/v1/sample/tpu/__init__.py +0 -0
  1170. vllm/v1/sample/tpu/metadata.py +124 -0
  1171. vllm/v1/sample/tpu/sampler.py +145 -0
  1172. vllm/v1/serial_utils.py +315 -0
  1173. vllm/v1/spec_decode/__init__.py +0 -0
  1174. vllm/v1/spec_decode/eagle.py +441 -0
  1175. vllm/v1/spec_decode/medusa.py +64 -0
  1176. vllm/v1/spec_decode/metadata.py +62 -0
  1177. vllm/v1/spec_decode/metrics.py +178 -0
  1178. vllm/v1/spec_decode/ngram_proposer.py +132 -0
  1179. vllm/v1/spec_decode/utils.py +41 -0
  1180. vllm/v1/structured_output/__init__.py +227 -0
  1181. vllm/v1/structured_output/backend_guidance.py +245 -0
  1182. vllm/v1/structured_output/backend_types.py +134 -0
  1183. vllm/v1/structured_output/backend_xgrammar.py +318 -0
  1184. vllm/v1/structured_output/request.py +86 -0
  1185. vllm/v1/structured_output/utils.py +175 -0
  1186. vllm/v1/utils.py +377 -0
  1187. vllm/v1/worker/__init__.py +0 -0
  1188. vllm/v1/worker/block_table.py +142 -0
  1189. vllm/v1/worker/cpu_model_runner.py +91 -0
  1190. vllm/v1/worker/cpu_worker.py +153 -0
  1191. vllm/v1/worker/gpu_input_batch.py +757 -0
  1192. vllm/v1/worker/gpu_model_runner.py +2739 -0
  1193. vllm/v1/worker/gpu_worker.py +408 -0
  1194. vllm/v1/worker/lora_model_runner_mixin.py +177 -0
  1195. vllm/v1/worker/tpu_input_batch.py +585 -0
  1196. vllm/v1/worker/tpu_model_runner.py +1849 -0
  1197. vllm/v1/worker/tpu_worker.py +315 -0
  1198. vllm/v1/worker/utils.py +112 -0
  1199. vllm/v1/worker/worker_base.py +65 -0
  1200. vllm/v1/worker/xpu_model_runner.py +33 -0
  1201. vllm/v1/worker/xpu_worker.py +165 -0
  1202. vllm/version.py +41 -0
  1203. vllm/vllm_flash_attn/.gitkeep +0 -0
  1204. vllm/worker/__init__.py +0 -0
  1205. vllm/worker/cache_engine.py +145 -0
  1206. vllm/worker/cpu_enc_dec_model_runner.py +326 -0
  1207. vllm/worker/cpu_model_runner.py +671 -0
  1208. vllm/worker/cpu_pooling_model_runner.py +125 -0
  1209. vllm/worker/cpu_worker.py +452 -0
  1210. vllm/worker/enc_dec_model_runner.py +555 -0
  1211. vllm/worker/hpu_model_runner.py +2320 -0
  1212. vllm/worker/hpu_worker.py +484 -0
  1213. vllm/worker/model_runner.py +2178 -0
  1214. vllm/worker/model_runner_base.py +282 -0
  1215. vllm/worker/multi_step_hpu_worker.py +123 -0
  1216. vllm/worker/multi_step_model_runner.py +911 -0
  1217. vllm/worker/multi_step_neuron_model_runner.py +84 -0
  1218. vllm/worker/multi_step_neuronx_distributed_model_runner.py +63 -0
  1219. vllm/worker/multi_step_tpu_worker.py +108 -0
  1220. vllm/worker/multi_step_worker.py +197 -0
  1221. vllm/worker/neuron_model_runner.py +460 -0
  1222. vllm/worker/neuron_worker.py +193 -0
  1223. vllm/worker/neuronx_distributed_model_runner.py +294 -0
  1224. vllm/worker/pooling_model_runner.py +211 -0
  1225. vllm/worker/tpu_model_runner.py +909 -0
  1226. vllm/worker/tpu_worker.py +337 -0
  1227. vllm/worker/utils.py +53 -0
  1228. vllm/worker/worker.py +577 -0
  1229. vllm/worker/worker_base.py +646 -0
  1230. vllm/worker/xpu_model_runner.py +606 -0
  1231. vllm/worker/xpu_worker.py +186 -0
  1232. vllm_cpu-0.9.2.post2.dist-info/METADATA +339 -0
  1233. vllm_cpu-0.9.2.post2.dist-info/RECORD +1236 -0
  1234. vllm_cpu-0.9.2.post2.dist-info/WHEEL +5 -0
  1235. vllm_cpu-0.9.2.post2.dist-info/entry_points.txt +5 -0
  1236. vllm_cpu-0.9.2.post2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1547 @@
+ # SPDX-License-Identifier: Apache-2.0
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+
+ import itertools
+ from abc import abstractmethod
+ from typing import Any, Literal, Optional, Union
+
+ import torch
+ import torch.nn as nn
+ from torch.nn.parameter import Parameter, UninitializedParameter
+
+ from vllm import envs
+ from vllm.distributed import (divide, get_tensor_model_parallel_rank,
+                               get_tensor_model_parallel_world_size,
+                               split_tensor_along_last_dim,
+                               tensor_model_parallel_all_gather,
+                               tensor_model_parallel_all_reduce)
+ from vllm.logger import init_logger
+ from vllm.model_executor.layers.quantization.base_config import (
+     QuantizationConfig, QuantizeMethodBase)
+ from vllm.model_executor.layers.utils import dispatch_unquantized_gemm
+ # yapf: disable
+ from vllm.model_executor.parameter import (BasevLLMParameter,
+                                            BlockQuantScaleParameter,
+                                            PackedColumnParameter,
+                                            PackedvLLMParameter,
+                                            PerTensorScaleParameter,
+                                            RowvLLMParameter)
+ # yapf: enable
+ from vllm.model_executor.utils import set_weight_attrs
+ from vllm.platforms import current_platform
+
+ logger = init_logger(__name__)
+
+ WEIGHT_LOADER_V2_SUPPORTED = [
+     "CompressedTensorsLinearMethod",
+     "BitBLASLinearMethod",
+     "GPTQBitBLASLinearMethod",
+     "AWQMarlinLinearMethod",
+     "AWQLinearMethod",
+     "GPTQMarlinLinearMethod",
+     "Fp8LinearMethod",
+     "MarlinLinearMethod",
+     "QQQLinearMethod",
+     "GPTQMarlin24LinearMethod",
+     "TPUInt8LinearMethod",
+     "GPTQLinearMethod",
+     "FBGEMMFp8LinearMethod",
+     "ModelOptFp8LinearMethod",
+     "IPEXAWQLinearMethod",
+     "IPEXGPTQLinearMethod",
+     "HQQMarlinMethod",
+     "QuarkLinearMethod",
+     "ModelOptNvFp4LinearMethod",
+ ]
+
+
+ def adjust_bitblas_shard(param, shard_size, shard_offset):
+     bitblas_tile_size = getattr(param, "bitblas_tile_size", None)
+     if bitblas_tile_size is not None:
+         return (shard_size // bitblas_tile_size,
+                 shard_offset // bitblas_tile_size)
+
+     return shard_size, shard_offset
+
+
+ def adjust_marlin_shard(param, shard_size, shard_offset):
+     marlin_tile_size = getattr(param, "marlin_tile_size", None)
+     if marlin_tile_size is None:
+         return shard_size, shard_offset
+
+     return shard_size * marlin_tile_size, shard_offset * marlin_tile_size
+
+
+ def adjust_bitsandbytes_4bit_shard(param: Parameter,
+                                    shard_offsets: dict[str, tuple[int, int]],
+                                    loaded_shard_id: str) -> tuple[int, int]:
+     """Adjust the quantization offsets and sizes for BitsAndBytes sharding."""
+
+     total, _ = shard_offsets["total"]
+     orig_offset, orig_size = shard_offsets[loaded_shard_id]
+
+     quantized_total = param.data.shape[0]
+     quantized_offset = orig_offset * quantized_total // total
+     quantized_size = orig_size * quantized_total // total
+
+     return quantized_size, quantized_offset
+
+
+ def adjust_scalar_to_fused_array(param, loaded_weight, shard_id):
+     """For fused modules (QKV and MLP) we have an array of length
+     N that holds 1 scale for each "logical" matrix. So the param
+     is an array of length N. The loaded_weight corresponds to
+     one of the shards on disk. Here, we slice the param based on
+     the shard_id for loading.
+     """
+     qkv_idxs = {"q": 0, "k": 1, "v": 2}
+
+     if isinstance(shard_id, str):
+         shard_id = qkv_idxs[shard_id]
+     elif not isinstance(shard_id, int):
+         raise ValueError(f"Unknown Shard Id {shard_id}")
+
+     # AutoFP8 scales do not have a shape
+     # compressed-tensors scales do have a shape
+     if len(loaded_weight.shape) != 0:
+         assert loaded_weight.shape[0] == 1
+         loaded_weight = loaded_weight[0]
+
+     return param[shard_id], loaded_weight
+
+
+ # TODO(Isotr0py): We might need a more flexible structure to handle
+ # bitsandbytes shard offsets.
+ def left_shift_bitsandbytes_4bit_shard(bnb_weight_attrs: dict[str, Any]):
+     """
+     Separate the BitsAndBytes 4-bit shard.
+
+     For example, given bnb weight attributes as below:
+     {
+         'bnb_shard_offsets': array([0, 4, 8, 16]),
+         'bnb_quant_state': {0: ..., 1: ..., 2: ...},
+     }
+
+     The function will return:
+     {
+         'bnb_shard_offsets': array([0, 4]),
+         'bnb_quant_state': {0: ...},
+     }
+     and
+     {
+         'bnb_shard_offsets': array([0, 4, 12]),
+         'bnb_quant_state': {0: ..., 1: ...},
+     }
+     """
+     shard_offsets = bnb_weight_attrs["bnb_shard_offsets"]
+     offset_l = shard_offsets[:2]
+     offset_r = shard_offsets[1:] - shard_offsets[1]
+     quant_state_l = {0: bnb_weight_attrs["bnb_quant_state"][0]}
+     quant_state_r = {
+         i - 1: bnb_weight_attrs["bnb_quant_state"][i]
+         for i in range(1,
+                        len(shard_offsets) - 1)
+     }
+     left = dict(bnb_shard_offsets=offset_l, bnb_quant_state=quant_state_l)
+     right = dict(bnb_shard_offsets=offset_r, bnb_quant_state=quant_state_r)
+     return left, right
+
+
+ class LinearMethodBase(QuantizeMethodBase):
+     """Base class for different (maybe quantized) linear methods."""
+
+     @abstractmethod
+     def create_weights(self, layer: torch.nn.Module,
+                        input_size_per_partition: int,
+                        output_partition_sizes: list[int], input_size: int,
+                        output_size: int, params_dtype: torch.dtype,
+                        **extra_weight_attrs):
+         """Create weights for a linear layer.
+         The weights will be set as attributes of the layer.
+
+         Args:
+             layer: The layer that is using the LinearMethodBase factory.
+             input_size_per_partition: Size of the weight input dim on rank X.
+             output_partition_sizes: Sizes of the output dim of each logical
+                 weight on rank X. E.g., output_partition_sizes for QKVLinear
+                 is a list contains the width of Wq, Wk, Wv on rank X.
+             input_size: Size of the input dim of the weight across all ranks.
+             output_size: Size of the output dim of the weight across all ranks.
+             params_dtype: Datatype of the parameters.
+         """
+         raise NotImplementedError
+
+     @abstractmethod
+     def apply(self,
+               layer: torch.nn.Module,
+               x: torch.Tensor,
+               bias: Optional[torch.Tensor] = None) -> torch.Tensor:
+         """Apply the weights in layer to the input tensor.
+         Expects create_weights to have been called before on the layer."""
+         raise NotImplementedError
+
+
+ class UnquantizedLinearMethod(LinearMethodBase):
+     """Linear method without quantization."""
+
+     def create_weights(self, layer: torch.nn.Module,
+                        input_size_per_partition: int,
+                        output_partition_sizes: list[int], input_size: int,
+                        output_size: int, params_dtype: torch.dtype,
+                        **extra_weight_attrs):
+         weight = Parameter(torch.empty(sum(output_partition_sizes),
+                                        input_size_per_partition,
+                                        dtype=params_dtype),
+                            requires_grad=False)
+         set_weight_attrs(weight, {"input_dim": 1, "output_dim": 0})
+         layer.register_parameter("weight", weight)
+         set_weight_attrs(weight, extra_weight_attrs)
+
+     def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
+         if current_platform.is_cpu() and envs.VLLM_CPU_SGL_KERNEL:
+             N, K = layer.weight.size()
+             dtype = layer.weight.dtype
+             if (torch._C._cpu._is_amx_tile_supported()
+                     and dtype == torch.bfloat16 and N % 32 == 0
+                     and K % 32 == 0):
+                 packed_weight = torch.ops._C.convert_weight_packed(
+                     layer.weight)
+                 assert packed_weight.size() == layer.weight.size()
+                 layer.weight.copy_(packed_weight)
+                 if layer.bias is not None:
+                     layer.bias = Parameter(layer.bias.to(torch.float32),
+                                            requires_grad=False)
+                 layer.use_cpu_sgl = True
+             else:
+                 logger.warning(
+                     "CPU SGL kernels require Intel AMX support,"
+                     " bfloat16 weight, IC and OC are divisible by 32.")
+                 layer.use_cpu_sgl = False
+
+     def apply(self,
+               layer: torch.nn.Module,
+               x: torch.Tensor,
+               bias: Optional[torch.Tensor] = None) -> torch.Tensor:
+
+         return dispatch_unquantized_gemm()(layer, x, layer.weight, bias)
+
+
+ class LinearBase(torch.nn.Module):
+     """Base linear layer.
+
+     Args:
+         input_size: input dimension of the linear layer.
+         output_size: output dimension of the linear layer.
+         bias: If true, add bias.
+         skip_bias_add: If true, skip adding bias but instead return it.
+         params_dtype: Data type for the parameters.
+         quant_config: Quantization configure.
+         return_bias: If true, return bias together with outputs in forward pass.
+     """
+
+     def __init__(
+         self,
+         input_size: int,
+         output_size: int,
+         skip_bias_add: bool = False,
+         params_dtype: Optional[torch.dtype] = None,
+         quant_config: Optional[QuantizationConfig] = None,
+         prefix: str = "",
+         *,
+         return_bias: bool = True,
+     ):
+         super().__init__()
+
+         # Keep input parameters
+         self.input_size = input_size
+         self.output_size = output_size
+         self.skip_bias_add = skip_bias_add
+         if params_dtype is None:
+             params_dtype = torch.get_default_dtype()
+         self.params_dtype = params_dtype
+         if quant_config is None:
+             self.quant_method: Optional[
+                 QuantizeMethodBase] = UnquantizedLinearMethod()
+         else:
+             self.quant_method = quant_config.get_quant_method(self,
+                                                               prefix=prefix)
+         self.return_bias = return_bias
+
+     def forward(
+         self, x: torch.Tensor
+     ) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
+         raise NotImplementedError
+
+
+ class ReplicatedLinear(LinearBase):
+     """Replicated linear layer.
+
+     Args:
+         input_size: input dimension of the linear layer.
+         output_size: output dimension of the linear layer.
+         bias: If true, add bias.
+         skip_bias_add: If true, skip adding bias but instead return it.
+         params_dtype: Data type for the parameters.
+         quant_config: Quantization configure.
+         prefix: The name of the layer in the state dict, including all parents
+             (e.g. model.layers.0.qkv_proj)
+         return_bias: If true, return bias together with outputs in forward pass.
+     """
+
+     def __init__(
+         self,
+         input_size: int,
+         output_size: int,
+         bias: bool = True,
+         skip_bias_add: bool = False,
+         params_dtype: Optional[torch.dtype] = None,
+         quant_config: Optional[QuantizationConfig] = None,
+         prefix: str = "",
+         *,
+         return_bias: bool = True,
+     ):
+         super().__init__(input_size,
+                          output_size,
+                          skip_bias_add,
+                          params_dtype,
+                          quant_config,
+                          prefix=prefix,
+                          return_bias=return_bias)
+
+         # All the linear layer supports quant method.
+         assert self.quant_method is not None
+         self.quant_method.create_weights(self,
+                                          self.input_size, [self.output_size],
+                                          self.input_size,
+                                          self.output_size,
+                                          self.params_dtype,
+                                          weight_loader=self.weight_loader)
+
+         if bias:
+             self.bias = Parameter(
+                 torch.empty(self.output_size, dtype=self.params_dtype))
+             set_weight_attrs(self.bias, {
+                 "output_dim": 0,
+                 "weight_loader": self.weight_loader,
+             })
+         else:
+             self.register_parameter("bias", None)
+
+     def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
+         # If the weight on disk does not have a shape, give it one
+         # (such scales for AutoFp8).
+         # Special case for GGUF
+
+         is_gguf_weight = getattr(param, "is_gguf_weight", False)
+         is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
+         if is_gguf_weight_type:
+             param.weight_type = loaded_weight.item()
+
+         # Materialize GGUF UninitializedParameter
+         if is_gguf_weight and isinstance(param, UninitializedParameter):
+             param.materialize(loaded_weight.shape, dtype=loaded_weight.dtype)
+
+         if len(loaded_weight.shape) == 0:
+             loaded_weight = loaded_weight.reshape(1)
+
+         assert param.size() == loaded_weight.size(), (
+             f"Tried to load weights of size {loaded_weight.size()}"
+             f"to a parameter of size {param.size()}")
+         param.data.copy_(loaded_weight)
+
+     def forward(
+         self, x: torch.Tensor
+     ) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
+         bias = self.bias if not self.skip_bias_add else None
+         assert self.quant_method is not None
+         output = self.quant_method.apply(self, x, bias)
+         output_bias = self.bias if self.skip_bias_add else None
+         if not self.return_bias:
+             return output
+         return output, output_bias
+
+     def extra_repr(self) -> str:
+         s = f"in_features={self.input_size}"
+         s += f", output_features={self.output_size}"
+         s += f", bias={self.bias is not None}"
+         return s
+
+
+ class ColumnParallelLinear(LinearBase):
+     """Linear layer with column parallelism.
+
+     The linear layer is defined as Y = XA + b. A is parallelized along
+     its second dimension as A = [A_1, ..., A_p].
+
+     Args:
+         input_size: first dimension of matrix A.
+         output_size: second dimension of matrix A.
+         bias: If true, add bias.
+         gather_output: If true, call all-gather on output and make Y available
+                        to all GPUs, otherwise, every GPU will have its output
+                        which is Y_i = XA_i
+         skip_bias_add: This was added to enable performance optimizations where
+                        bias can be fused with other element-wise operations. we
+                        skip adding bias but instead return it.
+         params_dtype: Data type for the parameters.
+         quant_config: Quantization configure.
+         output_sizes: list of output sizes packed into one output, like for QKV
+                        the list would be size 3.
+         prefix: The name of the layer in the state dict, including all parents
+             (e.g. model.layers.0.qkv_proj)
+     """
+
+     def __init__(
+         self,
+         input_size: int,
+         output_size: int,
+         bias: bool = True,
+         gather_output: bool = False,
+         skip_bias_add: bool = False,
+         params_dtype: Optional[torch.dtype] = None,
+         quant_config: Optional[QuantizationConfig] = None,
+         output_sizes: Optional[list[int]] = None,
+         prefix: str = "",
+         *,
+         return_bias: bool = True,
+     ):
+         # Divide the weight matrix along the last dimension.
+         self.tp_size = get_tensor_model_parallel_world_size()
+         self.input_size_per_partition = input_size
+         self.output_size_per_partition = divide(output_size, self.tp_size)
+         self.output_partition_sizes = [self.output_size_per_partition]
+         # If QKV or MergedColumn, use output size of each partition.
+         if hasattr(self, "output_sizes"):
+             self.output_partition_sizes = [
+                 divide(output_size, self.tp_size)
+                 for output_size in self.output_sizes
+             ]
+
+         super().__init__(input_size,
+                          output_size,
+                          skip_bias_add,
+                          params_dtype,
+                          quant_config,
+                          prefix,
+                          return_bias=return_bias)
+
+         self.gather_output = gather_output
+
+         if output_sizes is None:
+             output_sizes = [output_size]
+
+         assert self.quant_method is not None
+         self.quant_method.create_weights(
+             layer=self,
+             input_size_per_partition=self.input_size_per_partition,
+             output_partition_sizes=self.output_partition_sizes,
+             input_size=self.input_size,
+             output_size=self.output_size,
+             params_dtype=self.params_dtype,
+             weight_loader=(
+                 self.weight_loader_v2 if self.quant_method.__class__.__name__
+                 in WEIGHT_LOADER_V2_SUPPORTED else self.weight_loader))
+         if bias:
+             self.bias = Parameter(
+                 torch.empty(self.output_size_per_partition,
+                             dtype=params_dtype))
+             set_weight_attrs(self.bias, {
+                 "output_dim": 0,
+                 "weight_loader": self.weight_loader,
+             })
+         else:
+             self.register_parameter("bias", None)
+
+     def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
+         tp_rank = get_tensor_model_parallel_rank()
+         output_dim = getattr(param, "output_dim", None)
+
+         is_sharded_weight = getattr(param, "is_sharded_weight", False)
+         use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit", False)
+         # bitsandbytes loads the weights of the specific portion
+         # no need to narrow
+         is_sharded_weight = is_sharded_weight or use_bitsandbytes_4bit
+
+         # Special case for GGUF
+         is_gguf_weight = getattr(param, "is_gguf_weight", False)
+         is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
+         if is_gguf_weight_type:
+             param.weight_type = loaded_weight.item()
+
+         # Materialize GGUF UninitializedParameter
+         if is_gguf_weight and isinstance(param, UninitializedParameter):
+             final_shape = list(loaded_weight.shape)
+             if output_dim is not None:
+                 tp_size = get_tensor_model_parallel_world_size()
+                 assert final_shape[output_dim] % tp_size == 0
+                 final_shape[output_dim] = final_shape[output_dim] // tp_size
+             param.materialize(final_shape, dtype=loaded_weight.dtype)
+
+         param_data = param.data
+         if output_dim is not None and not is_sharded_weight:
+             shard_size = param_data.shape[output_dim]
483
+ start_idx = tp_rank * shard_size
484
+ loaded_weight = loaded_weight.narrow(output_dim, start_idx,
485
+ shard_size)
486
+
487
+ # Special case for loading scales off disk, which often do not
488
+ # have a shape (such as in the case of AutoFP8).
489
+ if len(loaded_weight.shape) == 0:
490
+ loaded_weight = loaded_weight.reshape(1)
491
+
492
+ assert param_data.shape == loaded_weight.shape
493
+ param_data.copy_(loaded_weight)
494
+
495
+ def weight_loader_v2(self, param: Parameter, loaded_weight: torch.Tensor):
496
+ # Special case for loading scales off disk, which often do not
497
+ # have a shape (such as in the case of AutoFP8).
498
+ if len(loaded_weight.shape) == 0:
499
+ assert loaded_weight.numel() == 1
500
+ loaded_weight = loaded_weight.reshape(1)
501
+ param.load_column_parallel_weight(loaded_weight=loaded_weight)
502
+
503
+ def forward(
504
+ self, input_
505
+ ) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
506
+ bias = self.bias if not self.skip_bias_add else None
507
+
508
+ # Matrix multiply.
509
+ assert self.quant_method is not None
510
+ output_parallel = self.quant_method.apply(self, input_, bias)
511
+ if self.gather_output:
512
+ # All-gather across the partitions.
513
+ output = tensor_model_parallel_all_gather(output_parallel)
514
+ else:
515
+ output = output_parallel
516
+ output_bias = self.bias if self.skip_bias_add else None
517
+ if not self.return_bias:
518
+ return output
519
+ return output, output_bias
520
+
521
+ def extra_repr(self) -> str:
522
+ s = f"in_features={self.input_size}"
523
+ s += f", output_features={self.output_size_per_partition}"
524
+ s += f", bias={self.bias is not None}"
525
+ s += f", tp_size={get_tensor_model_parallel_world_size()}"
526
+ s += f", gather_output={self.gather_output}"
527
+ return s
528
+
529
+
530
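To make the column-parallel arithmetic in the docstring above concrete, the single-process sketch below shards the weight along its output dimension the same way ColumnParallelLinear does and checks that concatenating the per-rank partials Y_i = X A_i reproduces the full product; torch.cat stands in for the all-gather that gather_output=True would perform. tp_size and the shapes are arbitrary illustrative values.

    import torch

    tp_size = 4
    x = torch.randn(2, 8)
    full_weight = torch.randn(16, 8)            # vLLM layout: (output_size, input_size)

    # A = [A_1, ..., A_p] along the output dimension: each rank keeps
    # output_size // tp_size rows of the (out, in) weight.
    shards = torch.chunk(full_weight, tp_size, dim=0)
    partials = [x @ a_i.t() for a_i in shards]  # Y_i = X A_i on each rank

    # gather_output=True all-gathers the partials; torch.cat simulates that here.
    assert torch.allclose(torch.cat(partials, dim=-1), x @ full_weight.t())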
+ class MergedColumnParallelLinear(ColumnParallelLinear):
531
+ """Packed linear layers with column parallelism.
532
+
533
+ Similar to ColumnParallelLinear, but the weight matrix is concatenated
534
+ along the output dimension. When the weight matrix is loaded, the
535
+ different partitions are sharded separately.
536
+
537
+ Args:
538
+ input_size: input dimension of the linear layer.
539
+ output_sizes: list of output dimensions of the linear layer.
540
+ bias: If true, add bias.
541
+ gather_output: If true, call all-gather on output and make the output
542
+ available to all GPUs, otherwise, every GPU will have
543
+ its own output.
544
+ skip_bias_add: This was added to enable performance optimizations where
545
+ bias can be fused with other element-wise operations. We
546
+ skip adding bias but instead return it.
547
+ params_dtype: Data type for the parameters.
548
+ quant_config: Quantization configuration.
549
+ prefix: The name of the layer in the state dict, including all parents
550
+ (e.g. model.layers.0.qkv_proj)
551
+ return_bias: If true, return bias together with outputs in forward pass.
552
+ """
553
+
554
+ def __init__(
555
+ self,
556
+ input_size: int,
557
+ output_sizes: list[int],
558
+ bias: bool = True,
559
+ gather_output: bool = False,
560
+ skip_bias_add: bool = False,
561
+ params_dtype: Optional[torch.dtype] = None,
562
+ quant_config: Optional[QuantizationConfig] = None,
563
+ prefix: str = "",
564
+ *,
565
+ return_bias: bool = True,
566
+ ):
567
+ self.output_sizes = output_sizes
568
+ tp_size = get_tensor_model_parallel_world_size()
569
+ assert all(output_size % tp_size == 0 for output_size in output_sizes)
570
+ super().__init__(input_size=input_size,
571
+ output_size=sum(output_sizes),
572
+ bias=bias,
573
+ gather_output=gather_output,
574
+ skip_bias_add=skip_bias_add,
575
+ params_dtype=params_dtype,
576
+ quant_config=quant_config,
577
+ prefix=prefix,
578
+ return_bias=return_bias)
579
+
580
+ def weight_loader(self,
581
+ param: Parameter,
582
+ loaded_weight: torch.Tensor,
583
+ loaded_shard_id: Optional[int] = None):
584
+
585
+ # Special case for GGUF
586
+ # initialize GGUF param after we know the quantize type
587
+ is_gguf_weight = getattr(param, "is_gguf_weight", False)
588
+ is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
589
+ if is_gguf_weight_type:
590
+ if loaded_shard_id is not None:
591
+ param.data[loaded_shard_id].copy_(loaded_weight)
592
+ param.shard_weight_type[loaded_shard_id] = loaded_weight.item()
593
+ else:
594
+ param.shard_weight_type = {
595
+ i: loaded_weight.item()
596
+ for i, _ in enumerate(self.output_sizes)
597
+ }
598
+ return
599
+
600
+ if is_gguf_weight:
601
+ tp_size = get_tensor_model_parallel_world_size()
602
+ tp_rank = get_tensor_model_parallel_rank()
603
+
604
+ output_dim = getattr(param, "output_dim", None)
605
+ shard_size = loaded_weight.size(output_dim) // tp_size
606
+ start_idx = tp_rank * shard_size
607
+
608
+ if loaded_shard_id is not None:
609
+ loaded_weight = loaded_weight.narrow(output_dim, start_idx,
610
+ shard_size)
611
+ param.shard_id.append(loaded_shard_id)
612
+ param.shard_id_map[loaded_shard_id] = len(param.data_container)
613
+ param.data_container.append(loaded_weight)
614
+ return
615
+
616
+ param_data = param.data
617
+ output_dim = getattr(param, "output_dim", None)
618
+ # Special case for AQLM codebooks.
619
+ is_metadata = getattr(param, "is_metadata", False)
620
+ # Special case for per-tensor scale to load scalar into fused array.
621
+ needs_scalar_to_array = getattr(param, "needs_scalar_to_array", False)
622
+
623
+ if loaded_shard_id is None:
624
+ # Loaded weight is already fused on disk (MLP).
625
+ # (e.g., Phi-3's gate_up_proj).
626
+ if output_dim is None:
627
+ if needs_scalar_to_array:
628
+ param_data, loaded_weight = adjust_scalar_to_fused_array(
629
+ param_data, loaded_weight, 0)
630
+
631
+ assert param_data.shape == loaded_weight.shape
632
+ param_data.copy_(loaded_weight)
633
+ return
634
+ current_shard_offset = 0
635
+ use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit",
636
+ False)
637
+ shard_offsets: list[tuple[int, int, int]] = []
638
+ for i, output_size in enumerate(self.output_sizes):
639
+ shard_offsets.append((i, current_shard_offset, output_size))
640
+ current_shard_offset += output_size
641
+ packed_dim = getattr(param, "packed_dim", None)
642
+ for shard_id, shard_offset, shard_size in shard_offsets:
643
+ # Special case for Quantization.
644
+ # If quantized, we need to adjust the offset and size to account
645
+ # for the packing.
646
+ if packed_dim == output_dim:
647
+ shard_size = shard_size // param.pack_factor
648
+ shard_offset = shard_offset // param.pack_factor
649
+ # Special case for Marlin.
650
+ shard_size, shard_offset = adjust_marlin_shard(
651
+ param, shard_size, shard_offset)
652
+
653
+ shard_size, shard_offset = adjust_bitblas_shard(
654
+ param, shard_size, shard_offset)
655
+
656
+ if use_bitsandbytes_4bit:
657
+ index = list(itertools.accumulate([0] + self.output_sizes))
658
+ orig_offsets = {
659
+ str(i): (index[i], size)
660
+ for i, size in enumerate(self.output_sizes)
661
+ }
662
+ orig_offsets["total"] = (self.output_size, 0)
663
+ shard_size, shard_offset = adjust_bitsandbytes_4bit_shard(
664
+ param, orig_offsets, str(shard_id))
665
+
666
+ loaded_weight_shard = loaded_weight.narrow(
667
+ output_dim, shard_offset, shard_size)
668
+ self.weight_loader(param, loaded_weight_shard, shard_id)
669
+ return
670
+
671
+ assert loaded_shard_id < len(self.output_sizes)
672
+ tp_rank = get_tensor_model_parallel_rank()
673
+ tp_size = get_tensor_model_parallel_world_size()
674
+ if output_dim is not None:
675
+ shard_offset = sum(self.output_sizes[:loaded_shard_id]) // tp_size
676
+ shard_size = self.output_sizes[loaded_shard_id] // tp_size
677
+ # Special case for quantization.
678
+ # If quantized, we need to adjust the offset and size to account
679
+ # for the packing.
680
+ packed_dim = getattr(param, "packed_dim", None)
681
+ if packed_dim == output_dim:
682
+ shard_size = shard_size // param.pack_factor
683
+ shard_offset = shard_offset // param.pack_factor
684
+ # Special case for Marlin.
685
+ shard_size, shard_offset = adjust_marlin_shard(
686
+ param, shard_size, shard_offset)
687
+ shard_size, shard_offset = adjust_bitblas_shard(
688
+ param, shard_size, shard_offset)
689
+
690
+ use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit",
691
+ False)
692
+ is_sharded_weight = getattr(param, "is_sharded_weight", False)
693
+ # bitsandbytes loads the weights of the specific portion
694
+ # no need to narrow
695
+ is_sharded_weight = is_sharded_weight or use_bitsandbytes_4bit
696
+
697
+ if use_bitsandbytes_4bit:
698
+ shard_size = loaded_weight.shape[output_dim]
699
+ shard_offset = loaded_weight.shape[output_dim] * \
700
+ loaded_shard_id
701
+
702
+ param_data = param_data.narrow(output_dim, shard_offset,
703
+ shard_size)
704
+ start_idx = tp_rank * shard_size
705
+ if not is_sharded_weight:
706
+ loaded_weight = loaded_weight.narrow(output_dim, start_idx,
707
+ shard_size)
708
+ # Special case for AQLM codebooks.
709
+ elif is_metadata:
710
+ # metadata indicates fixed size concatenated along dim 0
711
+ shard_size = loaded_weight.shape[0]
712
+ shard_offset = loaded_shard_id * shard_size
713
+ param_data = param_data.narrow(0, shard_offset, shard_size)
714
+
715
+ # Special case for per-tensor scales in fused case.
716
+ elif needs_scalar_to_array:
717
+ param_data, loaded_weight = adjust_scalar_to_fused_array(
718
+ param_data, loaded_weight, loaded_shard_id)
719
+
720
+ else:
721
+ ignore_warning = getattr(param, "ignore_warning", False)
722
+ if not ignore_warning:
723
+ logger.warning(
724
+ "Loading a weight without `output_dim` attribute in "
725
+ "MergedColumnParallelLinear, assume the weight is "
726
+ "the same for all partitions.")
727
+
728
+ assert param_data.shape == loaded_weight.shape
729
+ param_data.copy_(loaded_weight)
730
+
731
+ def _load_fused_module_from_checkpoint(self, param: BasevLLMParameter,
732
+ loaded_weight: torch.Tensor):
733
+ """
734
+ Handle special case for models where MLP layers are already
735
+ fused on disk. In this case, we have no shard id. This function
736
+ determines the shard id by splitting these layers and then calls
737
+ the weight loader using the shard id.
738
+
739
+ An example of a model with these fused layers:
740
+ https://huggingface.co/microsoft/Phi-3-mini-4k-instruct
741
+ """
742
+
743
+ current_shard_offset = 0
744
+ shard_offsets: list[tuple[int, int, int]] = []
745
+ for i, output_size in enumerate(self.output_sizes):
746
+ shard_offsets.append((i, current_shard_offset, output_size))
747
+ current_shard_offset += output_size
748
+
749
+ for shard_id, shard_offset, shard_size in shard_offsets:
750
+ # Special case for Quantization.
751
+ # If quantized, we need to adjust the offset and size to account
752
+ # for the packing.
753
+ if isinstance(param, (PackedColumnParameter, PackedvLLMParameter
754
+ )) and param.packed_dim == param.output_dim:
755
+ shard_size, shard_offset = \
756
+ param.adjust_shard_indexes_for_packing(
757
+ shard_size=shard_size, shard_offset=shard_offset)
758
+
759
+ loaded_weight_shard = loaded_weight.narrow(param.output_dim,
760
+ shard_offset,
761
+ shard_size)
762
+ self.weight_loader_v2(param, loaded_weight_shard, shard_id)
763
+
764
+ def weight_loader_v2(self,
765
+ param: BasevLLMParameter,
766
+ loaded_weight: torch.Tensor,
767
+ loaded_shard_id: Optional[int] = None):
768
+ if loaded_shard_id is None:
769
+ if isinstance(param, PerTensorScaleParameter):
770
+ param.load_merged_column_weight(loaded_weight=loaded_weight,
771
+ shard_id=0)
772
+ return
773
+ elif type(param) in (RowvLLMParameter, BasevLLMParameter):
774
+ param.load_merged_column_weight(loaded_weight=loaded_weight)
775
+ return
776
+ # TODO: @dsikka - move to parameter.py
777
+ self._load_fused_module_from_checkpoint(param, loaded_weight)
778
+ return
779
+
780
+ assert loaded_shard_id < len(self.output_sizes)
781
+
782
+ tp_size = get_tensor_model_parallel_world_size()
783
+
784
+ if isinstance(param, BlockQuantScaleParameter):
785
+ from vllm.model_executor.layers.quantization.fp8 import (
786
+ Fp8LinearMethod, Fp8MoEMethod)
787
+ assert self.quant_method is not None
788
+ assert isinstance(self.quant_method,
789
+ (Fp8LinearMethod, Fp8MoEMethod))
790
+ weight_block_size = self.quant_method.quant_config.weight_block_size
791
+ assert weight_block_size is not None
792
+ block_n, _ = weight_block_size[0], weight_block_size[1]
793
+ shard_offset = (
794
+ (sum(self.output_sizes[:loaded_shard_id]) + block_n - 1) //
795
+ block_n) // tp_size
796
+ shard_size = ((self.output_sizes[loaded_shard_id] + block_n - 1) //
797
+ block_n // tp_size)
798
+ else:
799
+ shard_offset = sum(self.output_sizes[:loaded_shard_id]) // tp_size
800
+ shard_size = self.output_sizes[loaded_shard_id] // tp_size
801
+
802
+ param.load_merged_column_weight(loaded_weight=loaded_weight,
803
+ shard_id=loaded_shard_id,
804
+ shard_offset=shard_offset,
805
+ shard_size=shard_size)
806
+
807
+
808
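The shard bookkeeping that MergedColumnParallelLinear.weight_loader applies when a loaded_shard_id is given boils down to two integer divisions (quantization packing aside), sketched below with made-up gate_up_proj sizes; the numbers are illustrative, not taken from any checkpoint.

    # Two 11008-wide projections merged into one gate_up_proj, loaded under tp_size=4.
    output_sizes = [11008, 11008]
    tp_size = 4

    for shard_id, size in enumerate(output_sizes):
        shard_offset = sum(output_sizes[:shard_id]) // tp_size   # where the shard starts
        shard_size = size // tp_size                             # how many rows it owns
        print(shard_id, shard_offset, shard_size)
    # 0 0    2752  -> gate_proj occupies rows [0, 2752) of this rank's parameter
    # 1 2752 2752  -> up_proj occupies rows [2752, 5504)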
+ class QKVParallelLinear(ColumnParallelLinear):
809
+ """Linear layers for the attention's QKV transformation.
810
+
811
+ Linear layers for the linear transformation of the query, key, and value
812
+ vectors in the attention layer. The weight matrix is concatenated along
813
+ the output dimension. The layer is parallelized along the head dimension.
814
+ When the number of key/value heads is smaller than the number of query
815
+ heads (e.g., multi-query/grouped-query attention), the key/value head may
816
+ be replicated while the query heads are partitioned.
817
+
818
+ Args:
819
+ hidden_size: input hidden state size of the transformer.
820
+ head_size: size of each attention head.
821
+ total_num_heads: total number of attention query heads.
822
+ total_num_kv_heads: total number of attention key/value heads. If
823
+ None, assume total_num_kv_heads = total_num_heads.
824
+ bias: If true, add bias.
825
+ skip_bias_add: This was added to enable performance optimizations where
826
+ bias can be fused with other element-wise operations. We
827
+ skip adding bias but instead return it.
828
+ params_dtype: Data type for the parameters.
829
+ quant_config: Quantization configuration.
830
+ prefix: The name of the layer in the state dict, including all parents
831
+ (e.g. model.layers.0.qkv_proj)
832
+ return_bias: If true, return bias together with outputs in forward pass.
833
+ """
834
+
835
+ def __init__(
836
+ self,
837
+ hidden_size: int,
838
+ head_size: int,
839
+ total_num_heads: int,
840
+ total_num_kv_heads: Optional[int] = None,
841
+ bias: bool = True,
842
+ skip_bias_add: bool = False,
843
+ params_dtype: Optional[torch.dtype] = None,
844
+ quant_config: Optional[QuantizationConfig] = None,
845
+ prefix: str = "",
846
+ *,
847
+ return_bias: bool = True,
848
+ ):
849
+ self.hidden_size = hidden_size
850
+ self.head_size = head_size
851
+ self.total_num_heads = total_num_heads
852
+ if total_num_kv_heads is None:
853
+ total_num_kv_heads = total_num_heads
854
+ self.total_num_kv_heads = total_num_kv_heads
855
+ # Divide the weight matrix along the last dimension.
856
+ tp_size = get_tensor_model_parallel_world_size()
857
+ self.num_heads = divide(self.total_num_heads, tp_size)
858
+ if tp_size >= self.total_num_kv_heads:
859
+ self.num_kv_heads = 1
860
+ self.num_kv_head_replicas = divide(tp_size,
861
+ self.total_num_kv_heads)
862
+ else:
863
+ self.num_kv_heads = divide(self.total_num_kv_heads, tp_size)
864
+ self.num_kv_head_replicas = 1
865
+ input_size = self.hidden_size
866
+ output_size = (self.num_heads +
867
+ 2 * self.num_kv_heads) * tp_size * self.head_size
868
+ self.output_sizes = [
869
+ self.num_heads * self.head_size * tp_size, # q_proj
870
+ self.num_kv_heads * self.head_size * tp_size, # k_proj
871
+ self.num_kv_heads * self.head_size * tp_size, # v_proj
872
+ ]
873
+
874
+ super().__init__(input_size=input_size,
875
+ output_size=output_size,
876
+ bias=bias,
877
+ gather_output=False,
878
+ skip_bias_add=skip_bias_add,
879
+ params_dtype=params_dtype,
880
+ quant_config=quant_config,
881
+ prefix=prefix,
882
+ return_bias=return_bias)
883
+
884
+ def _get_shard_offset_mapping(self, loaded_shard_id: str):
885
+ shard_offset_mapping = {
886
+ "q": 0,
887
+ "k": self.num_heads * self.head_size,
888
+ "v": (self.num_heads + self.num_kv_heads) * self.head_size,
889
+ "total": (self.num_heads + 2 * self.num_kv_heads) * self.head_size
890
+ }
891
+ return shard_offset_mapping.get(loaded_shard_id)
892
+
893
+ def _get_shard_size_mapping(self, loaded_shard_id: str):
894
+ shard_size_mapping = {
895
+ "q": self.num_heads * self.head_size,
896
+ "k": self.num_kv_heads * self.head_size,
897
+ "v": self.num_kv_heads * self.head_size,
898
+ }
899
+ return shard_size_mapping.get(loaded_shard_id)
900
+
901
+ def _load_fused_module_from_checkpoint(self, param: BasevLLMParameter,
902
+ loaded_weight: torch.Tensor):
903
+ """
904
+ Handle special case for models where QKV layers are already
905
+ fused on disk. In this case, we have no shard id. This function
906
+ determines the shard id by splitting these layers and then calls
907
+ the weight loader using the shard id.
908
+
909
+ An example of a model with these fused layers:
910
+ https://huggingface.co/microsoft/Phi-3-mini-4k-instruct
911
+ """
912
+ shard_offsets = [
913
+ # (shard_id, shard_offset, shard_size)
914
+ ("q", 0, self.total_num_heads * self.head_size),
915
+ ("k", self.total_num_heads * self.head_size,
916
+ self.total_num_kv_heads * self.head_size),
917
+ ("v",
918
+ (self.total_num_heads + self.total_num_kv_heads) * self.head_size,
919
+ self.total_num_kv_heads * self.head_size),
920
+ ]
921
+
922
+ for shard_id, shard_offset, shard_size in shard_offsets:
923
+ # Special case for Quantization.
924
+ # If quantized, we need to adjust the offset and size to account
925
+ # for the packing.
926
+ if isinstance(param, (PackedColumnParameter, PackedvLLMParameter
927
+ )) and param.packed_dim == param.output_dim:
928
+ shard_size, shard_offset = \
929
+ param.adjust_shard_indexes_for_packing(
930
+ shard_size=shard_size, shard_offset=shard_offset)
931
+
932
+ loaded_weight_shard = loaded_weight.narrow(param.output_dim,
933
+ shard_offset,
934
+ shard_size)
935
+ self.weight_loader_v2(param, loaded_weight_shard, shard_id)
936
+
937
+ def weight_loader_v2(self,
938
+ param: BasevLLMParameter,
939
+ loaded_weight: torch.Tensor,
940
+ loaded_shard_id: Optional[str] = None):
941
+ if loaded_shard_id is None: # special case for certain models
942
+ if isinstance(param, PerTensorScaleParameter):
943
+ param.load_qkv_weight(loaded_weight=loaded_weight, shard_id=0)
944
+ return
945
+ elif type(param) in (RowvLLMParameter, BasevLLMParameter):
946
+ param.load_qkv_weight(loaded_weight=loaded_weight)
947
+ return
948
+ # TODO: @dsikka - move to parameter.py
949
+ self._load_fused_module_from_checkpoint(param, loaded_weight)
950
+ return
951
+
952
+ assert loaded_shard_id in ["q", "k", "v"]
953
+
954
+ shard_offset = self._get_shard_offset_mapping(loaded_shard_id)
955
+ shard_size = self._get_shard_size_mapping(loaded_shard_id)
956
+
957
+ # Note(simon): This is needed for Qwen3's fp8 quantization.
958
+ if isinstance(param, BlockQuantScaleParameter):
959
+ assert self.quant_method is not None
960
+ assert hasattr(self.quant_method, "quant_config")
961
+ weight_block_size = self.quant_method.quant_config.weight_block_size
962
+ block_n, _ = weight_block_size[0], weight_block_size[1]
963
+ shard_offset = (shard_offset + block_n - 1) // block_n
964
+ shard_size = (shard_size + block_n - 1) // block_n
965
+
966
+ param.load_qkv_weight(loaded_weight=loaded_weight,
967
+ num_heads=self.num_kv_head_replicas,
968
+ shard_id=loaded_shard_id,
969
+ shard_offset=shard_offset,
970
+ shard_size=shard_size)
971
+
972
+ def weight_loader(self,
973
+ param: Parameter,
974
+ loaded_weight: torch.Tensor,
975
+ loaded_shard_id: Optional[str] = None):
976
+
977
+ # Special case for GGUF
978
+ # initialize GGUF param after we know the quantize type
979
+ is_gguf_weight = getattr(param, "is_gguf_weight", False)
980
+ is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
981
+ if is_gguf_weight_type:
982
+ idx_map = {"q": 0, "k": 1, "v": 2}
983
+ if loaded_shard_id is not None:
984
+ param.data[idx_map[loaded_shard_id]].copy_(loaded_weight)
985
+ param.shard_weight_type[loaded_shard_id] = loaded_weight.item()
986
+ else:
987
+ param.shard_weight_type = {
988
+ k: loaded_weight.item()
989
+ for k in idx_map
990
+ }
991
+ return
992
+
993
+ if is_gguf_weight:
994
+ tp_size = get_tensor_model_parallel_world_size()
995
+ tp_rank = get_tensor_model_parallel_rank()
996
+
997
+ output_dim = getattr(param, "output_dim", None)
998
+ shard_size = loaded_weight.size(output_dim) // tp_size
999
+ start_idx = tp_rank * shard_size
1000
+
1001
+ if loaded_shard_id is not None:
1002
+ loaded_weight = loaded_weight.narrow(output_dim, start_idx,
1003
+ shard_size)
1004
+ param.shard_id.append(loaded_shard_id)
1005
+ param.shard_id_map[loaded_shard_id] = len(param.data_container)
1006
+ param.data_container.append(loaded_weight)
1007
+ return
1008
+
1009
+ param_data = param.data
1010
+ output_dim = getattr(param, "output_dim", None)
1011
+ # Special case for AQLM codebooks.
1012
+ is_metadata = getattr(param, "is_metadata", False)
1013
+
1014
+ # Special case for per-tensor scales in fused case.
1015
+ needs_scalar_to_array = getattr(param, "needs_scalar_to_array", False)
1016
+
1017
+ if loaded_shard_id is None:
1018
+ # Loaded weight is already fused on disk (QKV).
1019
+ # (e.g., Phi-3's qkv_proj).
1020
+ if output_dim is None:
1021
+ if needs_scalar_to_array:
1022
+ param_data, loaded_weight = adjust_scalar_to_fused_array(
1023
+ param_data, loaded_weight, 0)
1024
+
1025
+ assert param_data.shape == loaded_weight.shape
1026
+ param_data.copy_(loaded_weight)
1027
+ return
1028
+ shard_offsets = [
1029
+ # (shard_id, shard_offset, shard_size)
1030
+ ("q", 0, self.total_num_heads * self.head_size),
1031
+ ("k", self.total_num_heads * self.head_size,
1032
+ self.total_num_kv_heads * self.head_size),
1033
+ ("v", (self.total_num_heads + self.total_num_kv_heads) *
1034
+ self.head_size, self.total_num_kv_heads * self.head_size),
1035
+ ]
1036
+ use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit",
1037
+ False)
1038
+
1039
+ packed_dim = getattr(param, "packed_dim", None)
1040
+ for shard_id, shard_offset, shard_size in shard_offsets:
1041
+ # Special case for Quantized Weights.
1042
+ # If quantized, we need to adjust the offset and size to account
1043
+ # for the packing.
1044
+ if packed_dim == output_dim:
1045
+ shard_size = shard_size // param.pack_factor
1046
+ shard_offset = shard_offset // param.pack_factor
1047
+
1048
+ # Special case for Marlin.
1049
+ shard_size, shard_offset = adjust_marlin_shard(
1050
+ param, shard_size, shard_offset)
1051
+
1052
+ if use_bitsandbytes_4bit:
1053
+ orig_qkv_offsets = {
1054
+ "q": (0, self.total_num_heads * self.head_size),
1055
+ "k": (self.total_num_heads * self.head_size,
1056
+ self.total_num_kv_heads * self.head_size),
1057
+ "v":
1058
+ ((self.total_num_heads + self.total_num_kv_heads) *
1059
+ self.head_size,
1060
+ self.total_num_kv_heads * self.head_size),
1061
+ "total":
1062
+ ((self.total_num_heads + 2 * self.total_num_kv_heads) *
1063
+ self.head_size, 0)
1064
+ }
1065
+
1066
+ shard_size, shard_offset = adjust_bitsandbytes_4bit_shard(
1067
+ param, orig_qkv_offsets, shard_id)
1068
+
1069
+ loaded_weight_shard = loaded_weight.narrow(
1070
+ output_dim, shard_offset, shard_size)
1071
+ self.weight_loader(param, loaded_weight_shard, shard_id)
1072
+ return
1073
+
1074
+ tp_rank = get_tensor_model_parallel_rank()
1075
+ assert loaded_shard_id in ["q", "k", "v"]
1076
+
1077
+ # If output dim is defined, use the default loading process.
1078
+ if output_dim is not None:
1079
+ if loaded_shard_id == "q":
1080
+ shard_offset = 0
1081
+ shard_size = self.num_heads * self.head_size
1082
+ elif loaded_shard_id == "k":
1083
+ shard_offset = self.num_heads * self.head_size
1084
+ shard_size = self.num_kv_heads * self.head_size
1085
+ elif loaded_shard_id == "v":
1086
+ shard_offset = (self.num_heads +
1087
+ self.num_kv_heads) * self.head_size
1088
+ shard_size = self.num_kv_heads * self.head_size
1089
+ # Special case for Quantized Weights.
1090
+ # If quantized, we need to adjust the offset and size to account
1091
+ # for the packing.
1092
+ packed_dim = getattr(param, "packed_dim", None)
1093
+ if packed_dim == output_dim:
1094
+ shard_size = shard_size // param.pack_factor
1095
+ shard_offset = shard_offset // param.pack_factor
1096
+
1097
+ # Special case for Marlin.
1098
+ shard_size, shard_offset = adjust_marlin_shard(
1099
+ param, shard_size, shard_offset)
1100
+
1101
+ use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit",
1102
+ False)
1103
+ is_sharded_weight = getattr(param, "is_sharded_weight", False)
1104
+ # bitsandbytes loads the weights of the specific portion
1105
+ # no need to narrow
1106
+ is_sharded_weight = is_sharded_weight or use_bitsandbytes_4bit
1107
+
1108
+ if use_bitsandbytes_4bit:
1109
+ orig_qkv_offsets = {
1110
+ "q": (0, self.num_heads * self.head_size),
1111
+ "k": (self.num_heads * self.head_size,
1112
+ self.num_kv_heads * self.head_size),
1113
+ "v":
1114
+ ((self.num_heads + self.num_kv_heads) * self.head_size,
1115
+ self.num_kv_heads * self.head_size),
1116
+ "total":
1117
+ ((self.num_heads + 2 * self.num_kv_heads) * self.head_size,
1118
+ 0)
1119
+ }
1120
+ shard_size, shard_offset = adjust_bitsandbytes_4bit_shard(
1121
+ param, orig_qkv_offsets, loaded_shard_id)
1122
+
1123
+ param_data = param_data.narrow(output_dim, shard_offset,
1124
+ shard_size)
1125
+ if loaded_shard_id == "q":
1126
+ shard_id = tp_rank
1127
+ else:
1128
+ shard_id = tp_rank // self.num_kv_head_replicas
1129
+ start_idx = shard_id * shard_size
1130
+
1131
+ if not is_sharded_weight:
1132
+ loaded_weight = loaded_weight.narrow(output_dim, start_idx,
1133
+ shard_size)
1134
+
1135
+ # Special case for AQLM codebooks.
1136
+ elif is_metadata:
1137
+ # metadata indicates fixed size concatenated along dim 0
1138
+ shard_size = loaded_weight.shape[0]
1139
+ shard_index = ["q", "k", "v"].index(loaded_shard_id)
1140
+ param_data = param_data.narrow(0, shard_index * shard_size,
1141
+ shard_size)
1142
+ # Special case for per-tensor scales in fused case.
1143
+ elif needs_scalar_to_array:
1144
+ param_data, loaded_weight = adjust_scalar_to_fused_array(
1145
+ param_data, loaded_weight, loaded_shard_id)
1146
+ else:
1147
+ ignore_warning = getattr(param, "ignore_warning", False)
1148
+ if not ignore_warning:
1149
+ logger.warning(
1150
+ "Loading a weight without `output_dim` attribute in "
1151
+ "QKVParallelLinear, assume the weight is the same "
1152
+ "for all partitions.")
1153
+
1154
+ assert param_data.shape == loaded_weight.shape
1155
+ param_data.copy_(loaded_weight)
1156
+
1157
+
1158
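The head partitioning and q/k/v offsets computed in QKVParallelLinear.__init__ and _get_shard_offset_mapping can be reproduced in a few lines. The sketch below uses hypothetical grouped-query-attention numbers (64 query heads, 8 KV heads, head_size 128, tp_size 16) purely to show how KV-head replication and the per-rank shard layout fall out.

    def qkv_shard_layout(total_num_heads, total_num_kv_heads, head_size, tp_size):
        num_heads = total_num_heads // tp_size
        if tp_size >= total_num_kv_heads:
            # Fewer KV heads than ranks: each rank keeps one KV head and
            # groups of ranks replicate the same one.
            num_kv_heads, num_kv_head_replicas = 1, tp_size // total_num_kv_heads
        else:
            num_kv_heads, num_kv_head_replicas = total_num_kv_heads // tp_size, 1
        offsets = {"q": 0,
                   "k": num_heads * head_size,
                   "v": (num_heads + num_kv_heads) * head_size}
        sizes = {"q": num_heads * head_size,
                 "k": num_kv_heads * head_size,
                 "v": num_kv_heads * head_size}
        return num_kv_head_replicas, offsets, sizes

    replicas, offsets, sizes = qkv_shard_layout(64, 8, 128, 16)
    # replicas == 2 (pairs of ranks share a KV head),
    # offsets == {"q": 0, "k": 512, "v": 640}, sizes == {"q": 512, "k": 128, "v": 128}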
+ class RowParallelLinear(LinearBase):
1159
+ """Linear layer with row parallelism.
1160
+
1161
+ The linear layer is defined as Y = XA + b. A is parallelized along
1162
+ its first dimension and X along its second dimension as:
1163
+ - -
1164
+ | A_1 |
1165
+ | . |
1166
+ A = | . | X = [X_1, ..., X_p]
1167
+ | . |
1168
+ | A_p |
1169
+ - -
1170
+ Arguments:
1171
+ input_size: first dimension of matrix A.
1172
+ output_size: second dimension of matrix A.
1173
+ bias: If true, add bias. Note that bias is not parallelized.
1174
+ input_is_parallel: If true, we assume that the input is already
1175
+ split across the GPUs and we do not split
1176
+ again.
1177
+ skip_bias_add: This was added to enable performance optimization where
1178
+ bias can be fused with other element-wise operations.
1179
+ We skip adding bias but instead return it.
1180
+ params_dtype: Data type for the parameters.
1181
+ reduce_results: If true, call all-reduce on output and make Y available
1182
+ to all GPUs, otherwise, every GPU will have its output
1183
+ which is Y_i = X_iA_i
1184
+ quant_config: Quantization configuration.
1185
+ prefix: The name of the layer in the state dict, including all parents
1186
+ (e.g. model.layers.0.down_proj)
1187
+ return_bias: If true, return bias together with outputs in forward pass.
1188
+ """
1189
+
1190
+ def __init__(
1191
+ self,
1192
+ input_size: int,
1193
+ output_size: int,
1194
+ bias: bool = True,
1195
+ input_is_parallel: bool = True,
1196
+ skip_bias_add: bool = False,
1197
+ params_dtype: Optional[torch.dtype] = None,
1198
+ reduce_results: bool = True,
1199
+ quant_config: Optional[QuantizationConfig] = None,
1200
+ prefix: str = "",
1201
+ *,
1202
+ return_bias: bool = True,
1203
+ ):
1204
+ # Divide the weight matrix along the first dimension.
1205
+ self.tp_rank = get_tensor_model_parallel_rank()
1206
+ self.tp_size = get_tensor_model_parallel_world_size()
1207
+ self.input_size_per_partition = divide(input_size, self.tp_size)
1208
+ self.output_size_per_partition = output_size
1209
+ self.output_partition_sizes = [output_size]
1210
+
1211
+ super().__init__(input_size,
1212
+ output_size,
1213
+ skip_bias_add,
1214
+ params_dtype,
1215
+ quant_config,
1216
+ prefix,
1217
+ return_bias=return_bias)
1218
+
1219
+ self.input_is_parallel = input_is_parallel
1220
+ self.reduce_results = reduce_results
1221
+
1222
+ assert self.quant_method is not None
1223
+ self.quant_method.create_weights(
1224
+ layer=self,
1225
+ input_size_per_partition=self.input_size_per_partition,
1226
+ output_partition_sizes=self.output_partition_sizes,
1227
+ input_size=self.input_size,
1228
+ output_size=self.output_size,
1229
+ params_dtype=self.params_dtype,
1230
+ weight_loader=(
1231
+ self.weight_loader_v2 if self.quant_method.__class__.__name__
1232
+ in WEIGHT_LOADER_V2_SUPPORTED else self.weight_loader))
1233
+ if not reduce_results and (bias and not skip_bias_add):
1234
+ raise ValueError("When not reduce the results, adding bias to the "
1235
+ "results can lead to incorrect results")
1236
+
1237
+ if bias:
1238
+ self.bias = Parameter(
1239
+ torch.empty(self.output_size, dtype=params_dtype))
1240
+ set_weight_attrs(self.bias, {
1241
+ "output_dim": 0,
1242
+ "weight_loader": self.weight_loader,
1243
+ })
1244
+ else:
1245
+ self.register_parameter("bias", None)
1246
+
1247
+ def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
1248
+ tp_rank = get_tensor_model_parallel_rank()
1249
+ tp_size = get_tensor_model_parallel_world_size()
1250
+ input_dim = getattr(param, "input_dim", None)
1251
+ use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit", False)
1252
+ is_sharded_weight = getattr(param, "is_sharded_weight", False)
1253
+ # bitsandbytes loads the weights of the specific portion
1254
+ # no need to narrow
1255
+ is_sharded_weight = is_sharded_weight or use_bitsandbytes_4bit
1256
+
1257
+ # Special case for GGUF
1258
+ is_gguf_weight = getattr(param, "is_gguf_weight", False)
1259
+ is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
1260
+ if is_gguf_weight_type:
1261
+ param.weight_type = loaded_weight.item()
1262
+
1263
+ # Materialize GGUF UninitializedParameter
1264
+ if is_gguf_weight and isinstance(param, UninitializedParameter):
1265
+ weight_shape = list(loaded_weight.shape)
1266
+ if input_dim:
1267
+ weight_shape[input_dim] = weight_shape[input_dim] // tp_size
1268
+ param.materialize(tuple(weight_shape), dtype=loaded_weight.dtype)
1269
+
1270
+ param_data = param.data
1271
+ if input_dim is not None and not is_sharded_weight:
1272
+ shard_size = param_data.shape[input_dim]
1273
+ start_idx = tp_rank * shard_size
1274
+ loaded_weight = loaded_weight.narrow(input_dim, start_idx,
1275
+ shard_size)
1276
+
1277
+ # Special case for loading scales off disk, which often do not
1278
+ # have a shape (such as in the case of AutoFP8).
1279
+ if len(loaded_weight.shape) == 0:
1280
+ loaded_weight = loaded_weight.reshape(1)
1281
+
1282
+ assert param_data.shape == loaded_weight.shape
1283
+ param_data.copy_(loaded_weight)
1284
+
1285
+ def weight_loader_v2(self, param: BasevLLMParameter,
1286
+ loaded_weight: torch.Tensor):
1287
+
1288
+ # Special case for loading scales off disk, which often do not
1289
+ # have a shape (such as in the case of AutoFP8).
1290
+ if len(loaded_weight.shape) == 0:
1291
+ assert loaded_weight.numel() == 1
1292
+ loaded_weight = loaded_weight.reshape(1)
1293
+
1294
+ param.load_row_parallel_weight(loaded_weight=loaded_weight)
1295
+
1296
+ def forward(
1297
+ self, input_
1298
+ ) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
1299
+ if self.input_is_parallel:
1300
+ input_parallel = input_
1301
+ else:
1302
+ tp_rank = get_tensor_model_parallel_rank()
1303
+ splitted_input = split_tensor_along_last_dim(
1304
+ input_, num_partitions=self.tp_size)
1305
+ input_parallel = splitted_input[tp_rank].contiguous()
1306
+
1307
+ # Matrix multiply.
1308
+ assert self.quant_method is not None
1309
+ # Only fuse bias add into GEMM for rank 0 (this ensures that
1310
+ # bias will not get added more than once in TP>1 case)
1311
+ bias_ = None if (self.tp_rank > 0 or self.skip_bias_add) else self.bias
1312
+ output_parallel = self.quant_method.apply(self,
1313
+ input_parallel,
1314
+ bias=bias_)
1315
+ if self.reduce_results and self.tp_size > 1:
1316
+ output = tensor_model_parallel_all_reduce(output_parallel)
1317
+ else:
1318
+ output = output_parallel
1319
+
1320
+ output_bias = self.bias if self.skip_bias_add else None
1321
+
1322
+ if not self.return_bias:
1323
+ return output
1324
+ return output, output_bias
1325
+
1326
+ def extra_repr(self) -> str:
1327
+ s = f"input_features={self.input_size_per_partition}"
1328
+ s += f", output_features={self.output_size}"
1329
+ s += f", bias={self.bias is not None}"
1330
+ s += f", tp_size={self.tp_size}"
1331
+ s += f", reduce_results={self.reduce_results}"
1332
+ return s
1333
+
1334
+
1335
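A single-process sketch of the row-parallel identity drawn in the docstring above: X is split along its last dimension, the weight along its input dimension, and the per-rank partials X_i A_i sum to the full result; a plain Python sum stands in for tensor_model_parallel_all_reduce. Shapes and tp_size are arbitrary. This is also why only rank 0 fuses the bias in forward, so the bias is added exactly once after the reduction.

    import torch

    tp_size = 4
    x = torch.randn(2, 16)
    full_weight = torch.randn(8, 16)            # vLLM layout: (output_size, input_size)

    # Split X along its last dim and A along its input dim (dim 1 of the (out, in) weight).
    x_shards = torch.chunk(x, tp_size, dim=-1)
    w_shards = torch.chunk(full_weight, tp_size, dim=1)
    partials = [x_i @ a_i.t() for x_i, a_i in zip(x_shards, w_shards)]

    # reduce_results=True all-reduces (sums) the partials across ranks.
    assert torch.allclose(sum(partials), x @ full_weight.t(), atol=1e-5)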
+ class QKVCrossParallelLinear(LinearBase):
1336
+ """Linear layers for efficient cross-attention's QKV transformation.
1337
+
1338
+ Args:
1339
+ hidden_size: input hidden state size of the transformer.
1340
+ head_size: size of each attention head.
1341
+ total_num_heads: total number of attention query heads.
1342
+ total_num_kv_heads: total number of attention key/value heads. If
1343
+ None, assume total_num_kv_heads = total_num_heads.
1344
+ bias: If true, add bias.
1345
+ skip_bias_add: This was added to enable performance optimizations where
1346
+ bias can be fused with other element-wise operations. We
1347
+ skip adding bias but instead return it.
1348
+ params_dtype: Data type for the parameters.
1349
+ quant_config: Quantization configuration.
1350
+ prefix: The name of the layer in the state dict, including all parents
1351
+ (e.g. model.layers.0.qkv_proj)
1352
+ """
1353
+
1354
+ def __init__(self,
1355
+ hidden_size: int,
1356
+ head_size: int,
1357
+ total_num_heads: int,
1358
+ total_num_kv_heads: Optional[int] = None,
1359
+ bias: bool = True,
1360
+ skip_bias_add: bool = False,
1361
+ params_dtype: Optional[torch.dtype] = None,
1362
+ quant_config: Optional[QuantizationConfig] = None,
1363
+ prefix: str = ""):
1364
+ # input_size and output_size are not used, just for alignment
1365
+ input_size = hidden_size
1366
+ output_size = (total_num_heads + (total_num_kv_heads or 0)) * head_size
1367
+ super().__init__(input_size=input_size,
1368
+ output_size=output_size,
1369
+ skip_bias_add=skip_bias_add,
1370
+ params_dtype=params_dtype,
1371
+ quant_config=quant_config,
1372
+ prefix=prefix)
1373
+
1374
+ self.quant_config = quant_config
1375
+
1376
+ # Empty placeholders for loading as a single module.
1377
+ placeholder_size = 0
1378
+ assert self.quant_method is not None
1379
+ self.quant_method.create_weights(self,
1380
+ placeholder_size, [placeholder_size],
1381
+ placeholder_size,
1382
+ placeholder_size,
1383
+ self.params_dtype,
1384
+ weight_loader=self.weight_loader)
1385
+
1386
+ # Use a dictionary to avoid auto-registration of submodule parameters:
1387
+ # drop-in replacement for a `QKVParallelLinear` module.
1388
+ self.proj = dict()
1389
+ self.proj["q_proj_decoder"] = ColumnParallelLinear(
1390
+ input_size=hidden_size,
1391
+ output_size=total_num_heads * head_size,
1392
+ bias=bias,
1393
+ quant_config=quant_config,
1394
+ skip_bias_add=skip_bias_add,
1395
+ params_dtype=params_dtype,
1396
+ prefix=f"{prefix}.q_proj_decoder")
1397
+
1398
+ self.proj["kv_proj_encoder"] = QKVParallelLinear(
1399
+ hidden_size=hidden_size,
1400
+ head_size=head_size,
1401
+ total_num_heads=0,
1402
+ total_num_kv_heads=total_num_kv_heads,
1403
+ bias=bias,
1404
+ quant_config=quant_config,
1405
+ skip_bias_add=skip_bias_add,
1406
+ params_dtype=params_dtype,
1407
+ prefix=f"{prefix}.kv_proj_encoder")
1408
+
1409
+ # `kv_proj_encoder.num_kv_heads` accounts for sharding with tp>1.
1410
+ self.q_size = self.q_proj_decoder.output_size_per_partition
1411
+ self.kv_size = self.kv_proj_encoder.num_kv_heads * head_size
1412
+
1413
+ if bias:
1414
+ self.bias = torch.nn.Parameter()
1415
+ set_weight_attrs(self.bias, {
1416
+ "output_dim": 0,
1417
+ "weight_loader": self.weight_loader,
1418
+ })
1419
+ else:
1420
+ self.bias = None
1421
+
1422
+ def process_weights_after_loading(self):
1423
+ for layer in self.proj.values():
1424
+ if self.quant_method is not None:
1425
+ self.quant_method.process_weights_after_loading(layer)
1426
+
1427
+ @property
1428
+ def q_proj_decoder(self) -> ColumnParallelLinear:
1429
+ layer = self.proj["q_proj_decoder"]
1430
+ for name, param in self.named_parameters():
1431
+ target_param = getattr(layer, name, None)
1432
+ if target_param is not None:
1433
+ self.sync_weight_attrs(param,
1434
+ target_param,
1435
+ mode="q_proj_decoder")
1436
+ return layer
1437
+
1438
+ @property
1439
+ def kv_proj_encoder(self) -> QKVParallelLinear:
1440
+ layer = self.proj["kv_proj_encoder"]
1441
+ for name, param in self.named_parameters():
1442
+ target_param = getattr(layer, name, None)
1443
+ if target_param is not None:
1444
+ self.sync_weight_attrs(param,
1445
+ target_param,
1446
+ mode="kv_proj_encoder")
1447
+ return layer
1448
+
1449
+ def sync_weight_attrs(
1450
+ self,
1451
+ src_param: nn.Parameter,
1452
+ tgt_param: nn.Parameter,
1453
+ mode: Literal["q_proj_decoder", "kv_proj_encoder"],
1454
+ ):
1455
+ missing_attrs_dict = {
1456
+ k: getattr(src_param, k)
1457
+ for k in (set(vars(src_param).keys()) -
1458
+ set(vars(tgt_param).keys()))
1459
+ }
1460
+ # TODO(Isotr0py): handle bitsandbytes 8bit
1461
+ use_bitsandbytes_4bit = getattr(src_param, "use_bitsandbytes_4bit",
1462
+ False)
1463
+ if (missing_attrs_dict and use_bitsandbytes_4bit):
1464
+ q_proj_attrs, kv_proj_attrs = left_shift_bitsandbytes_4bit_shard(
1465
+ missing_attrs_dict)
1466
+ if mode == "q_proj_decoder":
1467
+ set_weight_attrs(tgt_param, q_proj_attrs)
1468
+ elif mode == "kv_proj_encoder":
1469
+ set_weight_attrs(tgt_param, kv_proj_attrs)
1470
+ else:
1471
+ set_weight_attrs(tgt_param, missing_attrs_dict)
1472
+
1473
+ def _is_same_param(
1474
+ self,
1475
+ src_param: torch.nn.Parameter,
1476
+ map_param: torch.nn.Parameter,
1477
+ ) -> bool:
1478
+ """Check if two parameters are exactly pointing to same things."""
1479
+ # ignore weight_loader because it's always different
1480
+ key_to_ignore = ["weight_loader", "_weight_loader"]
1481
+ has_same_type_name = type(src_param) is type(map_param)
1482
+ src_param_attrs = {
1483
+ k: v
1484
+ for k, v in src_param.__dict__.items() if k not in key_to_ignore
1485
+ }
1486
+ map_param_attrs = {
1487
+ k: v
1488
+ for k, v in map_param.__dict__.items() if k not in key_to_ignore
1489
+ }
1490
+ has_same_attrs = src_param_attrs == map_param_attrs
1491
+ return has_same_type_name and has_same_attrs
1492
+
1493
+ def select_proj_params(
1494
+ self,
1495
+ layer: nn.Module,
1496
+ param: nn.Parameter,
1497
+ ) -> nn.Parameter:
1498
+ """
1499
+ Given the placeholder param,
1500
+ return the corresponding param in the proj layers.
1501
+ """
1502
+ target_param_list = [
1503
+ v for _, v in layer.named_parameters()
1504
+ if self._is_same_param(param, v)
1505
+ ]
1506
+ assert len(target_param_list) == 1
1507
+ target_param = target_param_list[0]
1508
+ return target_param
1509
+
1510
+ def forward( # type: ignore[override]
1511
+ self,
1512
+ decoder_hidden_states: torch.Tensor,
1513
+ encoder_hidden_states: torch.Tensor,
1514
+ ) -> tuple[torch.Tensor, ...]:
1515
+ q, _ = self.q_proj_decoder(decoder_hidden_states)
1516
+ if encoder_hidden_states is None:
1517
+ # Encoder KV already cached.
1518
+ k = None
1519
+ v = None
1520
+ else:
1521
+ # Prefill phase, encoder KV cached here.
1522
+ kv_enc, _ = self.kv_proj_encoder(encoder_hidden_states)
1523
+ # Split kv in half
1524
+ k, v = kv_enc.split(self.kv_size, dim=-1)
1525
+ return q, k, v
1526
+
1527
+ def weight_loader(self,
1528
+ param: torch.nn.Parameter,
1529
+ loaded_weight: torch.Tensor,
1530
+ loaded_shard_id: Optional[str] = None):
1531
+ layer = (self.q_proj_decoder
1532
+ if loaded_shard_id == "q" else self.kv_proj_encoder)
1533
+ target_param = self.select_proj_params(layer, param)
1534
+ shard_id_args = (loaded_shard_id, ) if loaded_shard_id != "q" else ()
1535
+ if self.quant_method.__class__.__name__ in WEIGHT_LOADER_V2_SUPPORTED:
1536
+ layer.weight_loader_v2(target_param, loaded_weight, *shard_id_args)
1537
+ else:
1538
+ layer.weight_loader(target_param, loaded_weight, *shard_id_args)
1539
+
1540
+ def extra_repr(self) -> str:
1541
+ s = f"in_features={self.input_size}"
1542
+ s += f", q_size={self.q_size}"
1543
+ s += f", kv_size={self.kv_size}"
1544
+ s += f", bias={self.bias is not None}"
1545
+ s += f", tp_size={get_tensor_model_parallel_world_size()}"
1546
+ s += ", gather_output=False"
1547
+ return s
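
For reference, the K/V split performed at the end of QKVCrossParallelLinear.forward is just a fixed-size split of the encoder projection; the tiny sketch below mirrors kv_enc.split(self.kv_size, dim=-1) with stand-in shapes (4 KV heads of size 128 on this rank), none of which come from a real model.

    import torch

    kv_size = 4 * 128                         # num_kv_heads * head_size on this rank
    kv_enc = torch.randn(2, 10, 2 * kv_size)  # what kv_proj_encoder would produce

    # First kv_size channels are K, the remaining kv_size channels are V.
    k, v = kv_enc.split(kv_size, dim=-1)
    assert k.shape[-1] == v.shape[-1] == kv_size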