vllm_cpu_amxbf16-0.11.2.post2-cp310-cp310-manylinux_2_17_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (1536)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +225 -0
  3. vllm/_aiter_ops.py +983 -0
  4. vllm/_bc_linter.py +54 -0
  5. vllm/_custom_ops.py +2863 -0
  6. vllm/_ipex_ops.py +457 -0
  7. vllm/_version.py +34 -0
  8. vllm/assets/__init__.py +0 -0
  9. vllm/assets/audio.py +43 -0
  10. vllm/assets/base.py +40 -0
  11. vllm/assets/image.py +59 -0
  12. vllm/assets/video.py +149 -0
  13. vllm/attention/__init__.py +18 -0
  14. vllm/attention/backends/__init__.py +0 -0
  15. vllm/attention/backends/abstract.py +391 -0
  16. vllm/attention/backends/registry.py +195 -0
  17. vllm/attention/backends/utils.py +33 -0
  18. vllm/attention/layer.py +1052 -0
  19. vllm/attention/layers/__init__.py +0 -0
  20. vllm/attention/layers/chunked_local_attention.py +121 -0
  21. vllm/attention/layers/cross_attention.py +178 -0
  22. vllm/attention/layers/encoder_only_attention.py +103 -0
  23. vllm/attention/ops/__init__.py +0 -0
  24. vllm/attention/ops/chunked_prefill_paged_decode.py +401 -0
  25. vllm/attention/ops/common.py +414 -0
  26. vllm/attention/ops/flashmla.py +251 -0
  27. vllm/attention/ops/merge_attn_states.py +47 -0
  28. vllm/attention/ops/paged_attn.py +262 -0
  29. vllm/attention/ops/pallas_kv_cache_update.py +130 -0
  30. vllm/attention/ops/prefix_prefill.py +814 -0
  31. vllm/attention/ops/rocm_aiter_paged_attn.py +123 -0
  32. vllm/attention/ops/triton_decode_attention.py +712 -0
  33. vllm/attention/ops/triton_merge_attn_states.py +105 -0
  34. vllm/attention/ops/triton_reshape_and_cache_flash.py +184 -0
  35. vllm/attention/ops/triton_unified_attention.py +941 -0
  36. vllm/attention/ops/vit_attn_wrappers.py +178 -0
  37. vllm/attention/selector.py +231 -0
  38. vllm/attention/utils/__init__.py +0 -0
  39. vllm/attention/utils/fa_utils.py +109 -0
  40. vllm/attention/utils/kv_sharing_utils.py +33 -0
  41. vllm/attention/utils/kv_transfer_utils.py +60 -0
  42. vllm/beam_search.py +88 -0
  43. vllm/benchmarks/__init__.py +0 -0
  44. vllm/benchmarks/datasets.py +3222 -0
  45. vllm/benchmarks/latency.py +172 -0
  46. vllm/benchmarks/lib/__init__.py +3 -0
  47. vllm/benchmarks/lib/endpoint_request_func.py +777 -0
  48. vllm/benchmarks/lib/ready_checker.py +72 -0
  49. vllm/benchmarks/lib/utils.py +79 -0
  50. vllm/benchmarks/serve.py +1531 -0
  51. vllm/benchmarks/sweep/__init__.py +0 -0
  52. vllm/benchmarks/sweep/cli.py +38 -0
  53. vllm/benchmarks/sweep/param_sweep.py +91 -0
  54. vllm/benchmarks/sweep/plot.py +580 -0
  55. vllm/benchmarks/sweep/serve.py +416 -0
  56. vllm/benchmarks/sweep/serve_sla.py +492 -0
  57. vllm/benchmarks/sweep/server.py +114 -0
  58. vllm/benchmarks/sweep/sla_sweep.py +132 -0
  59. vllm/benchmarks/sweep/utils.py +4 -0
  60. vllm/benchmarks/throughput.py +799 -0
  61. vllm/collect_env.py +857 -0
  62. vllm/compilation/__init__.py +0 -0
  63. vllm/compilation/activation_quant_fusion.py +209 -0
  64. vllm/compilation/backends.py +759 -0
  65. vllm/compilation/base_static_graph.py +57 -0
  66. vllm/compilation/caching.py +178 -0
  67. vllm/compilation/collective_fusion.py +1234 -0
  68. vllm/compilation/compiler_interface.py +639 -0
  69. vllm/compilation/counter.py +48 -0
  70. vllm/compilation/cuda_graph.py +208 -0
  71. vllm/compilation/decorators.py +571 -0
  72. vllm/compilation/fix_functionalization.py +253 -0
  73. vllm/compilation/fusion.py +374 -0
  74. vllm/compilation/fusion_attn.py +359 -0
  75. vllm/compilation/fx_utils.py +91 -0
  76. vllm/compilation/inductor_pass.py +133 -0
  77. vllm/compilation/matcher_utils.py +317 -0
  78. vllm/compilation/monitor.py +62 -0
  79. vllm/compilation/noop_elimination.py +134 -0
  80. vllm/compilation/partition_rules.py +72 -0
  81. vllm/compilation/pass_manager.py +135 -0
  82. vllm/compilation/piecewise_backend.py +121 -0
  83. vllm/compilation/post_cleanup.py +21 -0
  84. vllm/compilation/qk_norm_rope_fusion.py +238 -0
  85. vllm/compilation/sequence_parallelism.py +363 -0
  86. vllm/compilation/torch25_custom_graph_pass.py +44 -0
  87. vllm/compilation/vllm_inductor_pass.py +173 -0
  88. vllm/compilation/wrapper.py +238 -0
  89. vllm/config/__init__.py +102 -0
  90. vllm/config/cache.py +207 -0
  91. vllm/config/compilation.py +975 -0
  92. vllm/config/device.py +75 -0
  93. vllm/config/ec_transfer.py +110 -0
  94. vllm/config/kv_events.py +56 -0
  95. vllm/config/kv_transfer.py +114 -0
  96. vllm/config/load.py +124 -0
  97. vllm/config/lora.py +112 -0
  98. vllm/config/model.py +2162 -0
  99. vllm/config/multimodal.py +248 -0
  100. vllm/config/observability.py +123 -0
  101. vllm/config/parallel.py +655 -0
  102. vllm/config/pooler.py +122 -0
  103. vllm/config/scheduler.py +298 -0
  104. vllm/config/speculative.py +654 -0
  105. vllm/config/speech_to_text.py +38 -0
  106. vllm/config/structured_outputs.py +92 -0
  107. vllm/config/utils.py +178 -0
  108. vllm/config/vllm.py +1166 -0
  109. vllm/connections.py +189 -0
  110. vllm/device_allocator/__init__.py +0 -0
  111. vllm/device_allocator/cumem.py +327 -0
  112. vllm/distributed/__init__.py +6 -0
  113. vllm/distributed/communication_op.py +43 -0
  114. vllm/distributed/device_communicators/__init__.py +0 -0
  115. vllm/distributed/device_communicators/all2all.py +490 -0
  116. vllm/distributed/device_communicators/all_reduce_utils.py +344 -0
  117. vllm/distributed/device_communicators/base_device_communicator.py +297 -0
  118. vllm/distributed/device_communicators/cpu_communicator.py +209 -0
  119. vllm/distributed/device_communicators/cuda_communicator.py +340 -0
  120. vllm/distributed/device_communicators/cuda_wrapper.py +216 -0
  121. vllm/distributed/device_communicators/custom_all_reduce.py +326 -0
  122. vllm/distributed/device_communicators/mnnvl_compat.py +27 -0
  123. vllm/distributed/device_communicators/pynccl.py +386 -0
  124. vllm/distributed/device_communicators/pynccl_allocator.py +191 -0
  125. vllm/distributed/device_communicators/pynccl_wrapper.py +564 -0
  126. vllm/distributed/device_communicators/quick_all_reduce.py +290 -0
  127. vllm/distributed/device_communicators/ray_communicator.py +259 -0
  128. vllm/distributed/device_communicators/shm_broadcast.py +733 -0
  129. vllm/distributed/device_communicators/shm_object_storage.py +660 -0
  130. vllm/distributed/device_communicators/symm_mem.py +156 -0
  131. vllm/distributed/device_communicators/tpu_communicator.py +107 -0
  132. vllm/distributed/device_communicators/xpu_communicator.py +95 -0
  133. vllm/distributed/ec_transfer/__init__.py +14 -0
  134. vllm/distributed/ec_transfer/ec_connector/__init__.py +0 -0
  135. vllm/distributed/ec_transfer/ec_connector/base.py +247 -0
  136. vllm/distributed/ec_transfer/ec_connector/factory.py +88 -0
  137. vllm/distributed/ec_transfer/ec_connector/shared_storage_connector.py +201 -0
  138. vllm/distributed/ec_transfer/ec_transfer_state.py +42 -0
  139. vllm/distributed/eplb/__init__.py +8 -0
  140. vllm/distributed/eplb/eplb_state.py +837 -0
  141. vllm/distributed/eplb/rebalance_algo.py +260 -0
  142. vllm/distributed/eplb/rebalance_execute.py +431 -0
  143. vllm/distributed/kv_events.py +371 -0
  144. vllm/distributed/kv_transfer/README.md +29 -0
  145. vllm/distributed/kv_transfer/__init__.py +20 -0
  146. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  147. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  148. vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
  149. vllm/distributed/kv_transfer/kv_connector/factory.py +192 -0
  150. vllm/distributed/kv_transfer/kv_connector/utils.py +268 -0
  151. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +19 -0
  152. vllm/distributed/kv_transfer/kv_connector/v1/base.py +546 -0
  153. vllm/distributed/kv_transfer/kv_connector/v1/decode_bench_connector.py +419 -0
  154. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +216 -0
  155. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/__init__.py +18 -0
  156. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/multi_process_adapter.py +379 -0
  157. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/utils.py +221 -0
  158. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/vllm_v1_adapter.py +1411 -0
  159. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_mp_connector.py +867 -0
  160. vllm/distributed/kv_transfer/kv_connector/v1/metrics.py +189 -0
  161. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +454 -0
  162. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +2440 -0
  163. vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py +504 -0
  164. vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
  165. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +531 -0
  166. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +632 -0
  167. vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +273 -0
  168. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +450 -0
  169. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  170. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +179 -0
  171. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +164 -0
  172. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +242 -0
  173. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  174. vllm/distributed/kv_transfer/kv_pipe/base.py +66 -0
  175. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +295 -0
  176. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +285 -0
  177. vllm/distributed/kv_transfer/kv_transfer_state.py +78 -0
  178. vllm/distributed/parallel_state.py +1759 -0
  179. vllm/distributed/tpu_distributed_utils.py +188 -0
  180. vllm/distributed/utils.py +543 -0
  181. vllm/engine/__init__.py +0 -0
  182. vllm/engine/arg_utils.py +2144 -0
  183. vllm/engine/async_llm_engine.py +6 -0
  184. vllm/engine/llm_engine.py +6 -0
  185. vllm/engine/protocol.py +170 -0
  186. vllm/entrypoints/__init__.py +0 -0
  187. vllm/entrypoints/anthropic/__init__.py +0 -0
  188. vllm/entrypoints/anthropic/protocol.py +162 -0
  189. vllm/entrypoints/anthropic/serving_messages.py +460 -0
  190. vllm/entrypoints/api_server.py +184 -0
  191. vllm/entrypoints/chat_utils.py +1690 -0
  192. vllm/entrypoints/cli/__init__.py +13 -0
  193. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  194. vllm/entrypoints/cli/benchmark/base.py +25 -0
  195. vllm/entrypoints/cli/benchmark/latency.py +21 -0
  196. vllm/entrypoints/cli/benchmark/main.py +56 -0
  197. vllm/entrypoints/cli/benchmark/serve.py +21 -0
  198. vllm/entrypoints/cli/benchmark/sweep.py +21 -0
  199. vllm/entrypoints/cli/benchmark/throughput.py +21 -0
  200. vllm/entrypoints/cli/collect_env.py +38 -0
  201. vllm/entrypoints/cli/main.py +79 -0
  202. vllm/entrypoints/cli/openai.py +256 -0
  203. vllm/entrypoints/cli/run_batch.py +68 -0
  204. vllm/entrypoints/cli/serve.py +249 -0
  205. vllm/entrypoints/cli/types.py +29 -0
  206. vllm/entrypoints/constants.py +10 -0
  207. vllm/entrypoints/context.py +572 -0
  208. vllm/entrypoints/dynamic_lora.py +57 -0
  209. vllm/entrypoints/harmony_utils.py +535 -0
  210. vllm/entrypoints/launcher.py +175 -0
  211. vllm/entrypoints/llm.py +1768 -0
  212. vllm/entrypoints/logger.py +84 -0
  213. vllm/entrypoints/openai/__init__.py +0 -0
  214. vllm/entrypoints/openai/api_server.py +2096 -0
  215. vllm/entrypoints/openai/cli_args.py +302 -0
  216. vllm/entrypoints/openai/orca_metrics.py +120 -0
  217. vllm/entrypoints/openai/protocol.py +3299 -0
  218. vllm/entrypoints/openai/run_batch.py +547 -0
  219. vllm/entrypoints/openai/serving_chat.py +1772 -0
  220. vllm/entrypoints/openai/serving_classification.py +235 -0
  221. vllm/entrypoints/openai/serving_completion.py +715 -0
  222. vllm/entrypoints/openai/serving_embedding.py +695 -0
  223. vllm/entrypoints/openai/serving_engine.py +1433 -0
  224. vllm/entrypoints/openai/serving_models.py +304 -0
  225. vllm/entrypoints/openai/serving_pooling.py +346 -0
  226. vllm/entrypoints/openai/serving_responses.py +2021 -0
  227. vllm/entrypoints/openai/serving_score.py +503 -0
  228. vllm/entrypoints/openai/serving_tokenization.py +203 -0
  229. vllm/entrypoints/openai/serving_tokens.py +269 -0
  230. vllm/entrypoints/openai/serving_transcription.py +148 -0
  231. vllm/entrypoints/openai/speech_to_text.py +405 -0
  232. vllm/entrypoints/openai/tool_parsers/__init__.py +142 -0
  233. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +273 -0
  234. vllm/entrypoints/openai/tool_parsers/deepseekv31_tool_parser.py +390 -0
  235. vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +390 -0
  236. vllm/entrypoints/openai/tool_parsers/ernie45_tool_parser.py +210 -0
  237. vllm/entrypoints/openai/tool_parsers/glm4_moe_tool_parser.py +200 -0
  238. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +273 -0
  239. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +253 -0
  240. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +494 -0
  241. vllm/entrypoints/openai/tool_parsers/hunyuan_a13b_tool_parser.py +420 -0
  242. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +227 -0
  243. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +323 -0
  244. vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py +590 -0
  245. vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +341 -0
  246. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +290 -0
  247. vllm/entrypoints/openai/tool_parsers/longcat_tool_parser.py +37 -0
  248. vllm/entrypoints/openai/tool_parsers/minimax_m2_tool_parser.py +643 -0
  249. vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +849 -0
  250. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +390 -0
  251. vllm/entrypoints/openai/tool_parsers/olmo3_tool_parser.py +366 -0
  252. vllm/entrypoints/openai/tool_parsers/openai_tool_parser.py +97 -0
  253. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +120 -0
  254. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +332 -0
  255. vllm/entrypoints/openai/tool_parsers/qwen3coder_tool_parser.py +781 -0
  256. vllm/entrypoints/openai/tool_parsers/qwen3xml_tool_parser.py +1316 -0
  257. vllm/entrypoints/openai/tool_parsers/seed_oss_tool_parser.py +744 -0
  258. vllm/entrypoints/openai/tool_parsers/step3_tool_parser.py +303 -0
  259. vllm/entrypoints/openai/tool_parsers/utils.py +229 -0
  260. vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +556 -0
  261. vllm/entrypoints/renderer.py +409 -0
  262. vllm/entrypoints/responses_utils.py +77 -0
  263. vllm/entrypoints/sagemaker/__init__.py +4 -0
  264. vllm/entrypoints/sagemaker/routes.py +72 -0
  265. vllm/entrypoints/score_utils.py +242 -0
  266. vllm/entrypoints/ssl.py +78 -0
  267. vllm/entrypoints/tool.py +143 -0
  268. vllm/entrypoints/tool_server.py +209 -0
  269. vllm/entrypoints/utils.py +319 -0
  270. vllm/env_override.py +378 -0
  271. vllm/envs.py +1659 -0
  272. vllm/forward_context.py +356 -0
  273. vllm/inputs/__init__.py +44 -0
  274. vllm/inputs/data.py +359 -0
  275. vllm/inputs/parse.py +137 -0
  276. vllm/inputs/preprocess.py +727 -0
  277. vllm/logger.py +267 -0
  278. vllm/logging_utils/__init__.py +10 -0
  279. vllm/logging_utils/dump_input.py +83 -0
  280. vllm/logging_utils/formatter.py +77 -0
  281. vllm/logging_utils/log_time.py +34 -0
  282. vllm/logits_process.py +121 -0
  283. vllm/logprobs.py +208 -0
  284. vllm/lora/__init__.py +0 -0
  285. vllm/lora/layers/__init__.py +41 -0
  286. vllm/lora/layers/base.py +67 -0
  287. vllm/lora/layers/base_linear.py +164 -0
  288. vllm/lora/layers/column_parallel_linear.py +578 -0
  289. vllm/lora/layers/fused_moe.py +472 -0
  290. vllm/lora/layers/logits_processor.py +252 -0
  291. vllm/lora/layers/replicated_linear.py +70 -0
  292. vllm/lora/layers/row_parallel_linear.py +181 -0
  293. vllm/lora/layers/utils.py +65 -0
  294. vllm/lora/layers/vocal_parallel_embedding.py +166 -0
  295. vllm/lora/lora_weights.py +198 -0
  296. vllm/lora/models.py +890 -0
  297. vllm/lora/ops/__init__.py +0 -0
  298. vllm/lora/ops/ipex_ops/__init__.py +6 -0
  299. vllm/lora/ops/ipex_ops/lora_ops.py +57 -0
  300. vllm/lora/ops/torch_ops/__init__.py +20 -0
  301. vllm/lora/ops/torch_ops/lora_ops.py +128 -0
  302. vllm/lora/ops/triton_ops/README_TUNING.md +60 -0
  303. vllm/lora/ops/triton_ops/__init__.py +21 -0
  304. vllm/lora/ops/triton_ops/fused_moe_lora_op.py +641 -0
  305. vllm/lora/ops/triton_ops/kernel_utils.py +340 -0
  306. vllm/lora/ops/triton_ops/lora_expand_op.py +310 -0
  307. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +154 -0
  308. vllm/lora/ops/triton_ops/lora_shrink_op.py +287 -0
  309. vllm/lora/ops/triton_ops/utils.py +295 -0
  310. vllm/lora/ops/xla_ops/__init__.py +6 -0
  311. vllm/lora/ops/xla_ops/lora_ops.py +141 -0
  312. vllm/lora/peft_helper.py +128 -0
  313. vllm/lora/punica_wrapper/__init__.py +10 -0
  314. vllm/lora/punica_wrapper/punica_base.py +492 -0
  315. vllm/lora/punica_wrapper/punica_cpu.py +351 -0
  316. vllm/lora/punica_wrapper/punica_gpu.py +411 -0
  317. vllm/lora/punica_wrapper/punica_selector.py +21 -0
  318. vllm/lora/punica_wrapper/punica_tpu.py +359 -0
  319. vllm/lora/punica_wrapper/punica_xpu.py +279 -0
  320. vllm/lora/punica_wrapper/utils.py +150 -0
  321. vllm/lora/request.py +100 -0
  322. vllm/lora/resolver.py +88 -0
  323. vllm/lora/utils.py +293 -0
  324. vllm/lora/worker_manager.py +279 -0
  325. vllm/model_executor/__init__.py +11 -0
  326. vllm/model_executor/custom_op.py +194 -0
  327. vllm/model_executor/layers/__init__.py +0 -0
  328. vllm/model_executor/layers/activation.py +569 -0
  329. vllm/model_executor/layers/attention_layer_base.py +35 -0
  330. vllm/model_executor/layers/batch_invariant.py +854 -0
  331. vllm/model_executor/layers/conv.py +236 -0
  332. vllm/model_executor/layers/fla/__init__.py +8 -0
  333. vllm/model_executor/layers/fla/ops/__init__.py +17 -0
  334. vllm/model_executor/layers/fla/ops/chunk.py +240 -0
  335. vllm/model_executor/layers/fla/ops/chunk_delta_h.py +344 -0
  336. vllm/model_executor/layers/fla/ops/chunk_o.py +183 -0
  337. vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +154 -0
  338. vllm/model_executor/layers/fla/ops/cumsum.py +280 -0
  339. vllm/model_executor/layers/fla/ops/fused_recurrent.py +390 -0
  340. vllm/model_executor/layers/fla/ops/index.py +41 -0
  341. vllm/model_executor/layers/fla/ops/kda.py +1351 -0
  342. vllm/model_executor/layers/fla/ops/l2norm.py +146 -0
  343. vllm/model_executor/layers/fla/ops/layernorm_guard.py +396 -0
  344. vllm/model_executor/layers/fla/ops/op.py +60 -0
  345. vllm/model_executor/layers/fla/ops/solve_tril.py +556 -0
  346. vllm/model_executor/layers/fla/ops/utils.py +194 -0
  347. vllm/model_executor/layers/fla/ops/wy_fast.py +158 -0
  348. vllm/model_executor/layers/fused_moe/__init__.py +106 -0
  349. vllm/model_executor/layers/fused_moe/all2all_utils.py +160 -0
  350. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +406 -0
  351. vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +180 -0
  352. vllm/model_executor/layers/fused_moe/config.py +916 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H100,dtype=fp8_w8a8.json +123 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200.json +146 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_L40S.json +147 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI308X.json +213 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_L40S.json +147 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200.json +146 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200.json +146 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI300X.json +201 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI355_OAM,dtype=fp8_w8a8.json +164 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  454. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  455. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  456. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  457. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  458. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  459. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  460. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  461. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  462. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  463. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  464. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  465. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  466. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  467. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  468. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  469. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  470. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  471. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  472. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  473. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  474. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  475. vllm/model_executor/layers/fused_moe/configs/E=256,N=384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  476. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  477. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  478. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  479. vllm/model_executor/layers/fused_moe/configs/E=32,N=1408,device_name=NVIDIA_B200.json +147 -0
  480. vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  481. vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  482. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  483. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  484. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  485. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  486. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  487. vllm/model_executor/layers/fused_moe/configs/E=40,N=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  488. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  489. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  490. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  491. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
  492. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
  493. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +147 -0
  494. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  495. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
  496. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
  497. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
  498. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  499. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  500. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  501. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
  502. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
  503. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
  504. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  505. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  506. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
  507. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
  508. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
  509. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
  510. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
  511. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
  512. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  513. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  514. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  515. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  516. vllm/model_executor/layers/fused_moe/configs/E=62,N=128,device_name=AMD_Instinct_MI300X.json +200 -0
  517. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=AMD_Instinct_MI300X.json +200 -0
  518. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  519. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=AMD_Instinct_MI300X.json +200 -0
  520. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  521. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  522. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  523. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  524. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  525. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  526. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  527. vllm/model_executor/layers/fused_moe/configs/E=64,N=1408,device_name=NVIDIA_B200.json +147 -0
  528. vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  529. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  530. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  531. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  532. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  533. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
  534. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  535. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  536. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  537. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  538. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  539. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
  540. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  541. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  542. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  543. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  544. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  545. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  546. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  547. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H100_PCIe,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  548. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  549. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
  550. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  551. vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
  552. vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
  553. vllm/model_executor/layers/fused_moe/configs/E=72,N=192,device_name=AMD_Instinct_MI300X.json +200 -0
  554. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=AMD_Instinct_MI300X.json +200 -0
  555. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  556. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=AMD_Instinct_MI300X.json +200 -0
  557. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  558. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  559. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  560. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  561. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  562. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  563. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  564. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  565. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  566. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  567. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  568. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  569. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  570. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  571. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  572. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  573. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  574. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  575. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  576. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  577. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  578. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  579. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  580. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  581. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  582. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  583. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  584. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  585. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
  586. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  587. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  588. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  589. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  590. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  591. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  592. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  593. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  594. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  595. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  596. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  597. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  598. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  599. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  600. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  601. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  602. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  603. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  604. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  605. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  606. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  607. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  608. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  609. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  610. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  611. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  612. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  613. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  614. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  615. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  616. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  617. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  618. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  619. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  620. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  621. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  622. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  623. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  624. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  625. vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +354 -0
  626. vllm/model_executor/layers/fused_moe/cutlass_moe.py +1052 -0
  627. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +387 -0
  628. vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +416 -0
  629. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +420 -0
  630. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +367 -0
  631. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +307 -0
  632. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +362 -0
  633. vllm/model_executor/layers/fused_moe/flashinfer_trtllm_moe.py +192 -0
  634. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1012 -0
  635. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +792 -0
  636. vllm/model_executor/layers/fused_moe/fused_moe.py +2175 -0
  637. vllm/model_executor/layers/fused_moe/fused_moe_method_base.py +112 -0
  638. vllm/model_executor/layers/fused_moe/fused_moe_modular_method.py +164 -0
  639. vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +316 -0
  640. vllm/model_executor/layers/fused_moe/layer.py +1944 -0
  641. vllm/model_executor/layers/fused_moe/modular_kernel.py +1222 -0
  642. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +174 -0
  643. vllm/model_executor/layers/fused_moe/moe_pallas.py +83 -0
  644. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +229 -0
  645. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  646. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +362 -0
  647. vllm/model_executor/layers/fused_moe/prepare_finalize.py +77 -0
  648. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +265 -0
  649. vllm/model_executor/layers/fused_moe/routing_simulator.py +310 -0
  650. vllm/model_executor/layers/fused_moe/shared_fused_moe.py +97 -0
  651. vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +171 -0
  652. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +163 -0
  653. vllm/model_executor/layers/fused_moe/trtllm_moe.py +143 -0
  654. vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py +578 -0
  655. vllm/model_executor/layers/fused_moe/utils.py +332 -0
  656. vllm/model_executor/layers/kda.py +448 -0
  657. vllm/model_executor/layers/layernorm.py +442 -0
  658. vllm/model_executor/layers/lightning_attn.py +729 -0
  659. vllm/model_executor/layers/linear.py +1424 -0
  660. vllm/model_executor/layers/logits_processor.py +106 -0
  661. vllm/model_executor/layers/mamba/__init__.py +0 -0
  662. vllm/model_executor/layers/mamba/abstract.py +71 -0
  663. vllm/model_executor/layers/mamba/linear_attn.py +402 -0
  664. vllm/model_executor/layers/mamba/mamba_mixer.py +535 -0
  665. vllm/model_executor/layers/mamba/mamba_mixer2.py +928 -0
  666. vllm/model_executor/layers/mamba/mamba_utils.py +225 -0
  667. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  668. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +1240 -0
  669. vllm/model_executor/layers/mamba/ops/layernorm_gated.py +172 -0
  670. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +478 -0
  671. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +211 -0
  672. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +456 -0
  673. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +700 -0
  674. vllm/model_executor/layers/mamba/ops/ssd_combined.py +230 -0
  675. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +157 -0
  676. vllm/model_executor/layers/mamba/short_conv.py +264 -0
  677. vllm/model_executor/layers/mla.py +168 -0
  678. vllm/model_executor/layers/pooler.py +817 -0
  679. vllm/model_executor/layers/quantization/__init__.py +174 -0
  680. vllm/model_executor/layers/quantization/auto_round.py +454 -0
  681. vllm/model_executor/layers/quantization/awq.py +277 -0
  682. vllm/model_executor/layers/quantization/awq_marlin.py +659 -0
  683. vllm/model_executor/layers/quantization/awq_triton.py +337 -0
  684. vllm/model_executor/layers/quantization/base_config.py +170 -0
  685. vllm/model_executor/layers/quantization/bitblas.py +502 -0
  686. vllm/model_executor/layers/quantization/bitsandbytes.py +658 -0
  687. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +3 -0
  688. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +914 -0
  689. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +2284 -0
  690. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +35 -0
  691. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +392 -0
  692. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  693. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +176 -0
  694. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +124 -0
  695. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +218 -0
  696. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +183 -0
  697. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +153 -0
  698. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +138 -0
  699. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +200 -0
  700. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +125 -0
  701. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +219 -0
  702. vllm/model_executor/layers/quantization/compressed_tensors/transform/__init__.py +0 -0
  703. vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +260 -0
  704. vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +173 -0
  705. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/__init__.py +0 -0
  706. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +64 -0
  707. vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
  708. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +224 -0
  709. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  710. vllm/model_executor/layers/quantization/deepspeedfp.py +218 -0
  711. vllm/model_executor/layers/quantization/experts_int8.py +240 -0
  712. vllm/model_executor/layers/quantization/fbgemm_fp8.py +195 -0
  713. vllm/model_executor/layers/quantization/fp8.py +1333 -0
  714. vllm/model_executor/layers/quantization/fp_quant.py +420 -0
  715. vllm/model_executor/layers/quantization/gguf.py +643 -0
  716. vllm/model_executor/layers/quantization/gptq.py +393 -0
  717. vllm/model_executor/layers/quantization/gptq_bitblas.py +482 -0
  718. vllm/model_executor/layers/quantization/gptq_marlin.py +789 -0
  719. vllm/model_executor/layers/quantization/gptq_marlin_24.py +320 -0
  720. vllm/model_executor/layers/quantization/hqq_marlin.py +371 -0
  721. vllm/model_executor/layers/quantization/inc.py +65 -0
  722. vllm/model_executor/layers/quantization/input_quant_fp8.py +171 -0
  723. vllm/model_executor/layers/quantization/ipex_quant.py +467 -0
  724. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  725. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +94 -0
  726. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +105 -0
  727. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
  728. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +323 -0
  729. vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +98 -0
  730. vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +119 -0
  731. vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +111 -0
  732. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +161 -0
  733. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +159 -0
  734. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +166 -0
  735. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +73 -0
  736. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +97 -0
  737. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +120 -0
  738. vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +219 -0
  739. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +140 -0
  740. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +42 -0
  741. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +105 -0
  742. vllm/model_executor/layers/quantization/kv_cache.py +146 -0
  743. vllm/model_executor/layers/quantization/modelopt.py +1788 -0
  744. vllm/model_executor/layers/quantization/moe_wna16.py +541 -0
  745. vllm/model_executor/layers/quantization/mxfp4.py +1162 -0
  746. vllm/model_executor/layers/quantization/petit.py +320 -0
  747. vllm/model_executor/layers/quantization/ptpc_fp8.py +137 -0
  748. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  749. vllm/model_executor/layers/quantization/quark/quark.py +528 -0
  750. vllm/model_executor/layers/quantization/quark/quark_moe.py +683 -0
  751. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  752. vllm/model_executor/layers/quantization/quark/schemes/quark_ocp_mx.py +306 -0
  753. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  754. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +179 -0
  755. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +139 -0
  756. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  757. vllm/model_executor/layers/quantization/qutlass_utils.py +185 -0
  758. vllm/model_executor/layers/quantization/rtn.py +652 -0
  759. vllm/model_executor/layers/quantization/schema.py +90 -0
  760. vllm/model_executor/layers/quantization/torchao.py +380 -0
  761. vllm/model_executor/layers/quantization/tpu_int8.py +139 -0
  762. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  763. vllm/model_executor/layers/quantization/utils/allspark_utils.py +67 -0
  764. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +229 -0
  765. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  766. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  767. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  768. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  769. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  770. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  771. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  772. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  773. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  774. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  775. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  776. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  777. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  778. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  779. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  780. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  781. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  782. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  783. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  784. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  785. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  786. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  787. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  788. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  789. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  790. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  791. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  792. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  793. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  794. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  795. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  796. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  797. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  798. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  799. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  800. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  801. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  802. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  803. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  804. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  805. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  806. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  807. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  808. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  809. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  810. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  811. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  812. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  813. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  814. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  815. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  816. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  817. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  818. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  819. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  820. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  821. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  822. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  823. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  824. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  825. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  826. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  827. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  828. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  829. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  830. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  831. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  832. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  833. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  834. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  835. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  836. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  837. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  838. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  839. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  840. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  841. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  842. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  843. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  844. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  845. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  846. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  847. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  848. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  849. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  850. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  851. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  852. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  853. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  854. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  855. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  856. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  857. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  858. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  859. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  860. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  861. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  862. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  863. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  864. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  865. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  866. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  867. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  868. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  869. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  870. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  871. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  872. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  873. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  874. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  875. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  876. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  877. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  878. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  879. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  880. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  881. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  882. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  883. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  884. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  885. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  886. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  887. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  888. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  889. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  890. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  891. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  892. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  893. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  894. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  895. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  896. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  897. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  898. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  899. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  900. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  901. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  902. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  903. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  904. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  905. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  906. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  907. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  908. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  909. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  910. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  911. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  912. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  913. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  914. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  915. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  916. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  917. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  918. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  919. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  920. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  921. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  922. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  923. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  924. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  925. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  926. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  927. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  928. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  929. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  930. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  931. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  932. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  933. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  934. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  935. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  936. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  937. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  938. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  939. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  940. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  941. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  942. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  943. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  944. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  945. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  946. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  947. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  948. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  949. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  950. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  951. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  952. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  953. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  954. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  955. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  956. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  957. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  958. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  959. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  960. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  961. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  962. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  963. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  964. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  965. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  966. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  967. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  968. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  969. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  970. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  971. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  972. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  973. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  974. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  975. vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
  976. vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +89 -0
  977. vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +298 -0
  978. vllm/model_executor/layers/quantization/utils/fp8_utils.py +1203 -0
  979. vllm/model_executor/layers/quantization/utils/gptq_utils.py +158 -0
  980. vllm/model_executor/layers/quantization/utils/int8_utils.py +489 -0
  981. vllm/model_executor/layers/quantization/utils/layer_utils.py +41 -0
  982. vllm/model_executor/layers/quantization/utils/machete_utils.py +56 -0
  983. vllm/model_executor/layers/quantization/utils/marlin_utils.py +575 -0
  984. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +397 -0
  985. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +351 -0
  986. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +161 -0
  987. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +467 -0
  988. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +181 -0
  989. vllm/model_executor/layers/quantization/utils/mxfp6_utils.py +142 -0
  990. vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +24 -0
  991. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +142 -0
  992. vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +63 -0
  993. vllm/model_executor/layers/quantization/utils/ocp_mx_utils.py +51 -0
  994. vllm/model_executor/layers/quantization/utils/petit_utils.py +124 -0
  995. vllm/model_executor/layers/quantization/utils/quant_utils.py +687 -0
  996. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +516 -0
  997. vllm/model_executor/layers/resampler.py +283 -0
  998. vllm/model_executor/layers/rotary_embedding/__init__.py +278 -0
  999. vllm/model_executor/layers/rotary_embedding/base.py +235 -0
  1000. vllm/model_executor/layers/rotary_embedding/common.py +188 -0
  1001. vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +165 -0
  1002. vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +215 -0
  1003. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +43 -0
  1004. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +68 -0
  1005. vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +75 -0
  1006. vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
  1007. vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
  1008. vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +80 -0
  1009. vllm/model_executor/layers/rotary_embedding/mrope.py +397 -0
  1010. vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +47 -0
  1011. vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +159 -0
  1012. vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +81 -0
  1013. vllm/model_executor/layers/utils.py +251 -0
  1014. vllm/model_executor/layers/vocab_parallel_embedding.py +558 -0
  1015. vllm/model_executor/model_loader/__init__.py +148 -0
  1016. vllm/model_executor/model_loader/base_loader.py +57 -0
  1017. vllm/model_executor/model_loader/bitsandbytes_loader.py +822 -0
  1018. vllm/model_executor/model_loader/default_loader.py +327 -0
  1019. vllm/model_executor/model_loader/dummy_loader.py +28 -0
  1020. vllm/model_executor/model_loader/gguf_loader.py +176 -0
  1021. vllm/model_executor/model_loader/online_quantization.py +224 -0
  1022. vllm/model_executor/model_loader/runai_streamer_loader.py +116 -0
  1023. vllm/model_executor/model_loader/sharded_state_loader.py +206 -0
  1024. vllm/model_executor/model_loader/tensorizer.py +790 -0
  1025. vllm/model_executor/model_loader/tensorizer_loader.py +151 -0
  1026. vllm/model_executor/model_loader/tpu.py +118 -0
  1027. vllm/model_executor/model_loader/utils.py +288 -0
  1028. vllm/model_executor/model_loader/weight_utils.py +1084 -0
  1029. vllm/model_executor/models/__init__.py +44 -0
  1030. vllm/model_executor/models/adapters.py +543 -0
  1031. vllm/model_executor/models/afmoe.py +711 -0
  1032. vllm/model_executor/models/aimv2.py +247 -0
  1033. vllm/model_executor/models/apertus.py +587 -0
  1034. vllm/model_executor/models/arcee.py +439 -0
  1035. vllm/model_executor/models/arctic.py +635 -0
  1036. vllm/model_executor/models/aria.py +655 -0
  1037. vllm/model_executor/models/aya_vision.py +450 -0
  1038. vllm/model_executor/models/baichuan.py +496 -0
  1039. vllm/model_executor/models/bailing_moe.py +646 -0
  1040. vllm/model_executor/models/bamba.py +522 -0
  1041. vllm/model_executor/models/bee.py +157 -0
  1042. vllm/model_executor/models/bert.py +925 -0
  1043. vllm/model_executor/models/bert_with_rope.py +732 -0
  1044. vllm/model_executor/models/blip.py +349 -0
  1045. vllm/model_executor/models/blip2.py +695 -0
  1046. vllm/model_executor/models/bloom.py +390 -0
  1047. vllm/model_executor/models/chameleon.py +1120 -0
  1048. vllm/model_executor/models/chatglm.py +498 -0
  1049. vllm/model_executor/models/clip.py +965 -0
  1050. vllm/model_executor/models/cohere2_vision.py +472 -0
  1051. vllm/model_executor/models/commandr.py +473 -0
  1052. vllm/model_executor/models/config.py +503 -0
  1053. vllm/model_executor/models/dbrx.py +482 -0
  1054. vllm/model_executor/models/deepencoder.py +673 -0
  1055. vllm/model_executor/models/deepseek_eagle.py +260 -0
  1056. vllm/model_executor/models/deepseek_mtp.py +360 -0
  1057. vllm/model_executor/models/deepseek_ocr.py +593 -0
  1058. vllm/model_executor/models/deepseek_v2.py +1649 -0
  1059. vllm/model_executor/models/deepseek_vl2.py +655 -0
  1060. vllm/model_executor/models/dots1.py +574 -0
  1061. vllm/model_executor/models/dots_ocr.py +900 -0
  1062. vllm/model_executor/models/ernie45.py +53 -0
  1063. vllm/model_executor/models/ernie45_moe.py +759 -0
  1064. vllm/model_executor/models/ernie45_vl.py +1742 -0
  1065. vllm/model_executor/models/ernie45_vl_moe.py +803 -0
  1066. vllm/model_executor/models/ernie_mtp.py +279 -0
  1067. vllm/model_executor/models/exaone.py +545 -0
  1068. vllm/model_executor/models/exaone4.py +531 -0
  1069. vllm/model_executor/models/fairseq2_llama.py +154 -0
  1070. vllm/model_executor/models/falcon.py +545 -0
  1071. vllm/model_executor/models/falcon_h1.py +685 -0
  1072. vllm/model_executor/models/flex_olmo.py +155 -0
  1073. vllm/model_executor/models/fuyu.py +373 -0
  1074. vllm/model_executor/models/gemma.py +426 -0
  1075. vllm/model_executor/models/gemma2.py +439 -0
  1076. vllm/model_executor/models/gemma3.py +571 -0
  1077. vllm/model_executor/models/gemma3_mm.py +741 -0
  1078. vllm/model_executor/models/gemma3n.py +1165 -0
  1079. vllm/model_executor/models/gemma3n_mm.py +811 -0
  1080. vllm/model_executor/models/glm.py +23 -0
  1081. vllm/model_executor/models/glm4.py +305 -0
  1082. vllm/model_executor/models/glm4_1v.py +1821 -0
  1083. vllm/model_executor/models/glm4_moe.py +747 -0
  1084. vllm/model_executor/models/glm4_moe_mtp.py +359 -0
  1085. vllm/model_executor/models/glm4v.py +784 -0
  1086. vllm/model_executor/models/gpt2.py +397 -0
  1087. vllm/model_executor/models/gpt_bigcode.py +339 -0
  1088. vllm/model_executor/models/gpt_j.py +346 -0
  1089. vllm/model_executor/models/gpt_neox.py +344 -0
  1090. vllm/model_executor/models/gpt_oss.py +738 -0
  1091. vllm/model_executor/models/granite.py +516 -0
  1092. vllm/model_executor/models/granite_speech.py +913 -0
  1093. vllm/model_executor/models/granitemoe.py +569 -0
  1094. vllm/model_executor/models/granitemoehybrid.py +709 -0
  1095. vllm/model_executor/models/granitemoeshared.py +333 -0
  1096. vllm/model_executor/models/gritlm.py +245 -0
  1097. vllm/model_executor/models/grok1.py +558 -0
  1098. vllm/model_executor/models/h2ovl.py +554 -0
  1099. vllm/model_executor/models/hunyuan_v1.py +1053 -0
  1100. vllm/model_executor/models/hyperclovax_vision.py +1166 -0
  1101. vllm/model_executor/models/idefics2_vision_model.py +426 -0
  1102. vllm/model_executor/models/idefics3.py +717 -0
  1103. vllm/model_executor/models/interfaces.py +1092 -0
  1104. vllm/model_executor/models/interfaces_base.py +214 -0
  1105. vllm/model_executor/models/intern_vit.py +453 -0
  1106. vllm/model_executor/models/internlm2.py +460 -0
  1107. vllm/model_executor/models/internlm2_ve.py +142 -0
  1108. vllm/model_executor/models/interns1.py +830 -0
  1109. vllm/model_executor/models/interns1_vit.py +432 -0
  1110. vllm/model_executor/models/internvl.py +1452 -0
  1111. vllm/model_executor/models/jais.py +397 -0
  1112. vllm/model_executor/models/jamba.py +610 -0
  1113. vllm/model_executor/models/jina_vl.py +147 -0
  1114. vllm/model_executor/models/keye.py +1761 -0
  1115. vllm/model_executor/models/keye_vl1_5.py +726 -0
  1116. vllm/model_executor/models/kimi_linear.py +663 -0
  1117. vllm/model_executor/models/kimi_vl.py +578 -0
  1118. vllm/model_executor/models/lfm2.py +532 -0
  1119. vllm/model_executor/models/lfm2_moe.py +762 -0
  1120. vllm/model_executor/models/lightonocr.py +195 -0
  1121. vllm/model_executor/models/llama.py +732 -0
  1122. vllm/model_executor/models/llama4.py +859 -0
  1123. vllm/model_executor/models/llama4_eagle.py +223 -0
  1124. vllm/model_executor/models/llama_eagle.py +218 -0
  1125. vllm/model_executor/models/llama_eagle3.py +367 -0
  1126. vllm/model_executor/models/llava.py +842 -0
  1127. vllm/model_executor/models/llava_next.py +583 -0
  1128. vllm/model_executor/models/llava_next_video.py +467 -0
  1129. vllm/model_executor/models/llava_onevision.py +923 -0
  1130. vllm/model_executor/models/longcat_flash.py +749 -0
  1131. vllm/model_executor/models/longcat_flash_mtp.py +349 -0
  1132. vllm/model_executor/models/mamba.py +276 -0
  1133. vllm/model_executor/models/mamba2.py +289 -0
  1134. vllm/model_executor/models/medusa.py +179 -0
  1135. vllm/model_executor/models/midashenglm.py +827 -0
  1136. vllm/model_executor/models/mimo.py +188 -0
  1137. vllm/model_executor/models/mimo_mtp.py +294 -0
  1138. vllm/model_executor/models/minicpm.py +664 -0
  1139. vllm/model_executor/models/minicpm3.py +242 -0
  1140. vllm/model_executor/models/minicpm_eagle.py +389 -0
  1141. vllm/model_executor/models/minicpmo.py +768 -0
  1142. vllm/model_executor/models/minicpmv.py +1745 -0
  1143. vllm/model_executor/models/minimax_m2.py +552 -0
  1144. vllm/model_executor/models/minimax_text_01.py +1012 -0
  1145. vllm/model_executor/models/minimax_vl_01.py +396 -0
  1146. vllm/model_executor/models/mistral3.py +637 -0
  1147. vllm/model_executor/models/mixtral.py +621 -0
  1148. vllm/model_executor/models/mllama4.py +1147 -0
  1149. vllm/model_executor/models/mlp_speculator.py +235 -0
  1150. vllm/model_executor/models/modernbert.py +450 -0
  1151. vllm/model_executor/models/module_mapping.py +74 -0
  1152. vllm/model_executor/models/molmo.py +1555 -0
  1153. vllm/model_executor/models/moonvit.py +677 -0
  1154. vllm/model_executor/models/mpt.py +335 -0
  1155. vllm/model_executor/models/nano_nemotron_vl.py +1740 -0
  1156. vllm/model_executor/models/nemotron.py +518 -0
  1157. vllm/model_executor/models/nemotron_h.py +852 -0
  1158. vllm/model_executor/models/nemotron_nas.py +491 -0
  1159. vllm/model_executor/models/nemotron_vl.py +653 -0
  1160. vllm/model_executor/models/nvlm_d.py +216 -0
  1161. vllm/model_executor/models/olmo.py +414 -0
  1162. vllm/model_executor/models/olmo2.py +454 -0
  1163. vllm/model_executor/models/olmoe.py +498 -0
  1164. vllm/model_executor/models/openpangu.py +1062 -0
  1165. vllm/model_executor/models/openpangu_mtp.py +265 -0
  1166. vllm/model_executor/models/opt.py +426 -0
  1167. vllm/model_executor/models/orion.py +372 -0
  1168. vllm/model_executor/models/ouro.py +516 -0
  1169. vllm/model_executor/models/ovis.py +559 -0
  1170. vllm/model_executor/models/ovis2_5.py +673 -0
  1171. vllm/model_executor/models/paddleocr_vl.py +1407 -0
  1172. vllm/model_executor/models/paligemma.py +412 -0
  1173. vllm/model_executor/models/persimmon.py +377 -0
  1174. vllm/model_executor/models/phi.py +374 -0
  1175. vllm/model_executor/models/phi3.py +18 -0
  1176. vllm/model_executor/models/phi3v.py +737 -0
  1177. vllm/model_executor/models/phi4_multimodal.py +1447 -0
  1178. vllm/model_executor/models/phi4mm.py +1253 -0
  1179. vllm/model_executor/models/phi4mm_audio.py +1296 -0
  1180. vllm/model_executor/models/phi4mm_utils.py +1907 -0
  1181. vllm/model_executor/models/phimoe.py +675 -0
  1182. vllm/model_executor/models/pixtral.py +1352 -0
  1183. vllm/model_executor/models/plamo2.py +981 -0
  1184. vllm/model_executor/models/qwen.py +368 -0
  1185. vllm/model_executor/models/qwen2.py +541 -0
  1186. vllm/model_executor/models/qwen2_5_omni_thinker.py +1246 -0
  1187. vllm/model_executor/models/qwen2_5_vl.py +1613 -0
  1188. vllm/model_executor/models/qwen2_audio.py +473 -0
  1189. vllm/model_executor/models/qwen2_moe.py +596 -0
  1190. vllm/model_executor/models/qwen2_rm.py +123 -0
  1191. vllm/model_executor/models/qwen2_vl.py +1670 -0
  1192. vllm/model_executor/models/qwen3.py +336 -0
  1193. vllm/model_executor/models/qwen3_moe.py +744 -0
  1194. vllm/model_executor/models/qwen3_next.py +1395 -0
  1195. vllm/model_executor/models/qwen3_next_mtp.py +296 -0
  1196. vllm/model_executor/models/qwen3_omni_moe_thinker.py +1721 -0
  1197. vllm/model_executor/models/qwen3_vl.py +1673 -0
  1198. vllm/model_executor/models/qwen3_vl_moe.py +415 -0
  1199. vllm/model_executor/models/qwen_vl.py +802 -0
  1200. vllm/model_executor/models/radio.py +555 -0
  1201. vllm/model_executor/models/registry.py +1155 -0
  1202. vllm/model_executor/models/roberta.py +259 -0
  1203. vllm/model_executor/models/rvl.py +107 -0
  1204. vllm/model_executor/models/seed_oss.py +497 -0
  1205. vllm/model_executor/models/siglip.py +1174 -0
  1206. vllm/model_executor/models/siglip2navit.py +724 -0
  1207. vllm/model_executor/models/skyworkr1v.py +953 -0
  1208. vllm/model_executor/models/smolvlm.py +38 -0
  1209. vllm/model_executor/models/solar.py +502 -0
  1210. vllm/model_executor/models/stablelm.py +359 -0
  1211. vllm/model_executor/models/starcoder2.py +367 -0
  1212. vllm/model_executor/models/step3_text.py +559 -0
  1213. vllm/model_executor/models/step3_vl.py +1148 -0
  1214. vllm/model_executor/models/swin.py +514 -0
  1215. vllm/model_executor/models/tarsier.py +619 -0
  1216. vllm/model_executor/models/telechat2.py +153 -0
  1217. vllm/model_executor/models/teleflm.py +78 -0
  1218. vllm/model_executor/models/terratorch.py +319 -0
  1219. vllm/model_executor/models/transformers/__init__.py +127 -0
  1220. vllm/model_executor/models/transformers/base.py +464 -0
  1221. vllm/model_executor/models/transformers/causal.py +65 -0
  1222. vllm/model_executor/models/transformers/legacy.py +90 -0
  1223. vllm/model_executor/models/transformers/moe.py +318 -0
  1224. vllm/model_executor/models/transformers/multimodal.py +411 -0
  1225. vllm/model_executor/models/transformers/pooling.py +119 -0
  1226. vllm/model_executor/models/transformers/utils.py +207 -0
  1227. vllm/model_executor/models/ultravox.py +681 -0
  1228. vllm/model_executor/models/utils.py +877 -0
  1229. vllm/model_executor/models/vision.py +552 -0
  1230. vllm/model_executor/models/voxtral.py +845 -0
  1231. vllm/model_executor/models/whisper.py +959 -0
  1232. vllm/model_executor/models/zamba2.py +986 -0
  1233. vllm/model_executor/parameter.py +642 -0
  1234. vllm/model_executor/utils.py +94 -0
  1235. vllm/model_executor/warmup/__init__.py +0 -0
  1236. vllm/model_executor/warmup/deep_gemm_warmup.py +314 -0
  1237. vllm/model_executor/warmup/kernel_warmup.py +98 -0
  1238. vllm/multimodal/__init__.py +40 -0
  1239. vllm/multimodal/audio.py +118 -0
  1240. vllm/multimodal/base.py +26 -0
  1241. vllm/multimodal/cache.py +755 -0
  1242. vllm/multimodal/evs.py +294 -0
  1243. vllm/multimodal/hasher.py +106 -0
  1244. vllm/multimodal/image.py +130 -0
  1245. vllm/multimodal/inputs.py +1036 -0
  1246. vllm/multimodal/parse.py +544 -0
  1247. vllm/multimodal/processing.py +2186 -0
  1248. vllm/multimodal/profiling.py +369 -0
  1249. vllm/multimodal/registry.py +360 -0
  1250. vllm/multimodal/utils.py +512 -0
  1251. vllm/multimodal/video.py +306 -0
  1252. vllm/outputs.py +345 -0
  1253. vllm/platforms/__init__.py +277 -0
  1254. vllm/platforms/cpu.py +414 -0
  1255. vllm/platforms/cuda.py +657 -0
  1256. vllm/platforms/interface.py +639 -0
  1257. vllm/platforms/rocm.py +466 -0
  1258. vllm/platforms/tpu.py +276 -0
  1259. vllm/platforms/xpu.py +274 -0
  1260. vllm/plugins/__init__.py +78 -0
  1261. vllm/plugins/io_processors/__init__.py +68 -0
  1262. vllm/plugins/io_processors/interface.py +77 -0
  1263. vllm/plugins/lora_resolvers/__init__.py +0 -0
  1264. vllm/plugins/lora_resolvers/filesystem_resolver.py +52 -0
  1265. vllm/pooling_params.py +228 -0
  1266. vllm/profiler/__init__.py +0 -0
  1267. vllm/profiler/gpu_profiler.py +37 -0
  1268. vllm/profiler/layerwise_profile.py +392 -0
  1269. vllm/profiler/utils.py +151 -0
  1270. vllm/py.typed +2 -0
  1271. vllm/ray/__init__.py +0 -0
  1272. vllm/ray/lazy_utils.py +26 -0
  1273. vllm/ray/ray_env.py +79 -0
  1274. vllm/reasoning/__init__.py +92 -0
  1275. vllm/reasoning/abs_reasoning_parsers.py +290 -0
  1276. vllm/reasoning/basic_parsers.py +162 -0
  1277. vllm/reasoning/deepseek_r1_reasoning_parser.py +67 -0
  1278. vllm/reasoning/deepseek_v3_reasoning_parser.py +62 -0
  1279. vllm/reasoning/ernie45_reasoning_parser.py +165 -0
  1280. vllm/reasoning/glm4_moe_reasoning_parser.py +171 -0
  1281. vllm/reasoning/gptoss_reasoning_parser.py +173 -0
  1282. vllm/reasoning/granite_reasoning_parser.py +363 -0
  1283. vllm/reasoning/hunyuan_a13b_reasoning_parser.py +237 -0
  1284. vllm/reasoning/identity_reasoning_parser.py +58 -0
  1285. vllm/reasoning/minimax_m2_reasoning_parser.py +67 -0
  1286. vllm/reasoning/mistral_reasoning_parser.py +55 -0
  1287. vllm/reasoning/olmo3_reasoning_parser.py +302 -0
  1288. vllm/reasoning/qwen3_reasoning_parser.py +67 -0
  1289. vllm/reasoning/seedoss_reasoning_parser.py +27 -0
  1290. vllm/reasoning/step3_reasoning_parser.py +107 -0
  1291. vllm/sampling_params.py +669 -0
  1292. vllm/scalar_type.py +355 -0
  1293. vllm/scripts.py +17 -0
  1294. vllm/sequence.py +98 -0
  1295. vllm/tasks.py +13 -0
  1296. vllm/third_party/__init__.py +0 -0
  1297. vllm/third_party/pynvml.py +6140 -0
  1298. vllm/tracing.py +135 -0
  1299. vllm/transformers_utils/__init__.py +26 -0
  1300. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1301. vllm/transformers_utils/chat_templates/registry.py +73 -0
  1302. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1303. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1304. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1305. vllm/transformers_utils/chat_templates/template_deepseek_ocr.jinja +14 -0
  1306. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1307. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1308. vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
  1309. vllm/transformers_utils/config.py +1203 -0
  1310. vllm/transformers_utils/config_parser_base.py +20 -0
  1311. vllm/transformers_utils/configs/__init__.py +70 -0
  1312. vllm/transformers_utils/configs/afmoe.py +84 -0
  1313. vllm/transformers_utils/configs/arctic.py +206 -0
  1314. vllm/transformers_utils/configs/chatglm.py +75 -0
  1315. vllm/transformers_utils/configs/deepseek_vl2.py +126 -0
  1316. vllm/transformers_utils/configs/dotsocr.py +71 -0
  1317. vllm/transformers_utils/configs/eagle.py +84 -0
  1318. vllm/transformers_utils/configs/falcon.py +89 -0
  1319. vllm/transformers_utils/configs/flex_olmo.py +77 -0
  1320. vllm/transformers_utils/configs/jais.py +243 -0
  1321. vllm/transformers_utils/configs/kimi_linear.py +144 -0
  1322. vllm/transformers_utils/configs/kimi_vl.py +38 -0
  1323. vllm/transformers_utils/configs/lfm2_moe.py +159 -0
  1324. vllm/transformers_utils/configs/medusa.py +65 -0
  1325. vllm/transformers_utils/configs/midashenglm.py +103 -0
  1326. vllm/transformers_utils/configs/mistral.py +174 -0
  1327. vllm/transformers_utils/configs/mlp_speculator.py +69 -0
  1328. vllm/transformers_utils/configs/moonvit.py +33 -0
  1329. vllm/transformers_utils/configs/nemotron.py +212 -0
  1330. vllm/transformers_utils/configs/nemotron_h.py +282 -0
  1331. vllm/transformers_utils/configs/olmo3.py +79 -0
  1332. vllm/transformers_utils/configs/ovis.py +182 -0
  1333. vllm/transformers_utils/configs/qwen3_next.py +274 -0
  1334. vllm/transformers_utils/configs/radio.py +89 -0
  1335. vllm/transformers_utils/configs/speculators/__init__.py +2 -0
  1336. vllm/transformers_utils/configs/speculators/algos.py +38 -0
  1337. vllm/transformers_utils/configs/speculators/base.py +114 -0
  1338. vllm/transformers_utils/configs/step3_vl.py +174 -0
  1339. vllm/transformers_utils/configs/ultravox.py +118 -0
  1340. vllm/transformers_utils/detokenizer_utils.py +198 -0
  1341. vllm/transformers_utils/dynamic_module.py +59 -0
  1342. vllm/transformers_utils/processor.py +402 -0
  1343. vllm/transformers_utils/processors/__init__.py +15 -0
  1344. vllm/transformers_utils/processors/deepseek_ocr.py +438 -0
  1345. vllm/transformers_utils/processors/deepseek_vl2.py +406 -0
  1346. vllm/transformers_utils/processors/ovis.py +453 -0
  1347. vllm/transformers_utils/processors/ovis2_5.py +468 -0
  1348. vllm/transformers_utils/runai_utils.py +104 -0
  1349. vllm/transformers_utils/s3_utils.py +95 -0
  1350. vllm/transformers_utils/tokenizer.py +293 -0
  1351. vllm/transformers_utils/tokenizer_base.py +155 -0
  1352. vllm/transformers_utils/tokenizers/__init__.py +16 -0
  1353. vllm/transformers_utils/tokenizers/mistral.py +502 -0
  1354. vllm/transformers_utils/utils.py +130 -0
  1355. vllm/triton_utils/__init__.py +19 -0
  1356. vllm/triton_utils/importing.py +103 -0
  1357. vllm/usage/__init__.py +0 -0
  1358. vllm/usage/usage_lib.py +294 -0
  1359. vllm/utils/__init__.py +82 -0
  1360. vllm/utils/argparse_utils.py +487 -0
  1361. vllm/utils/async_utils.py +303 -0
  1362. vllm/utils/cache.py +214 -0
  1363. vllm/utils/collection_utils.py +139 -0
  1364. vllm/utils/counter.py +45 -0
  1365. vllm/utils/deep_gemm.py +391 -0
  1366. vllm/utils/flashinfer.py +490 -0
  1367. vllm/utils/func_utils.py +236 -0
  1368. vllm/utils/gc_utils.py +147 -0
  1369. vllm/utils/hashing.py +63 -0
  1370. vllm/utils/import_utils.py +411 -0
  1371. vllm/utils/jsontree.py +165 -0
  1372. vllm/utils/math_utils.py +32 -0
  1373. vllm/utils/mem_constants.py +13 -0
  1374. vllm/utils/mem_utils.py +232 -0
  1375. vllm/utils/nccl.py +64 -0
  1376. vllm/utils/network_utils.py +331 -0
  1377. vllm/utils/platform_utils.py +59 -0
  1378. vllm/utils/profiling.py +56 -0
  1379. vllm/utils/registry.py +49 -0
  1380. vllm/utils/serial_utils.py +169 -0
  1381. vllm/utils/system_utils.py +229 -0
  1382. vllm/utils/tensor_schema.py +255 -0
  1383. vllm/utils/torch_utils.py +657 -0
  1384. vllm/v1/__init__.py +0 -0
  1385. vllm/v1/attention/__init__.py +0 -0
  1386. vllm/v1/attention/backends/__init__.py +0 -0
  1387. vllm/v1/attention/backends/cpu_attn.py +496 -0
  1388. vllm/v1/attention/backends/flash_attn.py +1028 -0
  1389. vllm/v1/attention/backends/flashinfer.py +1572 -0
  1390. vllm/v1/attention/backends/flex_attention.py +926 -0
  1391. vllm/v1/attention/backends/gdn_attn.py +387 -0
  1392. vllm/v1/attention/backends/linear_attn.py +74 -0
  1393. vllm/v1/attention/backends/mamba1_attn.py +165 -0
  1394. vllm/v1/attention/backends/mamba2_attn.py +354 -0
  1395. vllm/v1/attention/backends/mamba_attn.py +115 -0
  1396. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1397. vllm/v1/attention/backends/mla/common.py +2031 -0
  1398. vllm/v1/attention/backends/mla/cutlass_mla.py +275 -0
  1399. vllm/v1/attention/backends/mla/flashattn_mla.py +337 -0
  1400. vllm/v1/attention/backends/mla/flashinfer_mla.py +171 -0
  1401. vllm/v1/attention/backends/mla/flashmla.py +314 -0
  1402. vllm/v1/attention/backends/mla/flashmla_sparse.py +548 -0
  1403. vllm/v1/attention/backends/mla/indexer.py +362 -0
  1404. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +294 -0
  1405. vllm/v1/attention/backends/mla/triton_mla.py +171 -0
  1406. vllm/v1/attention/backends/pallas.py +436 -0
  1407. vllm/v1/attention/backends/rocm_aiter_fa.py +816 -0
  1408. vllm/v1/attention/backends/rocm_aiter_unified_attn.py +196 -0
  1409. vllm/v1/attention/backends/rocm_attn.py +362 -0
  1410. vllm/v1/attention/backends/short_conv_attn.py +105 -0
  1411. vllm/v1/attention/backends/tree_attn.py +425 -0
  1412. vllm/v1/attention/backends/triton_attn.py +373 -0
  1413. vllm/v1/attention/backends/utils.py +1116 -0
  1414. vllm/v1/attention/backends/xformers.py +417 -0
  1415. vllm/v1/core/__init__.py +0 -0
  1416. vllm/v1/core/block_pool.py +428 -0
  1417. vllm/v1/core/encoder_cache_manager.py +343 -0
  1418. vllm/v1/core/kv_cache_coordinator.py +480 -0
  1419. vllm/v1/core/kv_cache_manager.py +420 -0
  1420. vllm/v1/core/kv_cache_utils.py +1340 -0
  1421. vllm/v1/core/sched/__init__.py +0 -0
  1422. vllm/v1/core/sched/async_scheduler.py +62 -0
  1423. vllm/v1/core/sched/interface.py +181 -0
  1424. vllm/v1/core/sched/output.py +202 -0
  1425. vllm/v1/core/sched/request_queue.py +221 -0
  1426. vllm/v1/core/sched/scheduler.py +1617 -0
  1427. vllm/v1/core/sched/utils.py +72 -0
  1428. vllm/v1/core/single_type_kv_cache_manager.py +736 -0
  1429. vllm/v1/cudagraph_dispatcher.py +148 -0
  1430. vllm/v1/engine/__init__.py +206 -0
  1431. vllm/v1/engine/async_llm.py +797 -0
  1432. vllm/v1/engine/coordinator.py +377 -0
  1433. vllm/v1/engine/core.py +1420 -0
  1434. vllm/v1/engine/core_client.py +1400 -0
  1435. vllm/v1/engine/detokenizer.py +351 -0
  1436. vllm/v1/engine/exceptions.py +18 -0
  1437. vllm/v1/engine/llm_engine.py +408 -0
  1438. vllm/v1/engine/logprobs.py +182 -0
  1439. vllm/v1/engine/output_processor.py +642 -0
  1440. vllm/v1/engine/parallel_sampling.py +145 -0
  1441. vllm/v1/engine/processor.py +621 -0
  1442. vllm/v1/engine/utils.py +1072 -0
  1443. vllm/v1/executor/__init__.py +6 -0
  1444. vllm/v1/executor/abstract.py +352 -0
  1445. vllm/v1/executor/multiproc_executor.py +877 -0
  1446. vllm/v1/executor/ray_distributed_executor.py +8 -0
  1447. vllm/v1/executor/ray_executor.py +626 -0
  1448. vllm/v1/executor/ray_utils.py +465 -0
  1449. vllm/v1/executor/uniproc_executor.py +183 -0
  1450. vllm/v1/kv_cache_interface.py +403 -0
  1451. vllm/v1/kv_offload/__init__.py +0 -0
  1452. vllm/v1/kv_offload/abstract.py +161 -0
  1453. vllm/v1/kv_offload/arc_manager.py +237 -0
  1454. vllm/v1/kv_offload/backend.py +97 -0
  1455. vllm/v1/kv_offload/backends/__init__.py +0 -0
  1456. vllm/v1/kv_offload/backends/cpu.py +62 -0
  1457. vllm/v1/kv_offload/cpu.py +93 -0
  1458. vllm/v1/kv_offload/factory.py +56 -0
  1459. vllm/v1/kv_offload/lru_manager.py +139 -0
  1460. vllm/v1/kv_offload/mediums.py +39 -0
  1461. vllm/v1/kv_offload/spec.py +62 -0
  1462. vllm/v1/kv_offload/worker/__init__.py +0 -0
  1463. vllm/v1/kv_offload/worker/cpu_gpu.py +185 -0
  1464. vllm/v1/kv_offload/worker/worker.py +144 -0
  1465. vllm/v1/metrics/__init__.py +0 -0
  1466. vllm/v1/metrics/loggers.py +1238 -0
  1467. vllm/v1/metrics/prometheus.py +82 -0
  1468. vllm/v1/metrics/ray_wrappers.py +169 -0
  1469. vllm/v1/metrics/reader.py +257 -0
  1470. vllm/v1/metrics/stats.py +420 -0
  1471. vllm/v1/outputs.py +249 -0
  1472. vllm/v1/pool/__init__.py +0 -0
  1473. vllm/v1/pool/metadata.py +82 -0
  1474. vllm/v1/request.py +259 -0
  1475. vllm/v1/sample/__init__.py +0 -0
  1476. vllm/v1/sample/logits_processor/__init__.py +352 -0
  1477. vllm/v1/sample/logits_processor/builtin.py +274 -0
  1478. vllm/v1/sample/logits_processor/interface.py +106 -0
  1479. vllm/v1/sample/logits_processor/state.py +165 -0
  1480. vllm/v1/sample/metadata.py +44 -0
  1481. vllm/v1/sample/ops/__init__.py +0 -0
  1482. vllm/v1/sample/ops/bad_words.py +52 -0
  1483. vllm/v1/sample/ops/logprobs.py +25 -0
  1484. vllm/v1/sample/ops/penalties.py +57 -0
  1485. vllm/v1/sample/ops/topk_topp_sampler.py +290 -0
  1486. vllm/v1/sample/rejection_sampler.py +793 -0
  1487. vllm/v1/sample/sampler.py +316 -0
  1488. vllm/v1/sample/tpu/__init__.py +0 -0
  1489. vllm/v1/sample/tpu/metadata.py +120 -0
  1490. vllm/v1/sample/tpu/sampler.py +215 -0
  1491. vllm/v1/serial_utils.py +532 -0
  1492. vllm/v1/spec_decode/__init__.py +0 -0
  1493. vllm/v1/spec_decode/eagle.py +1225 -0
  1494. vllm/v1/spec_decode/medusa.py +73 -0
  1495. vllm/v1/spec_decode/metadata.py +66 -0
  1496. vllm/v1/spec_decode/metrics.py +224 -0
  1497. vllm/v1/spec_decode/ngram_proposer.py +291 -0
  1498. vllm/v1/spec_decode/suffix_decoding.py +103 -0
  1499. vllm/v1/spec_decode/utils.py +16 -0
  1500. vllm/v1/structured_output/__init__.py +338 -0
  1501. vllm/v1/structured_output/backend_guidance.py +265 -0
  1502. vllm/v1/structured_output/backend_lm_format_enforcer.py +177 -0
  1503. vllm/v1/structured_output/backend_outlines.py +324 -0
  1504. vllm/v1/structured_output/backend_types.py +136 -0
  1505. vllm/v1/structured_output/backend_xgrammar.py +362 -0
  1506. vllm/v1/structured_output/request.py +94 -0
  1507. vllm/v1/structured_output/utils.py +469 -0
  1508. vllm/v1/utils.py +414 -0
  1509. vllm/v1/worker/__init__.py +0 -0
  1510. vllm/v1/worker/block_table.py +327 -0
  1511. vllm/v1/worker/cpu_model_runner.py +122 -0
  1512. vllm/v1/worker/cpu_worker.py +206 -0
  1513. vllm/v1/worker/dp_utils.py +230 -0
  1514. vllm/v1/worker/ec_connector_model_runner_mixin.py +87 -0
  1515. vllm/v1/worker/gpu_input_batch.py +975 -0
  1516. vllm/v1/worker/gpu_model_runner.py +5102 -0
  1517. vllm/v1/worker/gpu_ubatch_wrapper.py +466 -0
  1518. vllm/v1/worker/gpu_worker.py +894 -0
  1519. vllm/v1/worker/kv_connector_model_runner_mixin.py +144 -0
  1520. vllm/v1/worker/lora_model_runner_mixin.py +213 -0
  1521. vllm/v1/worker/tpu_input_batch.py +593 -0
  1522. vllm/v1/worker/tpu_model_runner.py +2173 -0
  1523. vllm/v1/worker/tpu_worker.py +355 -0
  1524. vllm/v1/worker/ubatch_utils.py +73 -0
  1525. vllm/v1/worker/ubatching.py +231 -0
  1526. vllm/v1/worker/utils.py +366 -0
  1527. vllm/v1/worker/worker_base.py +375 -0
  1528. vllm/v1/worker/xpu_model_runner.py +55 -0
  1529. vllm/v1/worker/xpu_worker.py +189 -0
  1530. vllm/version.py +39 -0
  1531. vllm/vllm_flash_attn/.gitkeep +0 -0
  1532. vllm_cpu_amxbf16-0.11.2.post2.dist-info/METADATA +345 -0
  1533. vllm_cpu_amxbf16-0.11.2.post2.dist-info/RECORD +1536 -0
  1534. vllm_cpu_amxbf16-0.11.2.post2.dist-info/WHEEL +5 -0
  1535. vllm_cpu_amxbf16-0.11.2.post2.dist-info/entry_points.txt +5 -0
  1536. vllm_cpu_amxbf16-0.11.2.post2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1944 @@
+ # SPDX-License-Identifier: Apache-2.0
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+
+ from collections.abc import Callable, Iterable
+ from contextlib import nullcontext
+ from enum import Enum
+ from functools import partial
+ from typing import Literal, get_args, overload
+
+ import torch
+ import torch.nn.functional as F
+ from torch.nn.parameter import UninitializedParameter
+
+ import vllm.envs as envs
+ from vllm._aiter_ops import rocm_aiter_ops
+ from vllm.config import VllmConfig, get_current_vllm_config
+ from vllm.config.parallel import ExpertPlacementStrategy
+ from vllm.distributed import (
+     get_dp_group,
+     get_ep_group,
+     get_tensor_model_parallel_world_size,
+     tensor_model_parallel_all_reduce,
+ )
+ from vllm.distributed.eplb.eplb_state import EplbState
+ from vllm.forward_context import ForwardContext, get_forward_context
+ from vllm.logger import init_logger
+ from vllm.model_executor.custom_op import CustomOp
+ from vllm.model_executor.layers.fused_moe.config import (
+     FusedMoEConfig,
+     FusedMoEParallelConfig,
+     FusedMoEQuantConfig,
+     RoutingMethodType,
+ )
+ from vllm.model_executor.layers.fused_moe.fused_moe import zero_experts_compute_triton
+ from vllm.model_executor.layers.fused_moe.modular_kernel import (
+     FusedMoEPermuteExpertsUnpermute,
+     FusedMoEPrepareAndFinalize,
+ )
+ from vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe import (
+     init_aiter_topK_meta_data,
+ )
+ from vllm.model_executor.layers.fused_moe.routing_simulator import RoutingSimulator
+ from vllm.model_executor.layers.quantization.base_config import (
+     QuantizationConfig,
+ )
+ from vllm.model_executor.layers.quantization.utils.flashinfer_utils import (
+     is_flashinfer_supporting_global_sf,
+ )
+ from vllm.platforms import current_platform
+ from vllm.utils.math_utils import cdiv, round_up
+ from vllm.utils.torch_utils import (
+     aux_stream,
+     current_stream,
+     direct_register_custom_op,
+ )
+ from vllm.v1.worker.ubatching import dbo_current_ubatch_id
+
+ if current_platform.is_cuda_alike():
+     from .fused_moe import eplb_map_to_physical_and_record, fused_experts
+ else:
+     fused_experts = None  # type: ignore
+     FusedMoEPermuteExpertsUnpermute = object  # type: ignore
+     FusedMoEPrepareAndFinalize = object  # type: ignore
+
+     def _eplb_map_to_physical_and_record(
+         topk_ids: torch.Tensor,
+         expert_load_view: torch.Tensor,
+         logical_to_physical_map: torch.Tensor,
+         logical_replica_count: torch.Tensor,
+         indices_type: torch.dtype | None,
+     ) -> torch.Tensor:
+         # CPU fallback: no EPLB, so just return as is
+         return topk_ids
+
+     eplb_map_to_physical_and_record = _eplb_map_to_physical_and_record
+ from vllm.model_executor.layers.fused_moe.fused_moe import grouped_topk
+ from vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe import (  # noqa: E501
+     rocm_aiter_grouped_topk,
+ )
+
+ if current_platform.is_tpu():
+     from .moe_pallas import fused_moe as fused_moe_pallas
+ else:
+     fused_moe_pallas = None  # type: ignore
+
+ from vllm.model_executor.layers.fused_moe.fused_moe_method_base import (
+     FusedMoEMethodBase,
+ )
+ from vllm.model_executor.layers.fused_moe.fused_moe_modular_method import (
+     FusedMoEModularMethod,
+ )
+ from vllm.model_executor.layers.fused_moe.unquantized_fused_moe_method import (
+     UnquantizedFusedMoEMethod,
+ )
+
+ logger = init_logger(__name__)
+
+
+ class FusedMoeWeightScaleSupported(Enum):
+     TENSOR = "tensor"
+     CHANNEL = "channel"
+     GROUP = "group"
+     BLOCK = "block"
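+
+ # A rough sketch of what each granularity above means for a weight of shape
+ # (out_features, in_features) (assumed from common usage; exact scale shapes
+ # depend on the quantization method):
+ #   TENSOR  -> one scalar scale per weight tensor
+ #   CHANNEL -> one scale per output channel
+ #   GROUP   -> one scale per group of input elements (e.g. group_size=128)
+ #   BLOCK   -> one scale per 2D block (e.g. 128x128 tiles)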
+
+
+ def determine_expert_map(
+     ep_size: int,
+     ep_rank: int,
+     global_num_experts: int,
+     expert_placement_strategy: ExpertPlacementStrategy = "linear",
+     num_fused_shared_experts: int = 0,
+     return_expert_mask: bool = False,
+ ) -> tuple[int, torch.Tensor | None, torch.Tensor | None]:
+     """
+     Calculates how many experts should be assigned to each rank for EP and
+     creates a mapping from global to local expert index. Experts are
+     distributed as evenly as possible across ranks; any remainder is
+     assigned one extra expert per rank to the lowest-numbered ranks.
+
+     Args:
+         ep_size: The size of the expert parallel group.
+         ep_rank: The rank of the current process in the expert parallel
+             group.
+         global_num_experts: The total number of experts in the model.
+         expert_placement_strategy: The expert placement strategy.
+         num_fused_shared_experts: Number of shared experts fused into the
+             routed experts (appended to the end of the expert map).
+         return_expert_mask: Whether to also build and return an expert
+             mask. Used only when AITER MoE is enabled.
+
+     Returns:
+         tuple[int, torch.Tensor | None, torch.Tensor | None]: A tuple
+         containing:
+             - local_num_experts (int): The number of experts assigned
+                 to the current rank.
+             - expert_map (torch.Tensor | None): A tensor of shape
+                 (global_num_experts,) mapping from global to local index.
+                 Contains -1 for experts not assigned to the current rank.
+                 None if ep_size is 1.
+             - expert_mask (torch.Tensor | None): A tensor of shape
+                 (global_num_experts + num_fused_shared_experts + 1,)
+                 containing 1 for experts assigned to the current rank
+                 and 0 for the trailing sentinel entry.
+                 None if ep_size is 1 or return_expert_mask is False.
+                 Used only when AITER MoE is enabled.
+     """
+     assert ep_size > 0
+     if ep_size == 1:
+         return (global_num_experts, None, None)
+
+     # Distribute experts as evenly as possible to each rank.
+     base_experts = global_num_experts // ep_size
+     remainder = global_num_experts % ep_size
+     local_num_experts = base_experts + 1 if ep_rank < remainder else base_experts
+
+     # Create a tensor of size num_experts filled with -1
+     expert_map = torch.full((global_num_experts,), -1, dtype=torch.int32)
+     # Create an expert map for the local experts
+     if expert_placement_strategy == "linear":
+         start_idx = ep_rank * base_experts + min(ep_rank, remainder)
+         expert_map[start_idx : start_idx + local_num_experts] = torch.arange(
+             0, local_num_experts, dtype=torch.int32
+         )
+     elif expert_placement_strategy == "round_robin":
+         local_log_experts = torch.arange(
+             ep_rank, global_num_experts, ep_size, dtype=torch.int32
+         )
+
+         expert_map[local_log_experts] = torch.arange(
+             0, local_num_experts, dtype=torch.int32
+         )
+     else:
+         raise ValueError(
+             "Unsupported expert placement strategy "
+             f"'{expert_placement_strategy}', expected one of "
+             f"{get_args(ExpertPlacementStrategy)}"
+         )
+
+     expert_mask = None
+     if return_expert_mask:
+         expert_mask = torch.ones(
+             (global_num_experts + num_fused_shared_experts + 1,), dtype=torch.int32
+         )
+         expert_mask[-1] = 0
+         expert_mask[:global_num_experts] = expert_map > -1
+         expert_map = torch.cat(
+             (
+                 expert_map,
+                 torch.tensor(
+                     [local_num_experts + i for i in range(num_fused_shared_experts)],
+                     dtype=torch.int32,
+                 ),
+             ),
+             dim=0,
+         )
+
+     return (local_num_experts, expert_map, expert_mask)
+
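+ # A minimal sketch of the mapping above (assuming the default "linear"
+ # placement, no fused shared experts, and no expert mask):
+ #
+ #   determine_expert_map(ep_size=2, ep_rank=0, global_num_experts=5)
+ #   # -> (3, tensor([ 0,  1,  2, -1, -1], dtype=torch.int32), None)
+ #   determine_expert_map(ep_size=2, ep_rank=1, global_num_experts=5)
+ #   # -> (2, tensor([-1, -1, -1,  0,  1], dtype=torch.int32), None)
+ #
+ # With "round_robin" placement, rank 0 would instead own experts 0, 2, 4.
+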
+
+ def get_compressed_expert_map(expert_map: torch.Tensor) -> str:
+     """
+     Compresses the expert map by removing any -1 entries.
+
+     Args:
+         expert_map (torch.Tensor): A tensor of shape (global_num_experts,)
+             mapping from global to local index. Contains -1 for experts not
+             assigned to the current rank.
+
+     Returns:
+         str: A string mapping from local to global index. A string is used
+             so the value is hashable, which allows logging it only once.
+     """
+     global_indices = torch.where(expert_map != -1)[0]
+     local_indices = expert_map[global_indices]
+     return ", ".join(
+         f"{local_index.item()}->{global_index.item()}"
+         for local_index, global_index in zip(local_indices, global_indices)
+     )
+
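+ # For example (sketch), compressing the rank-1 map from the example above:
+ #
+ #   get_compressed_expert_map(torch.tensor([-1, -1, -1, 0, 1]))
+ #   # -> "0->3, 1->4"
+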
+
+ def maybe_roundup_hidden_size(
+     hidden_size: int,
+     act_dtype: torch.dtype,
+     quant_config: QuantizationConfig | None,
+     moe_parallel_config: FusedMoEParallelConfig,
+     is_lora_enabled: bool,
+ ) -> int:
+     """
+     Given the layer hidden size and MoE configurations, round up
+     hidden_size if necessary.
+
+     Args:
+         hidden_size: Layer hidden size.
+         act_dtype: Data type of the layer activations.
+         quant_config: Fused MoE quantization configuration.
+         moe_parallel_config: Fused MoE parallelization strategy
+             configuration.
+         is_lora_enabled: True if the engine has LoRA enabled. Used when
+             selecting the MxFP4Backend for mxfp4 quantization.
+
+     Returns:
+         The rounded-up hidden_size if the configs require it; the original
+         hidden_size otherwise.
+     """
+     from vllm.model_executor.layers.fused_moe.all2all_utils import (
+         maybe_roundup_layer_hidden_size,
+     )
+
+     hidden_size = maybe_roundup_layer_hidden_size(
+         hidden_size, act_dtype, moe_parallel_config
+     )
+
+     # We pad globally so that EP buffer allocation works.
+     if quant_config and quant_config.get_name() == "mxfp4":
+         from vllm.model_executor.layers.quantization.mxfp4 import (
+             Mxfp4Backend,
+             get_mxfp4_backend,
+         )
+
+         current_mxfp4_backend = get_mxfp4_backend(is_lora_enabled)
+         if (
+             current_mxfp4_backend == Mxfp4Backend.SM90_FI_MXFP4_BF16
+             or current_mxfp4_backend == Mxfp4Backend.SM100_FI_MXFP4_MXFP8_CUTLASS
+         ):
+             hidden_size = round_up(hidden_size, 128)
+         elif (
+             current_platform.is_rocm()
+             or current_mxfp4_backend == Mxfp4Backend.SM100_FI_MXFP4_MXFP8_TRTLLM
+             or current_mxfp4_backend == Mxfp4Backend.SM100_FI_MXFP4_BF16
+         ):
+             hidden_size = round_up(hidden_size, 256)
+
+     return hidden_size
+
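+ # A quick illustration of the padding above (hypothetical sizes):
+ #
+ #   round_up(3000, 128)  # -> 3072
+ #   round_up(2944, 256)  # -> 3072, while round_up(2944, 128) == 2944
+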
+
+ @CustomOp.register("fused_moe")
+ class FusedMoE(CustomOp):
+     """FusedMoE layer for MoE models.
+
+     This layer contains both MergedColumnParallel weights (gate_up_proj /
+     w13) and RowParallelLinear weights (down_proj / w2).
+
+     Note: Mixtral uses w1, w2, and w3 for gate, up, and down_proj. We
+     copy that naming convention here and handle any remapping in the
+     load_weights function in each model implementation.
+
+     Args:
+         num_experts: Number of experts in the model
+         top_k: Number of experts selected for each token
+         hidden_size: Input hidden state size of the transformer
+         intermediate_size: Intermediate size of the experts
+         params_dtype: Data type for the parameters.
+         reduce_results: Whether to all_reduce on the output of the layer
+         renormalize: Whether to renormalize the logits in the fused_moe kernel
+         quant_config: Quantization configuration.
+         enable_eplb: Whether to enable expert parallelism load balancer.
+     """
+
+     def __init__(
+         self,
+         num_experts: int,  # Global number of experts
+         top_k: int,
+         hidden_size: int,
+         intermediate_size: int,
+         params_dtype: torch.dtype | None = None,
+         reduce_results: bool = False,
+         renormalize: bool = True,
+         use_grouped_topk: bool = False,
+         num_expert_group: int | None = None,
+         topk_group: int | None = None,
+         quant_config: QuantizationConfig | None = None,
+         tp_size: int | None = None,
+         ep_size: int | None = None,
+         dp_size: int | None = None,
+         prefix: str = "",
+         custom_routing_function: Callable | None = None,
+         scoring_func: str = "softmax",
+         routed_scaling_factor: float = 1.0,
+         e_score_correction_bias: torch.Tensor | None = None,
+         apply_router_weight_on_input: bool = False,
+         activation: str = "silu",
+         is_act_and_mul: bool = True,
+         enable_eplb: bool = False,
+         num_redundant_experts: int = 0,
+         has_bias: bool = False,
+         is_sequence_parallel: bool = False,
+         zero_expert_num: int | None = 0,
+         zero_expert_type: str | None = None,
+         expert_mapping: list[tuple[str, str, int, str]] | None = None,
+         n_shared_experts: int | None = None,
+         routing_method_type: int | None = None,
+     ):
+         super().__init__()
+
+         # Allow disabling of the separate shared experts stream for
+         # debug purposes.
+         # TODO: Remove this after more extensive testing with TP/DP
+         # and other execution modes
+         if envs.VLLM_DISABLE_SHARED_EXPERTS_STREAM:
+             logger.info_once("Disabling MoE shared_experts cuda stream")
+             self.shared_experts_stream = None
+         else:
+             # TODO(rob): enable shared expert overlap with non-cuda.
+             # aux_stream() returns None on non-cuda platforms.
+             self.shared_experts_stream = aux_stream()
+             if self.shared_experts_stream is not None:
+                 logger.info_once("Enabled separate cuda stream for MoE shared_experts")
+
+         if params_dtype is None:
+             params_dtype = torch.get_default_dtype()
+         self.params_dtype = params_dtype
+
+         vllm_config = get_current_vllm_config()
+         self.vllm_config = vllm_config
+
+         # FIXME (varun): We should have a better way of inferring the activation
+         # datatype. This works for now as the tensor datatype entering the MoE
+         # operation is typically unquantized (i.e. float16/bfloat16).
+         if vllm_config.model_config is not None:
+             moe_in_dtype = vllm_config.model_config.dtype
+         else:
+             # TODO (bnell): This is a hack to get test_mixtral_moe to work
+             # since model_config is not set in the pytest test.
+             moe_in_dtype = params_dtype
+
+         tp_size_ = (
+             tp_size if tp_size is not None else get_tensor_model_parallel_world_size()
+         )
+         dp_size_ = dp_size if dp_size is not None else get_dp_group().world_size
+
+         self.is_sequence_parallel = is_sequence_parallel
+         self.sp_size = tp_size_ if is_sequence_parallel else 1
+
+         self.moe_parallel_config: FusedMoEParallelConfig = FusedMoEParallelConfig.make(
+             tp_size_=tp_size_,
+             dp_size_=dp_size_,
+             vllm_parallel_config=vllm_config.parallel_config,
+         )
+
+         self.global_num_experts = num_experts + num_redundant_experts
+         self.logical_num_experts = num_experts
+         self.zero_expert_num = zero_expert_num
+         self.zero_expert_type = zero_expert_type
+
+         # Expert mapping used in self.load_weights
+         self.expert_mapping = expert_mapping
+
+         # Round up hidden size if needed.
+         hidden_size = maybe_roundup_hidden_size(
+             hidden_size,
+             moe_in_dtype,
+             quant_config,
+             self.moe_parallel_config,
+             is_lora_enabled=self.vllm_config.lora_config is not None,
+         )
+
+         # For smuggling this layer into the fused moe custom op
+         compilation_config = vllm_config.compilation_config
+         if prefix in compilation_config.static_forward_context:
+             raise ValueError("Duplicate layer name: {}".format(prefix))
+         compilation_config.static_forward_context[prefix] = self
+         self.layer_name = prefix
+
+         self.enable_eplb = enable_eplb
+         self.expert_load_view: torch.Tensor | None = None
+         self.logical_to_physical_map: torch.Tensor | None = None
+         self.logical_replica_count: torch.Tensor | None = None
+
+         # ROCm aiter shared experts fusion
+         self.rocm_aiter_fmoe_enabled = rocm_aiter_ops.is_fused_moe_enabled()
+         self.aiter_fmoe_shared_expert_enabled = (
+             rocm_aiter_ops.is_fusion_moe_shared_experts_enabled()
+         )
+
+         self.num_fused_shared_experts = (
+             n_shared_experts
+             if n_shared_experts is not None and self.aiter_fmoe_shared_expert_enabled
+             else 0
+         )
+         if (
+             not self.aiter_fmoe_shared_expert_enabled
+             and self.num_fused_shared_experts != 0
+         ):
+             raise ValueError(
+                 "n_shared_experts is only supported on ROCm aiter when "
+                 "VLLM_ROCM_USE_AITER_FUSION_SHARED_EXPERTS is enabled"
+             )
+
+         # Determine expert maps
+         if self.use_ep:
+             if self.enable_eplb:
+                 assert self.global_num_experts % self.ep_size == 0, (
+                     "EPLB currently only supports even distribution of "
+                     "experts across ranks."
+                 )
+             else:
+                 assert num_redundant_experts == 0, (
+                     "Redundant experts are only supported with EPLB."
+                 )
+
+             expert_placement_strategy = (
+                 vllm_config.parallel_config.expert_placement_strategy
+             )
+             if expert_placement_strategy == "round_robin":
+                 # TODO(Bruce): will support round robin expert placement with
+                 # EPLB enabled in the future.
+                 round_robin_supported = (
+                     (num_expert_group is not None and num_expert_group > 1)
+                     and num_redundant_experts == 0
+                     and not self.enable_eplb
+                 )
+
+                 if not round_robin_supported:
+                     logger.warning(
+                         "Round-robin expert placement is only supported for "
+                         "models with multiple expert groups and no redundant "
+                         "experts. Falling back to linear expert placement."
+                     )
+                     expert_placement_strategy = "linear"
+
+             self.expert_map: torch.Tensor | None
+             local_num_experts, expert_map, expert_mask = determine_expert_map(
+                 ep_size=self.ep_size,
+                 ep_rank=self.ep_rank,
+                 global_num_experts=self.global_num_experts,
+                 expert_placement_strategy=expert_placement_strategy,
+                 num_fused_shared_experts=self.num_fused_shared_experts,
+                 return_expert_mask=self.rocm_aiter_fmoe_enabled,
+             )
+             self.local_num_experts = local_num_experts
+             self.register_buffer("expert_map", expert_map)
+             self.register_buffer("expert_mask", expert_mask)
+             logger.info_once(
+                 "[EP Rank %s/%s] Expert parallelism is enabled. Expert "
+                 "placement strategy: %s. Local/global"
+                 " number of experts: %s/%s. Experts local to global index map:"
+                 " %s.",
+                 self.ep_rank,
+                 self.ep_size,
+                 expert_placement_strategy,
+                 self.local_num_experts,
+                 self.global_num_experts,
+                 get_compressed_expert_map(self.expert_map),
+             )
+         else:
+             self.local_num_experts, self.expert_map, self.expert_mask = (
+                 self.global_num_experts,
+                 None,
+                 None,
+             )
+
+         self.top_k = top_k
+
+         self._init_aiter_shared_experts_topK_buffer(
+             vllm_config=vllm_config, dp_size=dp_size_
+         )
+
+         assert intermediate_size % self.tp_size == 0
+         self.hidden_size = hidden_size
+         self.intermediate_size_per_partition = intermediate_size // self.tp_size
+         self.reduce_results = reduce_results
+         self.renormalize = renormalize
+         self.use_grouped_topk = use_grouped_topk
+         if self.use_grouped_topk:
+             assert num_expert_group is not None and topk_group is not None
+         self.num_expert_group = num_expert_group
+         self.topk_group = topk_group
+         self.custom_routing_function = custom_routing_function
+         self.scoring_func = scoring_func
+         self.routed_scaling_factor = routed_scaling_factor
+         self.e_score_correction_bias = e_score_correction_bias
+         self.apply_router_weight_on_input = apply_router_weight_on_input
+         self.activation = activation
+
+         if self.scoring_func != "softmax" and not self.use_grouped_topk:
+             raise ValueError(
+                 "Only softmax scoring function is supported for non-grouped topk."
+             )
+
+         # TODO: Better logic to determine the routing method type
+         if routing_method_type is not None:
+             self.routing_method_type = routing_method_type
+         else:
+             if scoring_func == "sigmoid":
+                 if self.use_grouped_topk:
+                     self.routing_method_type = RoutingMethodType.DeepSeekV3
+                 elif self.top_k == 1:
+                     self.routing_method_type = RoutingMethodType.Llama4
+             elif self.scoring_func == "softmax":
+                 self.routing_method_type = (
+                     RoutingMethodType.Renormalize
+                     if not self.renormalize
+                     else RoutingMethodType.RenormalizeNaive
+                 )
+             else:
+                 self.routing_method_type = RoutingMethodType.TopK
+
+         self.moe_config: FusedMoEConfig = FusedMoEConfig(
+             num_experts=self.global_num_experts,
+             experts_per_token=top_k,
+             hidden_dim=hidden_size,
+             num_local_experts=self.local_num_experts,
+             moe_parallel_config=self.moe_parallel_config,
+             in_dtype=moe_in_dtype,
+             max_num_tokens=envs.VLLM_MOE_DP_CHUNK_SIZE,
+             has_bias=has_bias,
+             is_act_and_mul=is_act_and_mul,
+             is_lora_enabled=vllm_config.lora_config is not None,
+         )
+
+         self.quant_config = quant_config
+
+         def _get_quant_method() -> FusedMoEMethodBase:
+             """
+             Helper function to ensure self.quant_method is never None and
+             of the proper type.
+             """
+             quant_method = None
+             if self.quant_config is not None:
+                 quant_method = self.quant_config.get_quant_method(self, prefix)
+             if quant_method is None:
+                 quant_method = UnquantizedFusedMoEMethod(self.moe_config)
+             assert isinstance(quant_method, FusedMoEMethodBase)
+             return quant_method
+
+         # Note: get_quant_method will look at the layer's local_num_experts
+         # for heuristic purposes, so it must be initialized first.
+         self.quant_method: FusedMoEMethodBase = _get_quant_method()
+
+         if not self.moe_config.is_act_and_mul:
+             # Avoid circular import
+             from vllm.model_executor.layers.quantization.modelopt import (
+                 ModelOptFp8MoEMethod,
+             )
+
+             if not isinstance(
+                 self.quant_method, (UnquantizedFusedMoEMethod, ModelOptFp8MoEMethod)
+             ):
+                 raise NotImplementedError(
+                     "is_act_and_mul=False is supported only for unquantized "
+                     "and ModelOpt FP8 moe for now"
+                 )
+             if not current_platform.is_cuda():
+                 raise NotImplementedError(
+                     "is_act_and_mul=False is supported only for CUDA for now"
+                 )
+
+         if self.enable_eplb and not self.quant_method.supports_eplb:
+             # TODO: Add support for additional quantization methods.
+             # The implementation for other quantization methods does not
+             # contain essential differences, but the current quant API
+             # design causes duplicated work when extending to new
+             # quantization methods, so I'm leaving it for now.
+             # If you plan to add support for more quantization methods,
+             # please refer to the implementation in `Fp8MoEMethod`.
+             raise NotImplementedError(
+                 f"EPLB is not supported for {self.quant_method.__class__.__name__}. "
+                 "EPLB is only supported for FP8 quantization for now."
+             )
+
+         moe_quant_params = {
+             "num_experts": self.local_num_experts,
+             "hidden_size": hidden_size,
+             "intermediate_size_per_partition": self.intermediate_size_per_partition,
+             "params_dtype": params_dtype,
+             "weight_loader": self.weight_loader,
+             "global_num_experts": self.global_num_experts,
+         }
+         # need full intermediate size pre-sharding for WNA16 act order
+         if self.quant_method.__class__.__name__ in (
+             "GPTQMarlinMoEMethod",
+             "CompressedTensorsWNA16MarlinMoEMethod",
+             "CompressedTensorsWNA16MoEMethod",
+         ):
+             moe_quant_params["intermediate_size_full"] = intermediate_size
+
+         self.quant_method.create_weights(layer=self, **moe_quant_params)
+
+         # Chunked all2all staging tensor
+         self.batched_hidden_states: torch.Tensor | None = None
+         self.batched_router_logits: torch.Tensor | None = None
+
+     # Note: maybe_init_modular_kernel should only be called by
+     # prepare_communication_buffer_for_model.
+     # This is called after all weight loading and post-processing, so it
+     # should be safe to swap out the quant_method.
+     def maybe_init_modular_kernel(self) -> None:
+         self.ensure_moe_quant_config_init()
+         prepare_finalize = self.quant_method.maybe_make_prepare_finalize()
+         if prepare_finalize is not None:
+             logger.debug(
+                 "%s for %s(%s)", prepare_finalize.__class__.__name__, self, id(self)
+             )
+             self.quant_method = FusedMoEModularMethod.make(
+                 self, self.quant_method, prepare_finalize, self.shared_experts
+             )
+
+     @property
+     def shared_experts(self) -> torch.nn.Module | None:
+         return None
+
+     @property
+     def gate(self) -> torch.nn.Module | None:
+         return None
+
+     @property
+     def tp_size(self):
+         return self.moe_parallel_config.tp_size
+
+     @property
+     def dp_size(self):
+         return self.moe_parallel_config.dp_size
+
+     @property
+     def ep_size(self):
+         return self.moe_parallel_config.ep_size
+
+     @property
+     def tp_rank(self):
+         return self.moe_parallel_config.tp_rank
+
+     @property
+     def dp_rank(self):
+         return self.moe_parallel_config.dp_rank
+
+     @property
+     def ep_rank(self):
+         return self.moe_parallel_config.ep_rank
+
+     @property
+     def use_ep(self):
+         return self.moe_parallel_config.use_ep
+
+     @property
+     def use_pplx_kernels(self):
+         return self.moe_parallel_config.use_pplx_kernels
+
+     @property
+     def use_deepep_ht_kernels(self):
+         return self.moe_parallel_config.use_deepep_ht_kernels
+
+     @property
+     def use_deepep_ll_kernels(self):
+         return self.moe_parallel_config.use_deepep_ll_kernels
+
+     @property
+     def use_flashinfer_cutlass_kernels(self):
+         return (
+             self.moe_quant_config is not None
+             and self.moe_quant_config.quant_dtype == "nvfp4"
+             and self.moe_config.use_flashinfer_cutlass_kernels
+         )
+
+     @property
+     def use_marlin_kernels(self):
+         return getattr(self.quant_method, "use_marlin", False)
+
+     @property
+     def use_dp_chunking(self) -> bool:
+         return (
+             self.moe_parallel_config.use_pplx_kernels
+             or self.moe_parallel_config.use_deepep_ll_kernels
+             or (self.dp_size > 1 and self.use_flashinfer_cutlass_kernels)
+         )
+
+     @property
+     def is_internal_router(self) -> bool:
+         # By default, the router/gate is called before the FusedMoE forward pass
+         return False
+
+     def update_expert_map(self):
+         # ep_size and ep_rank should already be updated
+         assert self.expert_map is not None
+         with self.expert_map.device:
+             local_num_experts, expert_map, expert_mask = determine_expert_map(
+                 ep_size=self.ep_size,
+                 ep_rank=self.ep_rank,
+                 global_num_experts=self.global_num_experts,
+                 num_fused_shared_experts=self.num_fused_shared_experts,
+                 return_expert_mask=self.rocm_aiter_fmoe_enabled,
+             )
+             self.local_num_experts = local_num_experts
+             self.register_buffer("expert_map", expert_map)
+             self.register_buffer("expert_mask", expert_mask)
+             if self.aiter_fmoe_shared_expert_enabled:
+                 self._init_aiter_shared_experts_topK_buffer(
+                     vllm_config=get_current_vllm_config(),
+                     dp_size=get_dp_group().world_size,
+                 )
+
+     def _load_per_tensor_weight_scale(
+         self,
+         shard_id: str,
+         param: torch.nn.Parameter,
+         loaded_weight: torch.Tensor,
+         expert_id: int,
+     ):
+         param_data = param.data
+         # for per tensor weight quantization
+         if shard_id in ("w1", "w3"):
+             # We have to keep the weight scales of w1 and w3 because
+             # we need to re-quantize w1/w3 weights after weight loading.
+             idx = 0 if shard_id == "w1" else 1
+             param_data[expert_id][idx] = loaded_weight
+         # If we are in the row parallel case (down_proj)
+         elif shard_id == "w2":
+             param_data[expert_id] = loaded_weight
+
+     def _load_combined_w13_weight_scale(
+         self,
+         shard_dim: int,
+         loaded_weight: torch.Tensor,
+         param: torch.Tensor,
+         tp_rank: int,
+     ):
+         """
+         Load w13 weight scales assuming that w1 weight scales and w3 weight
+         scales are stored in the same loaded_weight tensor.
+         """
+         shard_size = param.shape[shard_dim]
+         loaded_weight = loaded_weight.narrow(
+             shard_dim, shard_size * tp_rank, shard_size
+         )
+         param.copy_(loaded_weight)
+
+     def _load_model_weight_or_group_weight_scale(
+         self,
+         shard_dim: int,
+         expert_data: torch.Tensor,
+         shard_id: str,
+         loaded_weight: torch.Tensor,
+         tp_rank: int,
+         load_full_w2: bool = False,
+     ):
+         """
+         Load grouped weight scales for group quantization, or model weights.
+
+         :param shard_dim: dimension to shard
+         :param expert_data: parameter for a particular expert
+         :param shard_id: either w1, w2, or w3
+         :param loaded_weight: checkpoint weight to load into the param
+         :param tp_rank: tensor parallel rank
+         :param load_full_w2: whether to load the full w2 weight without
+             sharding.
+         """
+         if shard_id == "w2":
+             # In the case where we have actorder/g_idx, we do not partition the
+             # w2 scales, as indicated by the `load_full` argument, for all tp cases
+             self._load_w2(
+                 shard_dim=shard_dim,
+                 loaded_weight=loaded_weight,
+                 expert_data=expert_data,
+                 tp_rank=tp_rank,
+                 load_full=load_full_w2,
+             )
+         elif shard_id in ("w1", "w3"):
+             self._load_w13(
+                 shard_id=shard_id,
+                 shard_dim=shard_dim,
+                 loaded_weight=loaded_weight,
+                 expert_data=expert_data,
+                 tp_rank=tp_rank,
+             )
+
+     def _load_per_channel_weight_scale(
+         self,
+         expert_data: torch.Tensor,
+         shard_dim: int,
+         shard_id: str,
+         loaded_weight: torch.Tensor,
+         tp_rank: int,
+     ):
+         # for per channel weight quantization
+         if shard_id == "w2":
+             expert_data.copy_(loaded_weight)
+         elif shard_id in ("w1", "w3"):
+             self._load_w13(
+                 shard_id=shard_id,
+                 shard_dim=shard_dim,
+                 loaded_weight=loaded_weight,
+                 expert_data=expert_data,
+                 tp_rank=tp_rank,
+             )
+
+     def _load_w13(
+         self,
+         expert_data: torch.Tensor,
+         shard_dim: int,
+         shard_id: str,
+         loaded_weight: torch.Tensor,
+         tp_rank: int,
+         load_full: bool = False,
+     ):
+         # Index the loaded weight for tp sharding.
+         # gate_up_proj: "MergedColumnParallel", so tp sharding on output_dim
+         if self.moe_config.is_act_and_mul:
+             shard_size = expert_data.shape[shard_dim] // 2
+         else:
+             shard_size = expert_data.shape[shard_dim]
+         if not load_full:
+             loaded_weight = loaded_weight.narrow(
+                 shard_dim, shard_size * tp_rank, shard_size
+             )
+         # Narrow parameter and load.
+         # w1, gate_proj: Load into first logical weight of w13.
+         if shard_id == "w1":
+             expert_data = expert_data.narrow(shard_dim, 0, shard_size)
+         # w3, up_proj: Load into second logical weight of w13.
+         else:
+             assert shard_id == "w3"
+             expert_data = expert_data.narrow(shard_dim, shard_size, shard_size)
+         expert_data.copy_(loaded_weight)
+
+     def _load_w2(
+         self,
+         expert_data: torch.Tensor,
+         shard_dim: int,
+         loaded_weight: torch.Tensor,
+         tp_rank: int,
+         load_full: bool = False,
+     ):
+         # Index the loaded weight for tp sharding.
+         # down_proj: "RowParallel", so tp sharding on input_dim
+         # Narrow parameter and load.
+         shard_size = expert_data.shape[shard_dim]
+         if not load_full:
+             loaded_weight = loaded_weight.narrow(
+                 shard_dim, shard_size * tp_rank, shard_size
+             )
+         # w2, down_proj: Load into only logical weight of w2.
+         expert_data.copy_(loaded_weight)
+
+     def _load_single_value(
+         self, param: torch.nn.Parameter, loaded_weight: torch.Tensor, expert_id: int
+     ):
+         param_data = param.data
+
+         # Input scales can be loaded directly and should be equal.
+         param_data[expert_id] = loaded_weight
+
+     def _load_g_idx(
+         self,
+         shard_id: str,
+         expert_data: torch.Tensor,
+         shard_dim: int,
+         loaded_weight: torch.Tensor,
+         tp_rank: int,
+     ):
+         if shard_id == "w2":
+             self._load_w2(
+                 shard_dim=shard_dim,
+                 loaded_weight=loaded_weight,
+                 expert_data=expert_data,
+                 tp_rank=tp_rank,
+             )
+         else:
+             assert shard_id in ("w1", "w3")
+             expert_data.copy_(loaded_weight)
+
+     def _map_global_expert_id_to_local_expert_id(self, expert_id: int) -> int:
+         if self.expert_map is None:
+             return expert_id
+         return self.expert_map[expert_id].item()
+
+     def _init_aiter_shared_experts_topK_buffer(
+         self, vllm_config: VllmConfig, dp_size: int
+     ):
+         if self.num_fused_shared_experts > 0:
+             init_aiter_topK_meta_data(
+                 n_routed_experts=self.global_num_experts,
+                 n_shared_experts=self.num_fused_shared_experts,
+                 top_k=self.top_k,
+                 tp_rank=self.ep_rank if self.use_ep else self.tp_rank,
+                 tp_size=self.ep_size if self.use_ep else self.tp_size,
+                 shared_experts_score=1.0,
+                 max_num_tokens=vllm_config.scheduler_config.max_num_batched_tokens
+                 * dp_size,
+                 is_EP=self.use_ep,
+             )
+             self.local_num_experts += self.num_fused_shared_experts
+
+     @overload
+     def weight_loader(
+         self,
+         param: torch.nn.Parameter,
+         loaded_weight: torch.Tensor,
+         weight_name: str,
+         shard_id: str,
+         expert_id: int,
+         return_success: Literal[False],
+     ) -> None: ...
+
+     @overload
+     def weight_loader(
+         self,
+         param: torch.nn.Parameter,
+         loaded_weight: torch.Tensor,
+         weight_name: str,
+         shard_id: str,
+         expert_id: int,
+         return_success: Literal[True],
+     ) -> bool: ...
+
+     def weight_loader(
+         self,
+         param: torch.nn.Parameter,
+         loaded_weight: torch.Tensor,
+         weight_name: str,
+         shard_id: str,
+         expert_id: int,
+         return_success: bool = False,
+     ) -> bool | None:
+         if self.quant_config and self.quant_config.get_name() == "mxfp4":
+             # FIXME: for gpt-oss, all experts are combined
+             if "bias" in weight_name:
+                 dim1 = loaded_weight.shape[1]
+                 param.data[:, :dim1].copy_(loaded_weight)
+             else:
+                 dim1 = loaded_weight.shape[1]
+                 dim2 = loaded_weight.shape[2]
+                 param.data[:, :dim1, :dim2].copy_(loaded_weight)
+             return True if return_success else None
+
+         quant_method_name = self.quant_method.__class__.__name__
+         global_expert_id = expert_id
+         expert_id = self._map_global_expert_id_to_local_expert_id(global_expert_id)
+
+         allow_flashinfer = getattr(self.quant_method, "allow_flashinfer", False)
+         moe_backend = getattr(self.quant_method, "flashinfer_moe_backend", None)
+
+         use_global_sf = (
+             allow_flashinfer
+             and is_flashinfer_supporting_global_sf(moe_backend)
+             and "input_scale" in weight_name
+             and quant_method_name == "ModelOptNvFp4FusedMoE"
+         )
+
+         if expert_id == -1 and not use_global_sf:
+             # Failed to load this param since it's not local to this rank
+             return False if return_success else None
+         # Hereafter, `expert_id` is the local physical id
+
+         # compressed-tensors checkpoints with packed weights are stored flipped
+         # TODO (mgoin): check self.quant_method.quant_config.quant_format
+         # against known CompressionFormat enum values that have this quality
+         if self.quant_method.__class__.__name__ in (
+             "CompressedTensorsWNA16MarlinMoEMethod",
+             "CompressedTensorsWNA16MoEMethod",
+         ):
+             loaded_weight = loaded_weight.t().contiguous()
+
+         if shard_id not in ("w1", "w2", "w3"):
+             raise ValueError(
+                 f"shard_id must be one of ['w1', 'w2', 'w3'] but got {shard_id}."
+             )
+
+         # Fetch the dim to shard the parameter/loaded weight along, based on
+         # the shard id. This is whichever dimension holds
+         # intermediate_size_per_partition.
+         SHARD_ID_TO_SHARDED_DIM = {"w1": 0, "w2": 1, "w3": 0}
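+         # e.g. (sketch): for a per-expert weight of shape
+         # (out_features, in_features), w1/w3 are column-parallel and shard
+         # the output dim (0), while w2 is row-parallel and shards the
+         # input dim (1).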
+
+         is_gguf_weight = getattr(param, "is_gguf_weight", False)
+         is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
+         if is_gguf_weight_type:
+             param.weight_type = loaded_weight.item()
+             param.data.copy_(loaded_weight)
+             return True if return_success else None
+
+         # Case for BitsAndBytes
+         use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit", False)
+         if use_bitsandbytes_4bit:
+             shard_dim = 0
+
+             expert_data = param.data[expert_id]
+             if shard_id == "w2":
+                 expert_data.copy_(loaded_weight)
+             elif shard_id in ("w1", "w3"):
+                 # BNB inflight quantization has already sharded the weights
+                 full_load = True
+                 self._load_w13(
+                     shard_id=shard_id,
+                     shard_dim=shard_dim,
+                     loaded_weight=loaded_weight,
+                     expert_data=expert_data,
+                     tp_rank=self.tp_rank,
+                     load_full=full_load,
+                 )
+             return True if return_success else None
+
+         # is_transposed: whether the dim to shard the weight along should be
+         # flipped. Required by GPTQ and compressed-tensors; the shard dim
+         # should be whichever dimension holds intermediate_size_per_partition.
+         is_transposed = getattr(param, "is_transposed", False)
+         shard_dim = SHARD_ID_TO_SHARDED_DIM[shard_id]
+         if is_transposed:
+             shard_dim = int(not shard_dim)
+
+         full_load = len(loaded_weight.shape) == 3
+         if full_load:
+             shard_dim += 1
+
+         # Materialize GGUF UninitializedParameter
+         if is_gguf_weight and isinstance(param, UninitializedParameter):
+             final_shape = list(loaded_weight.shape)
+             if shard_id in ["w1", "w3"]:
+                 final_shape[1] *= 2
+             final_shape[shard_dim] = final_shape[shard_dim] // self.tp_size
+             param.materialize(final_shape, dtype=loaded_weight.dtype)
+
+         expert_data = param.data if full_load else param.data[expert_id]
+
+         # Case input scale: input_scale loading is only supported for fp8
+         if "input_scale" in weight_name:
+             # this is needed for compressed-tensors only
+             loaded_weight = loaded_weight.to(param.data.device)
+
+             if (
+                 "compressed" in quant_method_name.lower()
+                 and param.data[expert_id] != 1
+                 and (param.data[expert_id] - loaded_weight).abs() > 1e-5
+             ):
+                 raise ValueError(
+                     "input_scales of w1 and w3 of a layer "
+                     f"must be equal. But got {param.data[expert_id]} "
+                     f"vs. {loaded_weight}"
+                 )
+
+             self._load_single_value(
+                 param=param,
+                 loaded_weight=loaded_weight,
+                 expert_id=global_expert_id if use_global_sf else expert_id,
+             )
+             return True if return_success else None
+
+         # Case g_idx
+         if "g_idx" in weight_name:
+             self._load_g_idx(
+                 shard_dim=0,
+                 shard_id=shard_id,
+                 loaded_weight=loaded_weight,
+                 expert_data=expert_data,
+                 tp_rank=self.tp_rank,
+             )
+             return True if return_success else None
+
+         # TODO @dsikka: ModelOpt should follow the proper MoE loading pattern
+         if "ModelOpt" in quant_method_name:
+             # Determine per-tensor weight scale patterns based on variant
+             # Use the dedicated method instead of brittle string matching
+             uses_weight_scale_2 = self.quant_method.uses_weight_scale_2_pattern()
+
+             # Call _load_per_tensor_weight_scale() to load per-tensor (scalar)
+             # weight scales.
+             # Input scales are always per-tensor.
+             # Weight scales: FP4 uses "weight_scale_2" and FP8 uses
+             # "weight_scale" for per-tensor scales.
+             is_per_tensor = (
+                 "weight_scale_2" in weight_name
+                 if uses_weight_scale_2
+                 else "weight_scale" in weight_name
+             ) or "input_scale" in weight_name
+             if is_per_tensor:
+                 self._load_per_tensor_weight_scale(
+                     shard_id=shard_id,
+                     param=param,
+                     loaded_weight=loaded_weight,
+                     expert_id=expert_id,
+                 )
+                 return True if return_success else None
+
+             # If the weight is w13_weight_scale and the w13 weight scales are
+             # combined into a single loaded_weight, call
+             # _load_combined_w13_weight_scale() to load it.
+             # This is checked by comparing the hidden_out dims of the
+             # loaded_weight and the param.
+             if "w13_weight_scale" in weight_name:
+                 loaded_weight_hidden_out = loaded_weight.shape[-2]
+                 param_hidden_out = param.data.shape[-2] * self.tp_size
+                 if loaded_weight_hidden_out == param_hidden_out:
+                     self._load_combined_w13_weight_scale(
+                         shard_dim=shard_dim,
+                         loaded_weight=loaded_weight,
+                         param=param,
+                         tp_rank=self.tp_rank,
+                     )
+                     return True if return_success else None
+
+             # For other weights, call _load_model_weight_or_group_weight_scale()
+             # to load them.
+             if "weight" in weight_name:
+                 self._load_model_weight_or_group_weight_scale(
+                     shard_id=shard_id,
+                     shard_dim=shard_dim,
+                     loaded_weight=loaded_weight,
+                     expert_data=expert_data,
+                     tp_rank=self.tp_rank,
+                 )
+                 return True if return_success else None
+
+         # Case weight scales, zero_points and offset, weight/input global scales
+         if "scale" in weight_name or "zero" in weight_name or "offset" in weight_name:
+             # Load the weight scales and zp based on the quantization scheme;
+             # supported weight scales/zp can be found in
+             # FusedMoeWeightScaleSupported.
+             # TODO @dsikka: once hardened, refactor to use vLLM Parameters
+             # specific to each case
+             quant_method = getattr(param, "quant_method", None)
+             if quant_method == FusedMoeWeightScaleSupported.CHANNEL.value:
+                 self._load_per_channel_weight_scale(
+                     shard_id=shard_id,
+                     shard_dim=shard_dim,
+                     loaded_weight=loaded_weight,
+                     expert_data=expert_data,
+                     tp_rank=self.tp_rank,
+                 )
+             elif quant_method in [
+                 FusedMoeWeightScaleSupported.GROUP.value,
+                 FusedMoeWeightScaleSupported.BLOCK.value,
+             ]:
+                 self._load_model_weight_or_group_weight_scale(
+                     shard_id=shard_id,
+                     shard_dim=shard_dim,
+                     loaded_weight=loaded_weight,
+                     expert_data=expert_data,
+                     tp_rank=self.tp_rank,
+                     load_full_w2=getattr(param, "load_full_w2", False),
+                 )
+             elif quant_method == FusedMoeWeightScaleSupported.TENSOR.value:
+                 self._load_per_tensor_weight_scale(
+                     shard_id=shard_id,
+                     param=param,
+                     loaded_weight=loaded_weight,
+                     expert_id=expert_id,
+                 )
+             else:
+                 WEIGHT_SCALE_SUPPORTED = [e.value for e in FusedMoeWeightScaleSupported]
+                 raise ValueError(
+                     f"quant method must be one of {WEIGHT_SCALE_SUPPORTED}"
+                 )
+             return True if return_success else None
+
+         # Case weight_shape
+         if "weight_shape" in weight_name:
+             # only required by compressed-tensors
+             self._load_single_value(
+                 param=param, loaded_weight=loaded_weight, expert_id=expert_id
+             )
+             return True if return_success else None
+
+         # Case model weights
+         if "weight" in weight_name:
+             self._load_model_weight_or_group_weight_scale(
+                 shard_id=shard_id,
+                 shard_dim=shard_dim,
+                 loaded_weight=loaded_weight,
+                 expert_data=expert_data,
+                 tp_rank=self.tp_rank,
+             )
+             return True if return_success else None
+
+         return False if return_success else None
+
+     def load_weights(
+         self, weights: Iterable[tuple[str, torch.Tensor]]
+     ) -> Iterable[str]:
+         if (expert_mapping := self.expert_mapping) is None:
+             raise ValueError(
+                 "`self.expert_mapping` must be provided to "
+                 "load weights using `self.load_weights`."
+             )
+         for expert_name, loaded_weight in weights:
+             qual_name = f"{self.layer_name}.{expert_name}"
+             for param_name, weight_name, expert_id, shard_id in expert_mapping:
+                 if weight_name not in qual_name:
+                     continue
+                 weight_name = qual_name.replace(weight_name, param_name)
+                 param_name = weight_name.removeprefix(f"{self.layer_name}.")
+                 param = getattr(self, param_name)
+                 success = self.weight_loader(
+                     param=param,
+                     loaded_weight=loaded_weight,
+                     weight_name=weight_name,
+                     shard_id=shard_id,
+                     expert_id=expert_id,
+                     return_success=True,
+                 )
+                 if success:
+                     logger.debug(
+                         "Loaded %s for expert %d into %s",
+                         param_name,
+                         expert_id,
+                         self.layer_name,
+                     )
+                     yield param_name
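+
+     # A hypothetical expert_mapping entry (illustrative only; the exact
+     # strings come from each model's own weight-name mapping) looks like
+     #   ("w13_weight", "experts.0.gate_proj.weight", 0, "w1")
+     # i.e. a checkpoint tensor whose qualified name contains the second
+     # element is loaded into this layer's "w13_weight" parameter as shard
+     # "w1" of expert 0 via `weight_loader` above.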
1226
+
1227
+    def get_expert_weights(self) -> Iterable[torch.Tensor]:
+        weights = list(self.named_parameters())
+        assert all(
+            weight.is_contiguous()
+            for name, weight in weights
+            if not name.startswith("_shared_experts.")
+        )
+
+        # Filter out the non-expert weights.
+        # `e_score_correction_bias` is a bias for each logical expert,
+        # with shape (num_logical_experts,), not an expert weight.
+        NON_EXPERT_WEIGHTS = {
+            "e_score_correction_bias",
+        }
+
+        return [
+            weight.view(self.local_num_experts, -1)
+            for name, weight in weights
+            if name not in NON_EXPERT_WEIGHTS
+            and weight.shape != torch.Size([])
+            and not name.startswith("_shared_experts.")
+            # exclude parameters from non-expert submodules (e.g. gate/shared)
+            and not name.startswith("_gate.")
+        ]
+
+    def set_eplb_state(
+        self,
+        moe_layer_idx: int,
+        expert_load_view: torch.Tensor,
+        logical_to_physical_map: torch.Tensor,
+        logical_replica_count: torch.Tensor,
+    ) -> None:
+        """
+        Register the EPLB state in this layer.
+
+        This is used later in the forward pass, where we get the expert
+        mapping and record the load metrics in `expert_load_view`.
+        """
+        self.expert_load_view = expert_load_view[moe_layer_idx]
+        self.logical_to_physical_map = logical_to_physical_map[moe_layer_idx]
+        self.logical_replica_count = logical_replica_count[moe_layer_idx]
+
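# A small illustration (hypothetical shapes) of why set_eplb_state stores
# per-layer row views: writes through a layer's view are visible in the
# stacked state tensor that all MoE layers share.
import torch

num_moe_layers, num_physical_experts = 4, 16
expert_load_view = torch.zeros(num_moe_layers, num_physical_experts)
layer_view = expert_load_view[2]  # what a layer with moe_layer_idx=2 keeps
layer_view += 1                   # recording load through the view...
assert expert_load_view[2].sum().item() == num_physical_experts  # ...updates the stack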
+    def ensure_moe_quant_config_init(self):
+        if self.quant_method.moe_quant_config is None:
+            # Note: the moe_quant_config can't be constructed until after
+            # weight loading post processing.
+            self.quant_method.moe_quant_config = (
+                self.quant_method.get_fused_moe_quant_config(self)
+            )
+
+    @property
+    def moe_quant_config(self) -> FusedMoEQuantConfig | None:
+        self.ensure_moe_quant_config_init()
+        return self.quant_method.moe_quant_config
+
+    def ensure_dp_chunking_init(self):
+        if not self.use_dp_chunking or self.batched_hidden_states is not None:
+            return
+
+        states_shape: tuple[int, ...]
+        logits_shape: tuple[int, ...]
+
+        moe = self.moe_config
+
+        if self.vllm_config.parallel_config.enable_dbo:
+            states_shape = (2, moe.max_num_tokens, self.hidden_size)
+            logits_shape = (2, moe.max_num_tokens, self.logical_num_experts)
+        else:
+            states_shape = (moe.max_num_tokens, self.hidden_size)
+            logits_shape = (moe.max_num_tokens, self.logical_num_experts)
+
+        self.batched_hidden_states = torch.zeros(
+            states_shape, dtype=moe.in_dtype, device=torch.cuda.current_device()
+        )
+
+        self.batched_router_logits = torch.zeros(
+            logits_shape, dtype=moe.in_dtype, device=torch.cuda.current_device()
+        )
+
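# The staging-buffer shapes chosen above, spelled out with hypothetical
# sizes. Under DBO, a leading dimension of 2 gives each micro-batch (ubatch)
# its own buffer; without DBO, a single buffer suffices.
max_num_tokens, hidden_size, num_logical_experts = 1024, 4096, 64
for enable_dbo in (False, True):
    if enable_dbo:
        states_shape = (2, max_num_tokens, hidden_size)
        logits_shape = (2, max_num_tokens, num_logical_experts)
    else:
        states_shape = (max_num_tokens, hidden_size)
        logits_shape = (max_num_tokens, num_logical_experts)
    print(enable_dbo, states_shape, logits_shape)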
+    @staticmethod
+    def select_experts(
+        hidden_states: torch.Tensor,
+        router_logits: torch.Tensor,
+        top_k: int,
+        use_grouped_topk: bool,
+        renormalize: bool,
+        topk_group: int | None = None,
+        num_expert_group: int | None = None,
+        custom_routing_function: Callable | None = None,
+        scoring_func: str = "softmax",
+        routed_scaling_factor: float = 1.0,
+        e_score_correction_bias: torch.Tensor | None = None,
+        indices_type: torch.dtype | None = None,
+        enable_eplb: bool = False,
+        expert_map: torch.Tensor | None = None,
+        expert_load_view: torch.Tensor | None = None,
+        logical_to_physical_map: torch.Tensor | None = None,
+        logical_replica_count: torch.Tensor | None = None,
+        global_num_experts: int | None = None,
+        zero_expert_num: int | None = None,
+        zero_expert_type: str | None = None,
+        num_fused_shared_experts: int = 0,
+    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+        """
+        Route the input hidden states to the top-k experts based on the
+        router logits.
+
+        Returns:
+            (topk_weights, topk_ids, zero_expert_result)
+            (tuple[torch.Tensor, torch.Tensor, torch.Tensor]):
+                The weights, expert ids, and zero expert computation result.
+
+            **Compatibility**: When EPLB is not enabled, the returned ids are
+            equivalent to global logical ids, so they should be compatible
+            with plain MoE implementations without redundant experts.
+        """
+        from vllm.model_executor.layers.fused_moe.fused_moe import (
+            fused_topk,
+            fused_topk_bias,
+        )
+
+        # Check if we should use a routing simulation strategy.
+        routing_strategy = envs.VLLM_MOE_ROUTING_SIMULATION_STRATEGY
+        if routing_strategy != "":
+            topk_weights, topk_ids = RoutingSimulator.simulate_routing(
+                hidden_states=hidden_states,
+                router_logits=router_logits,
+                strategy_name=routing_strategy,
+                top_k=top_k,
+                indices_type=indices_type,
+            )
+
+        # DeepSeekV2 uses grouped_topk.
+        elif use_grouped_topk:
+            assert topk_group is not None
+            assert num_expert_group is not None
+            if rocm_aiter_ops.is_fused_moe_enabled():
+                if not rocm_aiter_ops.is_fusion_moe_shared_experts_enabled():
+                    assert num_fused_shared_experts == 0
+                grouped_topk_impl = partial(
+                    rocm_aiter_grouped_topk,
+                    num_fused_shared_experts=num_fused_shared_experts,
+                )
+            else:
+                grouped_topk_impl = grouped_topk
+
+            topk_weights, topk_ids = grouped_topk_impl(
+                hidden_states=hidden_states,
+                gating_output=router_logits,
+                topk=top_k,
+                renormalize=renormalize,
+                num_expert_group=num_expert_group,
+                topk_group=topk_group,
+                scoring_func=scoring_func,
+                routed_scaling_factor=routed_scaling_factor,
+                e_score_correction_bias=e_score_correction_bias,
+            )
+            if indices_type is not None:
+                topk_ids = topk_ids.to(dtype=indices_type)
+        elif e_score_correction_bias is not None:
+            topk_weights, topk_ids = fused_topk_bias(
+                hidden_states=hidden_states,
+                gating_output=router_logits,
+                e_score_correction_bias=e_score_correction_bias.data,
+                topk=top_k,
+                renormalize=renormalize,
+            )
+            if routed_scaling_factor is not None:
+                topk_weights *= routed_scaling_factor
+        elif custom_routing_function is None:
+            topk_weights, topk_ids, token_expert_indices = fused_topk(
+                hidden_states=hidden_states,
+                gating_output=router_logits,
+                topk=top_k,
+                renormalize=renormalize,
+                indices_type=indices_type,
+            )
+        else:
+            topk_weights, topk_ids = custom_routing_function(
+                hidden_states=hidden_states,
+                gating_output=router_logits,
+                topk=top_k,
+                renormalize=renormalize,
+            )
+            if indices_type is not None:
+                topk_ids = topk_ids.to(dtype=indices_type)
+
+        if enable_eplb:
+            assert expert_load_view is not None
+            assert logical_to_physical_map is not None
+            assert logical_replica_count is not None
+
+            topk_ids = eplb_map_to_physical_and_record(
+                topk_ids=topk_ids,
+                expert_load_view=expert_load_view,
+                logical_to_physical_map=logical_to_physical_map,
+                logical_replica_count=logical_replica_count,
+                indices_type=indices_type,
+            )
+
+        assert topk_ids.dtype == indices_type or indices_type is None
+
+        # Compute the zero expert result if needed.
+        if (
+            zero_expert_num is not None
+            and zero_expert_num > 0
+            and zero_expert_type is not None
+            and global_num_experts is not None
+        ):
+            zero_expert_result = zero_experts_compute_triton(
+                expert_indices=topk_ids,
+                expert_scales=topk_weights,
+                num_experts=global_num_experts,
+                zero_expert_type=zero_expert_type,
+                hidden_states=hidden_states,
+            )
+        else:
+            zero_expert_result = None
+        return topk_weights, topk_ids, zero_expert_result
+
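# A hedged reference sketch of the default softmax top-k path taken by
# select_experts when no grouped top-k, bias, custom routing function, or
# EPLB is in play. It is written with plain torch ops for clarity; the real
# fused_topk is a fused kernel, so this only mirrors its semantics.
import torch

num_tokens, num_experts, top_k = 4, 8, 2
router_logits = torch.randn(num_tokens, num_experts)
topk_weights, topk_ids = router_logits.softmax(dim=-1).topk(top_k, dim=-1)
renormalize = True
if renormalize:
    # Renormalize so each token's selected expert weights sum to 1.
    topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True)
assert topk_weights.shape == topk_ids.shape == (num_tokens, top_k)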
+    def must_reduce_shared_expert_outputs(self) -> bool:
+        """
+        The shared_experts are typically computed using a RowParallelLinear
+        layer, and the result of this function is typically used as the
+        reduce_results argument to that module.
+        When only tensor parallelism is used, the shared_experts results do
+        not need to be reduced immediately; instead, we reduce once at the
+        end of the MoE op (refer to the DeepSeekV2MoE module).
+        With EP and all2all kernels this is no longer viable, since all GPU
+        ranks in DP produce the complete set of hidden_states. Therefore the
+        shared_experts output must be reduced early.
+        """
+        assert self.quant_method is not None
+        return (
+            isinstance(self.quant_method, FusedMoEModularMethod)
+            and self.quant_method.fused_experts.output_is_reduced()
+        )
+
+    def maybe_all_reduce_tensor_model_parallel(self, final_hidden_states: torch.Tensor):
+        """
+        Some combine kernels already reduce across GPU ranks by default; in
+        that case the output is returned as-is, otherwise a tensor-model-
+        parallel all-reduce is applied.
+        """
+        if self.must_reduce_shared_expert_outputs():
+            return final_hidden_states
+        else:
+            return tensor_model_parallel_all_reduce(final_hidden_states)
+
+    def forward_native(
+        self,
+        hidden_states: torch.Tensor,
+        router_logits: torch.Tensor,
+    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
+        og_hidden_states = hidden_states.shape[-1]
+        if self.hidden_size != og_hidden_states:
+            hidden_states = F.pad(
+                hidden_states,
+                (0, self.hidden_size - og_hidden_states),
+                mode="constant",
+                value=0.0,
+            )
+
+        def reduce_output(states: torch.Tensor) -> torch.Tensor:
+            if (
+                not self.is_sequence_parallel
+                and not self.use_dp_chunking
+                and self.reduce_results
+                and (self.tp_size > 1 or self.ep_size > 1)
+            ):
+                states = self.maybe_all_reduce_tensor_model_parallel(states)
+            return states
+
+        if self.shared_experts is None:
+            if current_platform.is_tpu():
+                # TODO: Once the OOM issue for the TPU backend is resolved, we
+                # will switch to using the moe_forward custom op.
+                fused_output = self.forward_impl(hidden_states, router_logits)
+                assert not isinstance(fused_output, tuple)
+            else:
+                fused_output = torch.ops.vllm.moe_forward(
+                    hidden_states, router_logits, self.layer_name
+                )
+            if self.zero_expert_num is not None and self.zero_expert_num > 0:
+                assert isinstance(fused_output, tuple)
+                fused_output, zero_expert_result = fused_output
+                return (reduce_output(fused_output) + zero_expert_result)[
+                    ..., :og_hidden_states
+                ]
+            else:
+                return reduce_output(fused_output)[..., :og_hidden_states]
+        else:
+            if current_platform.is_tpu():
+                # TODO: Once the OOM issue for the TPU backend is resolved, we
+                # will switch to using the moe_forward custom op.
+                shared_output, fused_output = self.forward_impl(
+                    hidden_states, router_logits
+                )
+            else:
+                shared_output, fused_output = torch.ops.vllm.moe_forward_shared(
+                    hidden_states, router_logits, self.layer_name
+                )
+            return (
+                reduce_output(shared_output)[..., :og_hidden_states],
+                reduce_output(fused_output)[..., :og_hidden_states],
+            )
+
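# The pad/slice round trip in forward_native, isolated with hypothetical
# sizes: inputs are zero-padded up to the layer's hidden_size, and every
# returned tensor is sliced back to the model's original width.
import torch
import torch.nn.functional as F

og_hidden, padded_hidden = 4000, 4096
x = torch.randn(2, og_hidden)
x_padded = F.pad(x, (0, padded_hidden - og_hidden), mode="constant", value=0.0)
assert x_padded.shape[-1] == padded_hidden
assert torch.equal(x_padded[..., :og_hidden], x)  # slicing recovers the input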
+    def forward_cuda(
+        self,
+        hidden_states: torch.Tensor,
+        router_logits: torch.Tensor,
+    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
+        return self.forward_native(hidden_states, router_logits)
+
+    def forward_impl_chunked(
+        self,
+        full_hidden_states: torch.Tensor,
+        full_router_logits: torch.Tensor,
+        has_separate_shared_experts: bool,
+    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
+        assert self.batched_hidden_states is not None
+        assert self.batched_router_logits is not None
+        assert self.batched_hidden_states.dtype == full_hidden_states.dtype
+        assert self.batched_router_logits.dtype == full_router_logits.dtype
+        # Check size compatibility.
+        assert self.batched_hidden_states.size(-1) == full_hidden_states.size(-1)
+        assert self.batched_router_logits.size(-1) == full_router_logits.size(-1)
+
+        full_fused_final_hidden_states = torch.empty_like(full_hidden_states)
+        if self.shared_experts is not None:
+            full_shared_final_hidden_states = torch.empty_like(full_hidden_states)
+
+        def process_chunk(chunk_start, chunk_end, skip_result_store=False):
+            chunk_size = chunk_end - chunk_start
+            hidden_states = full_hidden_states[chunk_start:chunk_end, :]
+            router_logits = full_router_logits[chunk_start:chunk_end, :]
+
+            assert self.batched_hidden_states is not None
+            assert self.batched_router_logits is not None
+            # This is only true when DBO has been enabled in the config.
+            # Both tensors will have an outer dimension for the ubatch id.
+            if self.batched_hidden_states.dim() == 3:
+                assert self.batched_router_logits.dim() == 3
+                batch_buffer_idx = dbo_current_ubatch_id()
+                batched_hidden_states = self.batched_hidden_states[batch_buffer_idx, :]
+                batched_router_logits = self.batched_router_logits[batch_buffer_idx, :]
+            else:
+                batched_hidden_states = self.batched_hidden_states
+                batched_router_logits = self.batched_router_logits
+
+            assert (
+                batched_hidden_states.size(0)  # type: ignore
+                >= chunk_size
+            )
+            assert (
+                batched_router_logits.size(0)  # type: ignore
+                >= chunk_size
+            )
+            staged_hidden_states = batched_hidden_states[:chunk_size, :]  # type: ignore
+            staged_router_logits = batched_router_logits[:chunk_size, :]  # type: ignore
+            staged_hidden_states.copy_(hidden_states, non_blocking=True)
+            staged_router_logits.copy_(router_logits, non_blocking=True)
+
+            # Matrix multiply.
+            final_hidden_states = self.quant_method.apply(
+                layer=self,
+                x=staged_hidden_states,
+                router_logits=staged_router_logits,
+                top_k=self.top_k,
+                renormalize=self.renormalize,
+                use_grouped_topk=self.use_grouped_topk,
+                global_num_experts=self.global_num_experts,
+                expert_map=self.expert_map
+                if not self.rocm_aiter_fmoe_enabled
+                else self.expert_mask,
+                topk_group=self.topk_group,
+                num_expert_group=self.num_expert_group,
+                custom_routing_function=self.custom_routing_function,
+                scoring_func=self.scoring_func,
+                routed_scaling_factor=self.routed_scaling_factor,
+                e_score_correction_bias=self.e_score_correction_bias,
+                activation=self.activation,
+                enable_eplb=self.enable_eplb,
+                expert_load_view=self.expert_load_view,
+                logical_to_physical_map=self.logical_to_physical_map,
+                logical_replica_count=self.logical_replica_count,
+            )
+
+            if has_separate_shared_experts:
+                assert not isinstance(final_hidden_states, tuple)
+                assert self.shared_experts is not None
+
+                shared_output = self.shared_experts(staged_hidden_states)
+
+                final_hidden_states = (
+                    shared_output,
+                    final_hidden_states,
+                )
+
+            if self.zero_expert_num is not None and self.zero_expert_num > 0:
+                assert isinstance(final_hidden_states, tuple)
+                assert self.shared_experts is None
+                final_hidden_states, zero_expert_result = final_hidden_states
+                if zero_expert_result is not None:
+                    final_hidden_states += zero_expert_result
+
+            if not skip_result_store:
+                if self.shared_experts is None:
+                    full_fused_final_hidden_states[chunk_start:chunk_end, :].copy_(
+                        final_hidden_states, non_blocking=True
+                    )
+                else:
+                    full_shared_final_hidden_states[chunk_start:chunk_end, :].copy_(
+                        final_hidden_states[0], non_blocking=True
+                    )
+                    full_fused_final_hidden_states[chunk_start:chunk_end, :].copy_(
+                        final_hidden_states[1], non_blocking=True
+                    )
+
+        ctx = get_forward_context()
+        # flashinfer_cutlass_kernels can handle: optional DP + TP/EP
+        max_tokens_across_dispatchers = ctx.dp_metadata.max_tokens_across_dp_cpu
+        moe_dp_chunk_size_per_rank = self.moe_config.max_num_tokens
+
+        # If the input to the MoE is sequence parallel, then divide by sp_size
+        # to find the maximum number of tokens for any individual dispatcher.
+        if self.is_sequence_parallel:
+            max_tokens_across_dispatchers = cdiv(
+                max_tokens_across_dispatchers, self.sp_size
+            )
+
+        num_tokens = full_hidden_states.size(0)
+        for chunk_idx, chunk_start_ in enumerate(
+            range(0, max_tokens_across_dispatchers, moe_dp_chunk_size_per_rank)
+        ):
+            chunk_start = chunk_start_
+            chunk_end = min(
+                chunk_start + moe_dp_chunk_size_per_rank, max_tokens_across_dispatchers
+            )
+            # Clamp start and end into the local token range.
+            chunk_start = min(chunk_start, num_tokens - 1)
+            chunk_end = min(chunk_end, num_tokens)
+            with ctx.dp_metadata.chunked_sizes(
+                self.sp_size, moe_dp_chunk_size_per_rank, chunk_idx
+            ):
+                process_chunk(
+                    chunk_start, chunk_end, skip_result_store=chunk_start_ >= num_tokens
+                )
+
+        if self.shared_experts is None:
+            return full_fused_final_hidden_states
+        else:
+            return (full_shared_final_hidden_states, full_fused_final_hidden_states)
+
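# The chunk-bound arithmetic from the loop above, isolated with hypothetical
# sizes. Every rank iterates over the max token count across dispatchers so
# the collectives stay in lockstep; a rank whose local tokens are exhausted
# still runs the chunk but passes skip_result_store=True.
max_tokens_across_dispatchers, chunk, num_tokens = 700, 256, 300
for chunk_start_ in range(0, max_tokens_across_dispatchers, chunk):
    chunk_end = min(chunk_start_ + chunk, max_tokens_across_dispatchers)
    chunk_start = min(chunk_start_, num_tokens - 1)  # clamp into local range
    chunk_end = min(chunk_end, num_tokens)
    print(chunk_start, chunk_end, chunk_start_ >= num_tokens)
# -> (0, 256, False), (256, 300, False), (299, 300, True)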
+    def forward_impl(
+        self,
+        hidden_states: torch.Tensor,
+        router_logits: torch.Tensor,
+    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
+        assert self.quant_method is not None
+
+        self.ensure_moe_quant_config_init()
+        self.ensure_dp_chunking_init()
+
+        has_separate_shared_experts = (
+            not isinstance(self.quant_method, FusedMoEModularMethod)
+            and self.shared_experts is not None
+        )
+
+        use_chunked_impl = self.use_dp_chunking
+
+        use_shared_experts_stream = (
+            has_separate_shared_experts
+            and not use_chunked_impl
+            and self.shared_experts_stream is not None
+            and (
+                hidden_states.shape[0]
+                <= envs.VLLM_SHARED_EXPERTS_STREAM_TOKEN_THRESHOLD
+            )
+        )
+
+        if use_shared_experts_stream:
+            assert self.shared_experts_stream is not None
+
+            # Clone BEFORE switching streams to avoid a race condition
+            # where the routed-expert kernel may mutate hidden_states.
+            hidden_states_clone = hidden_states.clone()
+
+            # Record that the clone will be used by shared_experts_stream
+            # to avoid a GC issue from deallocation of hidden_states_clone.
+            # For more details: https://docs.pytorch.org/docs/stable/generated/torch.Tensor.record_stream.html  # noqa: E501
+            # NOTE: We don't need shared_output.record_stream(current_stream())
+            # because we sync the streams before using shared_output.
+            hidden_states_clone.record_stream(self.shared_experts_stream)
+
+            # Mark the sync start point for the separate shared experts
+            # stream here, since we want to run in parallel with the
+            # router/gate (next op below).
+            assert self.shared_experts_stream is not None
+            self.shared_experts_stream.wait_stream(current_stream())
+
+        # If a router/gate is provided, apply it here.
+        # (Note: this code runs only when "overlapped mode" is on, to allow
+        # parallel execution of shared experts with the FusedMoE via a
+        # separate CUDA stream.)
+        if self.gate is not None:
+            router_logits, _ = self.gate(hidden_states)
+
+        if use_chunked_impl:
+            return self.forward_impl_chunked(
+                hidden_states, router_logits, has_separate_shared_experts
+            )
+
+        do_naive_dispatch_combine: bool = self.dp_size > 1 and not isinstance(
+            self.quant_method, FusedMoEModularMethod
+        )
+
+        ctx = get_forward_context()
+        sp_ctx = (
+            ctx.dp_metadata.sp_local_sizes(self.sp_size)
+            if ctx.dp_metadata
+            else nullcontext()
+        )
+
+        with sp_ctx:
+            if do_naive_dispatch_combine:
+                hidden_states_combined, router_logits = get_ep_group().dispatch(
+                    hidden_states, router_logits, self.is_sequence_parallel
+                )
+
+            # Matrix multiply.
+            final_hidden_states = self.quant_method.apply(
+                layer=self,
+                x=hidden_states_combined
+                if do_naive_dispatch_combine
+                else hidden_states,
+                router_logits=router_logits,
+                top_k=self.top_k,
+                renormalize=self.renormalize,
+                use_grouped_topk=self.use_grouped_topk,
+                global_num_experts=self.global_num_experts,
+                expert_map=self.expert_map
+                if not self.rocm_aiter_fmoe_enabled
+                else self.expert_mask,
+                topk_group=self.topk_group,
+                num_expert_group=self.num_expert_group,
+                custom_routing_function=self.custom_routing_function,
+                scoring_func=self.scoring_func,
+                routed_scaling_factor=self.routed_scaling_factor,
+                e_score_correction_bias=self.e_score_correction_bias,
+                activation=self.activation,
+                apply_router_weight_on_input=self.apply_router_weight_on_input,
+                enable_eplb=self.enable_eplb,
+                expert_load_view=self.expert_load_view,
+                logical_to_physical_map=self.logical_to_physical_map,
+                logical_replica_count=self.logical_replica_count,
+            )
+
+            if has_separate_shared_experts:
+                assert self.shared_experts is not None
+
+                if use_shared_experts_stream:
+                    # Run shared experts in parallel on a separate stream.
+                    # NOTE: We start the separate stream here and mark the
+                    # sync end point immediately after it is done. This is
+                    # important to avoid excessive stream allocations by the
+                    # CUDA graph replay later.
+                    with torch.cuda.stream(self.shared_experts_stream):
+                        # Note that the hidden_states clone() is necessary
+                        # here to avoid a conflict with the main stream.
+                        shared_output = self.shared_experts(hidden_states_clone)
+                    current_stream().wait_stream(self.shared_experts_stream)
+                else:
+                    shared_output = self.shared_experts(hidden_states)
+
+                final_hidden_states = (
+                    shared_output,
+                    final_hidden_states,
+                )
+            elif self.zero_expert_num is not None and self.zero_expert_num > 0:
+                assert isinstance(final_hidden_states, tuple)
+                final_hidden_states, zero_expert_result = final_hidden_states
+
+            def combine_output(states: torch.Tensor) -> torch.Tensor:
+                if do_naive_dispatch_combine:
+                    states = get_ep_group().combine(states, self.is_sequence_parallel)
+                return states
+
+            if self.shared_experts is not None:
+                return (
+                    final_hidden_states[0],
+                    combine_output(final_hidden_states[1]),
+                )
+            elif self.zero_expert_num is not None and self.zero_expert_num > 0:
+                assert isinstance(final_hidden_states, torch.Tensor)
+                return (combine_output(final_hidden_states), zero_expert_result)
+            else:
+                return combine_output(final_hidden_states)
+
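# A minimal standalone sketch of the two-stream overlap used in forward_impl
# above (CUDA only; the tensors and the shared-experts stand-in are
# hypothetical, assuming any elementwise op in place of shared_experts(...)).
import torch

if torch.cuda.is_available():
    main = torch.cuda.current_stream()
    side = torch.cuda.Stream()
    x = torch.randn(8, 16, device="cuda")
    x_clone = x.clone()          # clone BEFORE switching streams
    x_clone.record_stream(side)  # keep the allocator from recycling it early
    side.wait_stream(main)       # side stream starts after main's work so far
    with torch.cuda.stream(side):
        shared_out = x_clone * 2  # stand-in for shared_experts(...)
    main.wait_stream(side)       # main waits before consuming shared_out
    torch.cuda.synchronize()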
+    @classmethod
+    def make_expert_params_mapping(
+        cls,
+        ckpt_gate_proj_name: str,
+        ckpt_down_proj_name: str,
+        ckpt_up_proj_name: str,
+        num_experts: int,
+        num_redundant_experts: int = 0,
+    ) -> list[tuple[str, str, int, str]]:
+        num_physical_experts = num_experts + num_redundant_experts
+
+        # In the returned mapping:
+        # - `expert_id` is the physical expert id
+        # - `weight_name` contains the weight name of the logical expert
+        # So we map each physical expert id to its logical counterpart
+        # when building `weight_name`.
+        physical_to_logical_map = (
+            EplbState.build_initial_global_physical_to_logical_map(
+                num_experts, num_redundant_experts
+            )
+        )
+
+        return [
+            # (param_name, weight_name, expert_id, shard_id)
+            (
+                "experts.w13_"
+                if weight_name in [ckpt_gate_proj_name, ckpt_up_proj_name]
+                else "experts.w2_",
+                f"experts.{physical_to_logical_map[expert_id]}.{weight_name}.",
+                expert_id,
+                shard_id,
+            )
+            for expert_id in range(num_physical_experts)
+            for shard_id, weight_name in [
+                ("w1", ckpt_gate_proj_name),
+                ("w2", ckpt_down_proj_name),
+                ("w3", ckpt_up_proj_name),
+            ]
+        ]
+
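# What make_expert_params_mapping plausibly expands to for a tiny
# hypothetical config: 2 experts, no redundant experts (assuming the initial
# physical-to-logical map is then the identity), and HF-style projection
# names "gate_proj"/"down_proj"/"up_proj". Gate/up shards feed w13_, down
# feeds w2_.
expected = [
    ("experts.w13_", "experts.0.gate_proj.", 0, "w1"),
    ("experts.w2_", "experts.0.down_proj.", 0, "w2"),
    ("experts.w13_", "experts.0.up_proj.", 0, "w3"),
    ("experts.w13_", "experts.1.gate_proj.", 1, "w1"),
    ("experts.w2_", "experts.1.down_proj.", 1, "w2"),
    ("experts.w13_", "experts.1.up_proj.", 1, "w3"),
]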
+    def extra_repr(self) -> str:
+        s = (
+            f"global_num_experts={self.global_num_experts}, "
+            f"local_num_experts={self.local_num_experts}, "
+            f"top_k={self.top_k}, "
+            f"intermediate_size_per_partition={self.intermediate_size_per_partition}, "  # noqa: E501
+            f"tp_size={self.tp_size},\n"
+            f"ep_size={self.ep_size}, "
+            f"reduce_results={self.reduce_results}, "
+            f"renormalize={self.renormalize}, "
+            f"use_grouped_topk={self.use_grouped_topk}"
+        )
+
+        if self.use_grouped_topk:
+            s += f", num_expert_group={self.num_expert_group}, topk_group={self.topk_group}"  # noqa: E501
+
+        s += f", scoring_func='{self.scoring_func}', activation='{self.activation}'"  # noqa: E501
+
+        return s
+
+
+ def moe_forward(
+     hidden_states: torch.Tensor,
+     router_logits: torch.Tensor,
+     layer_name: str,
+ ) -> torch.Tensor:
+     forward_context: ForwardContext = get_forward_context()
+     self = forward_context.no_compile_layers[layer_name]
+     assert self.shared_experts is None
+     return self.forward_impl(hidden_states, router_logits)
+
+
+ def moe_forward_fake(
+     hidden_states: torch.Tensor,
+     router_logits: torch.Tensor,
+     layer_name: str,
+ ) -> torch.Tensor:
+     return torch.empty_like(hidden_states)
+
+
+ direct_register_custom_op(
+     op_name="moe_forward",
+     op_func=moe_forward,
+     mutates_args=["hidden_states"],
+     fake_impl=moe_forward_fake,
+     tags=(torch.Tag.needs_fixed_stride_order,),
+ )
+
+
+ def moe_forward_shared(
+     hidden_states: torch.Tensor,
+     router_logits: torch.Tensor,
+     layer_name: str,
+ ) -> tuple[torch.Tensor, torch.Tensor]:
+     forward_context: ForwardContext = get_forward_context()
+     self = forward_context.no_compile_layers[layer_name]
+     assert self.shared_experts is not None
+     return self.forward_impl(hidden_states, router_logits)
+
+
+ def moe_forward_shared_fake(
+     hidden_states: torch.Tensor,
+     router_logits: torch.Tensor,
+     layer_name: str,
+ ) -> tuple[torch.Tensor, torch.Tensor]:
+     shared_out = torch.empty_like(hidden_states)
+     fused_out = torch.empty_like(hidden_states)
+     return shared_out, fused_out
+
+
+ direct_register_custom_op(
+     op_name="moe_forward_shared",
+     op_func=moe_forward_shared,
+     mutates_args=["hidden_states"],
+     fake_impl=moe_forward_shared_fake,
+     tags=(torch.Tag.needs_fixed_stride_order,),
+ )
+
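# Once registered, the wrappers are reachable through the torch.ops
# namespace, which is how forward_native dispatches above. A hedged sketch
# (it needs a live forward context, so it is not runnable standalone):
#
#   out = torch.ops.vllm.moe_forward(hidden_states, router_logits, layer_name)
#
# The fake_impls give torch.compile shape-only implementations for tracing.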
+ # Mark the FusedMoE weight_loader as supporting MoE-specific parameters
+ # to avoid expensive runtime reflection in model loading code.
+ FusedMoE.weight_loader.supports_moe_loading = True  # type: ignore[attr-defined]