vllm_cpu_avx512bf16-0.14.0-cp313-cp313-manylinux_2_28_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (1712)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +225 -0
  3. vllm/_aiter_ops.py +1511 -0
  4. vllm/_bc_linter.py +54 -0
  5. vllm/_custom_ops.py +3206 -0
  6. vllm/_ipex_ops.py +445 -0
  7. vllm/_version.py +34 -0
  8. vllm/assets/__init__.py +0 -0
  9. vllm/assets/audio.py +43 -0
  10. vllm/assets/base.py +40 -0
  11. vllm/assets/image.py +62 -0
  12. vllm/assets/video.py +149 -0
  13. vllm/attention/__init__.py +0 -0
  14. vllm/attention/layer.py +913 -0
  15. vllm/attention/utils/__init__.py +0 -0
  16. vllm/attention/utils/kv_sharing_utils.py +33 -0
  17. vllm/attention/utils/kv_transfer_utils.py +60 -0
  18. vllm/beam_search.py +88 -0
  19. vllm/benchmarks/__init__.py +0 -0
  20. vllm/benchmarks/datasets.py +3277 -0
  21. vllm/benchmarks/latency.py +172 -0
  22. vllm/benchmarks/lib/__init__.py +3 -0
  23. vllm/benchmarks/lib/endpoint_request_func.py +777 -0
  24. vllm/benchmarks/lib/ready_checker.py +72 -0
  25. vllm/benchmarks/lib/utils.py +79 -0
  26. vllm/benchmarks/mm_processor.py +363 -0
  27. vllm/benchmarks/serve.py +1761 -0
  28. vllm/benchmarks/startup.py +321 -0
  29. vllm/benchmarks/sweep/__init__.py +0 -0
  30. vllm/benchmarks/sweep/cli.py +41 -0
  31. vllm/benchmarks/sweep/param_sweep.py +159 -0
  32. vllm/benchmarks/sweep/plot.py +675 -0
  33. vllm/benchmarks/sweep/plot_pareto.py +393 -0
  34. vllm/benchmarks/sweep/serve.py +450 -0
  35. vllm/benchmarks/sweep/serve_sla.py +459 -0
  36. vllm/benchmarks/sweep/server.py +114 -0
  37. vllm/benchmarks/sweep/sla_sweep.py +138 -0
  38. vllm/benchmarks/sweep/utils.py +4 -0
  39. vllm/benchmarks/throughput.py +946 -0
  40. vllm/collect_env.py +857 -0
  41. vllm/compilation/__init__.py +0 -0
  42. vllm/compilation/activation_quant_fusion.py +214 -0
  43. vllm/compilation/backends.py +840 -0
  44. vllm/compilation/base_static_graph.py +57 -0
  45. vllm/compilation/caching.py +196 -0
  46. vllm/compilation/collective_fusion.py +1224 -0
  47. vllm/compilation/compiler_interface.py +639 -0
  48. vllm/compilation/counter.py +50 -0
  49. vllm/compilation/cuda_graph.py +309 -0
  50. vllm/compilation/decorators.py +662 -0
  51. vllm/compilation/fix_functionalization.py +266 -0
  52. vllm/compilation/fusion.py +570 -0
  53. vllm/compilation/fusion_attn.py +363 -0
  54. vllm/compilation/fx_utils.py +92 -0
  55. vllm/compilation/inductor_pass.py +145 -0
  56. vllm/compilation/matcher_utils.py +454 -0
  57. vllm/compilation/monitor.py +62 -0
  58. vllm/compilation/noop_elimination.py +130 -0
  59. vllm/compilation/partition_rules.py +75 -0
  60. vllm/compilation/pass_manager.py +164 -0
  61. vllm/compilation/piecewise_backend.py +191 -0
  62. vllm/compilation/post_cleanup.py +21 -0
  63. vllm/compilation/qk_norm_rope_fusion.py +244 -0
  64. vllm/compilation/rocm_aiter_fusion.py +401 -0
  65. vllm/compilation/sequence_parallelism.py +368 -0
  66. vllm/compilation/torch25_custom_graph_pass.py +44 -0
  67. vllm/compilation/vllm_inductor_pass.py +180 -0
  68. vllm/compilation/wrapper.py +329 -0
  69. vllm/config/__init__.py +112 -0
  70. vllm/config/attention.py +114 -0
  71. vllm/config/cache.py +233 -0
  72. vllm/config/compilation.py +1149 -0
  73. vllm/config/device.py +75 -0
  74. vllm/config/ec_transfer.py +110 -0
  75. vllm/config/kv_events.py +56 -0
  76. vllm/config/kv_transfer.py +119 -0
  77. vllm/config/load.py +124 -0
  78. vllm/config/lora.py +102 -0
  79. vllm/config/model.py +2026 -0
  80. vllm/config/model_arch.py +57 -0
  81. vllm/config/multimodal.py +247 -0
  82. vllm/config/observability.py +157 -0
  83. vllm/config/parallel.py +703 -0
  84. vllm/config/pooler.py +188 -0
  85. vllm/config/profiler.py +199 -0
  86. vllm/config/scheduler.py +298 -0
  87. vllm/config/speculative.py +656 -0
  88. vllm/config/speech_to_text.py +39 -0
  89. vllm/config/structured_outputs.py +78 -0
  90. vllm/config/utils.py +374 -0
  91. vllm/config/vllm.py +1487 -0
  92. vllm/connections.py +189 -0
  93. vllm/device_allocator/__init__.py +0 -0
  94. vllm/device_allocator/cumem.py +301 -0
  95. vllm/distributed/__init__.py +6 -0
  96. vllm/distributed/communication_op.py +43 -0
  97. vllm/distributed/device_communicators/__init__.py +0 -0
  98. vllm/distributed/device_communicators/all2all.py +509 -0
  99. vllm/distributed/device_communicators/all_reduce_utils.py +344 -0
  100. vllm/distributed/device_communicators/base_device_communicator.py +303 -0
  101. vllm/distributed/device_communicators/cpu_communicator.py +209 -0
  102. vllm/distributed/device_communicators/cuda_communicator.py +346 -0
  103. vllm/distributed/device_communicators/cuda_wrapper.py +190 -0
  104. vllm/distributed/device_communicators/custom_all_reduce.py +326 -0
  105. vllm/distributed/device_communicators/mnnvl_compat.py +27 -0
  106. vllm/distributed/device_communicators/pynccl.py +386 -0
  107. vllm/distributed/device_communicators/pynccl_allocator.py +191 -0
  108. vllm/distributed/device_communicators/pynccl_wrapper.py +567 -0
  109. vllm/distributed/device_communicators/quick_all_reduce.py +290 -0
  110. vllm/distributed/device_communicators/ray_communicator.py +259 -0
  111. vllm/distributed/device_communicators/shm_broadcast.py +778 -0
  112. vllm/distributed/device_communicators/shm_object_storage.py +697 -0
  113. vllm/distributed/device_communicators/symm_mem.py +156 -0
  114. vllm/distributed/device_communicators/xpu_communicator.py +98 -0
  115. vllm/distributed/ec_transfer/__init__.py +14 -0
  116. vllm/distributed/ec_transfer/ec_connector/__init__.py +0 -0
  117. vllm/distributed/ec_transfer/ec_connector/base.py +247 -0
  118. vllm/distributed/ec_transfer/ec_connector/example_connector.py +201 -0
  119. vllm/distributed/ec_transfer/ec_connector/factory.py +85 -0
  120. vllm/distributed/ec_transfer/ec_transfer_state.py +42 -0
  121. vllm/distributed/eplb/__init__.py +3 -0
  122. vllm/distributed/eplb/async_worker.py +115 -0
  123. vllm/distributed/eplb/eplb_state.py +1192 -0
  124. vllm/distributed/eplb/policy/__init__.py +19 -0
  125. vllm/distributed/eplb/policy/abstract.py +43 -0
  126. vllm/distributed/eplb/policy/default.py +376 -0
  127. vllm/distributed/eplb/rebalance_execute.py +699 -0
  128. vllm/distributed/kv_events.py +505 -0
  129. vllm/distributed/kv_transfer/README.md +29 -0
  130. vllm/distributed/kv_transfer/__init__.py +20 -0
  131. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  132. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  133. vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
  134. vllm/distributed/kv_transfer/kv_connector/factory.py +203 -0
  135. vllm/distributed/kv_transfer/kv_connector/utils.py +459 -0
  136. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +19 -0
  137. vllm/distributed/kv_transfer/kv_connector/v1/base.py +607 -0
  138. vllm/distributed/kv_transfer/kv_connector/v1/decode_bench_connector.py +419 -0
  139. vllm/distributed/kv_transfer/kv_connector/v1/example_connector.py +450 -0
  140. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +344 -0
  141. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/__init__.py +18 -0
  142. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/multi_process_adapter.py +395 -0
  143. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/utils.py +211 -0
  144. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/vllm_v1_adapter.py +1431 -0
  145. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_mp_connector.py +941 -0
  146. vllm/distributed/kv_transfer/kv_connector/v1/metrics.py +186 -0
  147. vllm/distributed/kv_transfer/kv_connector/v1/mooncake_connector.py +916 -0
  148. vllm/distributed/kv_transfer/kv_connector/v1/moriio/__init__.py +0 -0
  149. vllm/distributed/kv_transfer/kv_connector/v1/moriio/moriio_common.py +321 -0
  150. vllm/distributed/kv_transfer/kv_connector/v1/moriio/moriio_connector.py +1515 -0
  151. vllm/distributed/kv_transfer/kv_connector/v1/moriio/moriio_engine.py +609 -0
  152. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +477 -0
  153. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +2688 -0
  154. vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py +557 -0
  155. vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
  156. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +531 -0
  157. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +632 -0
  158. vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +273 -0
  159. vllm/distributed/kv_transfer/kv_transfer_state.py +78 -0
  160. vllm/distributed/parallel_state.py +1809 -0
  161. vllm/distributed/utils.py +545 -0
  162. vllm/engine/__init__.py +0 -0
  163. vllm/engine/arg_utils.py +2137 -0
  164. vllm/engine/async_llm_engine.py +6 -0
  165. vllm/engine/llm_engine.py +6 -0
  166. vllm/engine/protocol.py +194 -0
  167. vllm/entrypoints/__init__.py +0 -0
  168. vllm/entrypoints/anthropic/__init__.py +0 -0
  169. vllm/entrypoints/anthropic/protocol.py +162 -0
  170. vllm/entrypoints/anthropic/serving_messages.py +468 -0
  171. vllm/entrypoints/api_server.py +186 -0
  172. vllm/entrypoints/chat_utils.py +1912 -0
  173. vllm/entrypoints/cli/__init__.py +19 -0
  174. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  175. vllm/entrypoints/cli/benchmark/base.py +25 -0
  176. vllm/entrypoints/cli/benchmark/latency.py +21 -0
  177. vllm/entrypoints/cli/benchmark/main.py +57 -0
  178. vllm/entrypoints/cli/benchmark/mm_processor.py +21 -0
  179. vllm/entrypoints/cli/benchmark/serve.py +21 -0
  180. vllm/entrypoints/cli/benchmark/startup.py +21 -0
  181. vllm/entrypoints/cli/benchmark/sweep.py +21 -0
  182. vllm/entrypoints/cli/benchmark/throughput.py +21 -0
  183. vllm/entrypoints/cli/collect_env.py +38 -0
  184. vllm/entrypoints/cli/main.py +79 -0
  185. vllm/entrypoints/cli/openai.py +260 -0
  186. vllm/entrypoints/cli/run_batch.py +68 -0
  187. vllm/entrypoints/cli/serve.py +253 -0
  188. vllm/entrypoints/cli/types.py +29 -0
  189. vllm/entrypoints/constants.py +12 -0
  190. vllm/entrypoints/context.py +898 -0
  191. vllm/entrypoints/grpc_server.py +531 -0
  192. vllm/entrypoints/launcher.py +175 -0
  193. vllm/entrypoints/llm.py +1807 -0
  194. vllm/entrypoints/logger.py +86 -0
  195. vllm/entrypoints/openai/__init__.py +0 -0
  196. vllm/entrypoints/openai/api_server.py +1390 -0
  197. vllm/entrypoints/openai/cli_args.py +320 -0
  198. vllm/entrypoints/openai/orca_metrics.py +120 -0
  199. vllm/entrypoints/openai/parser/__init__.py +0 -0
  200. vllm/entrypoints/openai/parser/harmony_utils.py +820 -0
  201. vllm/entrypoints/openai/parser/responses_parser.py +176 -0
  202. vllm/entrypoints/openai/protocol.py +2566 -0
  203. vllm/entrypoints/openai/run_batch.py +635 -0
  204. vllm/entrypoints/openai/serving_chat.py +1897 -0
  205. vllm/entrypoints/openai/serving_chat_stream_harmony.py +101 -0
  206. vllm/entrypoints/openai/serving_completion.py +740 -0
  207. vllm/entrypoints/openai/serving_engine.py +1612 -0
  208. vllm/entrypoints/openai/serving_models.py +309 -0
  209. vllm/entrypoints/openai/serving_responses.py +2552 -0
  210. vllm/entrypoints/openai/serving_transcription.py +168 -0
  211. vllm/entrypoints/openai/speech_to_text.py +711 -0
  212. vllm/entrypoints/openai/utils.py +49 -0
  213. vllm/entrypoints/pooling/__init__.py +16 -0
  214. vllm/entrypoints/pooling/classify/__init__.py +0 -0
  215. vllm/entrypoints/pooling/classify/api_router.py +48 -0
  216. vllm/entrypoints/pooling/classify/protocol.py +181 -0
  217. vllm/entrypoints/pooling/classify/serving.py +233 -0
  218. vllm/entrypoints/pooling/embed/__init__.py +0 -0
  219. vllm/entrypoints/pooling/embed/api_router.py +65 -0
  220. vllm/entrypoints/pooling/embed/conftest.py +28 -0
  221. vllm/entrypoints/pooling/embed/protocol.py +217 -0
  222. vllm/entrypoints/pooling/embed/serving.py +684 -0
  223. vllm/entrypoints/pooling/pooling/__init__.py +0 -0
  224. vllm/entrypoints/pooling/pooling/api_router.py +62 -0
  225. vllm/entrypoints/pooling/pooling/protocol.py +146 -0
  226. vllm/entrypoints/pooling/pooling/serving.py +354 -0
  227. vllm/entrypoints/pooling/score/__init__.py +0 -0
  228. vllm/entrypoints/pooling/score/api_router.py +147 -0
  229. vllm/entrypoints/pooling/score/protocol.py +146 -0
  230. vllm/entrypoints/pooling/score/serving.py +511 -0
  231. vllm/entrypoints/renderer.py +411 -0
  232. vllm/entrypoints/responses_utils.py +218 -0
  233. vllm/entrypoints/sagemaker/__init__.py +4 -0
  234. vllm/entrypoints/sagemaker/routes.py +118 -0
  235. vllm/entrypoints/score_utils.py +271 -0
  236. vllm/entrypoints/serve/__init__.py +94 -0
  237. vllm/entrypoints/serve/cache/__init__.py +0 -0
  238. vllm/entrypoints/serve/cache/api_router.py +61 -0
  239. vllm/entrypoints/serve/disagg/__init__.py +0 -0
  240. vllm/entrypoints/serve/disagg/api_router.py +109 -0
  241. vllm/entrypoints/serve/disagg/protocol.py +90 -0
  242. vllm/entrypoints/serve/disagg/serving.py +285 -0
  243. vllm/entrypoints/serve/elastic_ep/__init__.py +0 -0
  244. vllm/entrypoints/serve/elastic_ep/api_router.py +96 -0
  245. vllm/entrypoints/serve/elastic_ep/middleware.py +49 -0
  246. vllm/entrypoints/serve/instrumentator/__init__.py +0 -0
  247. vllm/entrypoints/serve/instrumentator/health.py +33 -0
  248. vllm/entrypoints/serve/instrumentator/metrics.py +45 -0
  249. vllm/entrypoints/serve/instrumentator/offline_docs.py +50 -0
  250. vllm/entrypoints/serve/instrumentator/server_info.py +56 -0
  251. vllm/entrypoints/serve/instrumentator/static/swagger-ui-bundle.js +2 -0
  252. vllm/entrypoints/serve/instrumentator/static/swagger-ui.css +3 -0
  253. vllm/entrypoints/serve/lora/__init__.py +0 -0
  254. vllm/entrypoints/serve/lora/api_router.py +70 -0
  255. vllm/entrypoints/serve/profile/__init__.py +0 -0
  256. vllm/entrypoints/serve/profile/api_router.py +46 -0
  257. vllm/entrypoints/serve/rlhf/__init__.py +0 -0
  258. vllm/entrypoints/serve/rlhf/api_router.py +102 -0
  259. vllm/entrypoints/serve/rpc/__init__.py +0 -0
  260. vllm/entrypoints/serve/rpc/api_router.py +61 -0
  261. vllm/entrypoints/serve/sleep/__init__.py +0 -0
  262. vllm/entrypoints/serve/sleep/api_router.py +56 -0
  263. vllm/entrypoints/serve/tokenize/__init__.py +0 -0
  264. vllm/entrypoints/serve/tokenize/api_router.py +112 -0
  265. vllm/entrypoints/serve/tokenize/serving.py +204 -0
  266. vllm/entrypoints/ssl.py +78 -0
  267. vllm/entrypoints/tool.py +187 -0
  268. vllm/entrypoints/tool_server.py +234 -0
  269. vllm/entrypoints/utils.py +336 -0
  270. vllm/env_override.py +402 -0
  271. vllm/envs.py +1791 -0
  272. vllm/exceptions.py +36 -0
  273. vllm/forward_context.py +375 -0
  274. vllm/grpc/__init__.py +17 -0
  275. vllm/grpc/compile_protos.py +94 -0
  276. vllm/grpc/vllm_engine.proto +195 -0
  277. vllm/grpc/vllm_engine_pb2.py +77 -0
  278. vllm/grpc/vllm_engine_pb2.pyi +213 -0
  279. vllm/grpc/vllm_engine_pb2_grpc.py +330 -0
  280. vllm/inputs/__init__.py +44 -0
  281. vllm/inputs/data.py +359 -0
  282. vllm/inputs/parse.py +147 -0
  283. vllm/inputs/preprocess.py +716 -0
  284. vllm/logger.py +303 -0
  285. vllm/logging_utils/__init__.py +13 -0
  286. vllm/logging_utils/dump_input.py +83 -0
  287. vllm/logging_utils/formatter.py +127 -0
  288. vllm/logging_utils/lazy.py +20 -0
  289. vllm/logging_utils/log_time.py +34 -0
  290. vllm/logits_process.py +121 -0
  291. vllm/logprobs.py +206 -0
  292. vllm/lora/__init__.py +0 -0
  293. vllm/lora/layers/__init__.py +43 -0
  294. vllm/lora/layers/base.py +66 -0
  295. vllm/lora/layers/base_linear.py +172 -0
  296. vllm/lora/layers/column_parallel_linear.py +577 -0
  297. vllm/lora/layers/fused_moe.py +739 -0
  298. vllm/lora/layers/logits_processor.py +203 -0
  299. vllm/lora/layers/replicated_linear.py +70 -0
  300. vllm/lora/layers/row_parallel_linear.py +176 -0
  301. vllm/lora/layers/utils.py +115 -0
  302. vllm/lora/layers/vocal_parallel_embedding.py +140 -0
  303. vllm/lora/lora_model.py +221 -0
  304. vllm/lora/lora_weights.py +227 -0
  305. vllm/lora/model_manager.py +858 -0
  306. vllm/lora/ops/__init__.py +0 -0
  307. vllm/lora/ops/ipex_ops/__init__.py +6 -0
  308. vllm/lora/ops/ipex_ops/lora_ops.py +57 -0
  309. vllm/lora/ops/torch_ops/__init__.py +20 -0
  310. vllm/lora/ops/torch_ops/lora_ops.py +128 -0
  311. vllm/lora/ops/triton_ops/README_TUNING.md +60 -0
  312. vllm/lora/ops/triton_ops/__init__.py +21 -0
  313. vllm/lora/ops/triton_ops/fused_moe_lora_op.py +677 -0
  314. vllm/lora/ops/triton_ops/kernel_utils.py +340 -0
  315. vllm/lora/ops/triton_ops/lora_expand_op.py +310 -0
  316. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +154 -0
  317. vllm/lora/ops/triton_ops/lora_shrink_op.py +287 -0
  318. vllm/lora/ops/triton_ops/utils.py +313 -0
  319. vllm/lora/peft_helper.py +128 -0
  320. vllm/lora/punica_wrapper/__init__.py +10 -0
  321. vllm/lora/punica_wrapper/punica_base.py +493 -0
  322. vllm/lora/punica_wrapper/punica_cpu.py +351 -0
  323. vllm/lora/punica_wrapper/punica_gpu.py +413 -0
  324. vllm/lora/punica_wrapper/punica_selector.py +21 -0
  325. vllm/lora/punica_wrapper/punica_xpu.py +276 -0
  326. vllm/lora/punica_wrapper/utils.py +150 -0
  327. vllm/lora/request.py +60 -0
  328. vllm/lora/resolver.py +88 -0
  329. vllm/lora/utils.py +281 -0
  330. vllm/lora/worker_manager.py +278 -0
  331. vllm/model_executor/__init__.py +9 -0
  332. vllm/model_executor/custom_op.py +203 -0
  333. vllm/model_executor/layers/__init__.py +0 -0
  334. vllm/model_executor/layers/activation.py +628 -0
  335. vllm/model_executor/layers/attention/__init__.py +0 -0
  336. vllm/model_executor/layers/attention/chunked_local_attention.py +130 -0
  337. vllm/model_executor/layers/attention/cross_attention.py +182 -0
  338. vllm/model_executor/layers/attention/encoder_only_attention.py +103 -0
  339. vllm/model_executor/layers/attention/mm_encoder_attention.py +234 -0
  340. vllm/model_executor/layers/attention/static_sink_attention.py +254 -0
  341. vllm/model_executor/layers/attention_layer_base.py +34 -0
  342. vllm/model_executor/layers/batch_invariant.py +1063 -0
  343. vllm/model_executor/layers/conv.py +262 -0
  344. vllm/model_executor/layers/fla/__init__.py +8 -0
  345. vllm/model_executor/layers/fla/ops/__init__.py +17 -0
  346. vllm/model_executor/layers/fla/ops/chunk.py +240 -0
  347. vllm/model_executor/layers/fla/ops/chunk_delta_h.py +344 -0
  348. vllm/model_executor/layers/fla/ops/chunk_o.py +183 -0
  349. vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +154 -0
  350. vllm/model_executor/layers/fla/ops/cumsum.py +280 -0
  351. vllm/model_executor/layers/fla/ops/fused_recurrent.py +390 -0
  352. vllm/model_executor/layers/fla/ops/index.py +41 -0
  353. vllm/model_executor/layers/fla/ops/kda.py +1351 -0
  354. vllm/model_executor/layers/fla/ops/l2norm.py +146 -0
  355. vllm/model_executor/layers/fla/ops/layernorm_guard.py +396 -0
  356. vllm/model_executor/layers/fla/ops/op.py +60 -0
  357. vllm/model_executor/layers/fla/ops/solve_tril.py +556 -0
  358. vllm/model_executor/layers/fla/ops/utils.py +194 -0
  359. vllm/model_executor/layers/fla/ops/wy_fast.py +158 -0
  360. vllm/model_executor/layers/fused_moe/__init__.py +120 -0
  361. vllm/model_executor/layers/fused_moe/all2all_utils.py +173 -0
  362. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +411 -0
  363. vllm/model_executor/layers/fused_moe/config.py +1111 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H100,dtype=fp8_w8a8.json +123 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200.json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_L40S.json +147 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_RTX_PRO_6000_Blackwell_Workstation_Edition,dtype=fp8_w8a8.json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI308X.json +213 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200.json +147 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_L40S.json +147 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=129,N=704,device_name=NVIDIA_RTX_PRO_6000_Blackwell_Workstation_Edition,dtype=fp8_w8a8.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200.json +146 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200.json +146 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  454. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  455. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
  456. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  457. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  458. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI300X.json +201 -0
  459. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
  460. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  461. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_B300_SXM6_AC,dtype=fp8_w8a8.json +147 -0
  462. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  463. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +147 -0
  464. vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
  465. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  466. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
  467. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI355_OAM,dtype=fp8_w8a8.json +164 -0
  468. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=NVIDIA_B300_SXM6_AC,dtype=fp8_w8a8.json +147 -0
  469. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  470. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  471. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  472. vllm/model_executor/layers/fused_moe/configs/E=160,N=768,device_name=NVIDIA_B300_SXM6_AC,dtype=fp8_w8a8.json +147 -0
  473. vllm/model_executor/layers/fused_moe/configs/E=20,N=1536,device_name=NVIDIA_RTX_PRO_6000_Blackwell_Server_Edition,dtype=fp8_w8a8.json +147 -0
  474. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  475. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  476. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  477. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  478. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  479. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  480. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  481. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  482. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  483. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  484. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  485. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  486. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  487. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  488. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  489. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  490. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  491. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  492. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  493. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  494. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  495. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  496. vllm/model_executor/layers/fused_moe/configs/E=256,N=384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  497. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  498. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  499. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  500. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  501. vllm/model_executor/layers/fused_moe/configs/E=32,N=1408,device_name=NVIDIA_B200.json +147 -0
  502. vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  503. vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  504. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  505. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  506. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  507. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  508. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  509. vllm/model_executor/layers/fused_moe/configs/E=40,N=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  510. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  511. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  512. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  513. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
  514. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  515. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
  516. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +147 -0
  517. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  518. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
  519. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
  520. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  521. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
  522. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  523. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  524. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  525. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
  526. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
  527. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  528. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
  529. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  530. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  531. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
  532. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
  533. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
  534. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
  535. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
  536. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
  537. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  538. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  539. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  540. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  541. vllm/model_executor/layers/fused_moe/configs/E=62,N=128,device_name=AMD_Instinct_MI300X.json +200 -0
  542. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=AMD_Instinct_MI300X.json +200 -0
  543. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  544. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=AMD_Instinct_MI300X.json +200 -0
  545. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  546. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  547. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  548. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  549. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  550. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  551. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  552. vllm/model_executor/layers/fused_moe/configs/E=64,N=1408,device_name=NVIDIA_B200.json +147 -0
  553. vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  554. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  555. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  556. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  557. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  558. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
  559. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  560. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  561. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  562. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  563. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  564. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
  565. vllm/model_executor/layers/fused_moe/configs/E=64,N=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  566. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  567. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  568. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  569. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  570. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  571. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  572. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  573. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H100_PCIe,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  574. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  575. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
  576. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  577. vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
  578. vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
  579. vllm/model_executor/layers/fused_moe/configs/E=72,N=192,device_name=AMD_Instinct_MI300X.json +200 -0
  580. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=AMD_Instinct_MI300X.json +200 -0
  581. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  582. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=AMD_Instinct_MI300X.json +200 -0
  583. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  584. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  585. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  586. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  587. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  588. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  589. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  590. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  591. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  592. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  593. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  594. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  595. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  596. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  597. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  598. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  599. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  600. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  601. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  602. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  603. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  604. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  605. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  606. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  607. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  608. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  609. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  610. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  611. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
  612. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  613. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  614. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  615. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  616. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  617. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  618. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  619. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  620. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  621. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  622. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  623. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  624. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  625. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  626. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  627. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  628. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  629. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  630. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  631. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  632. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  633. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  634. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  635. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  636. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  637. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  638. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  639. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  640. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  641. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  642. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  643. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  644. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  645. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  646. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  647. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  648. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  649. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  650. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  651. vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +444 -0
  652. vllm/model_executor/layers/fused_moe/cutlass_moe.py +1086 -0
  653. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +364 -0
  654. vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +427 -0
  655. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +420 -0
  656. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +436 -0
  657. vllm/model_executor/layers/fused_moe/fallback.py +127 -0
  658. vllm/model_executor/layers/fused_moe/flashinfer_cutedsl_moe.py +338 -0
  659. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +310 -0
  660. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +371 -0
  661. vllm/model_executor/layers/fused_moe/flashinfer_trtllm_moe.py +192 -0
  662. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1018 -0
  663. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +824 -0
  664. vllm/model_executor/layers/fused_moe/fused_moe.py +2638 -0
  665. vllm/model_executor/layers/fused_moe/fused_moe_method_base.py +119 -0
  666. vllm/model_executor/layers/fused_moe/fused_moe_modular_method.py +117 -0
  667. vllm/model_executor/layers/fused_moe/fused_moe_router.py +40 -0
  668. vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +531 -0
  669. vllm/model_executor/layers/fused_moe/layer.py +2169 -0
  670. vllm/model_executor/layers/fused_moe/modular_kernel.py +1251 -0
  671. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +192 -0
  672. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +229 -0
  673. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  674. vllm/model_executor/layers/fused_moe/oracle/__init__.py +2 -0
  675. vllm/model_executor/layers/fused_moe/oracle/fp8.py +358 -0
  676. vllm/model_executor/layers/fused_moe/oracle/nvfp4.py +280 -0
  677. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +362 -0
  678. vllm/model_executor/layers/fused_moe/prepare_finalize.py +87 -0
  679. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +347 -0
  680. vllm/model_executor/layers/fused_moe/routed_experts_capturer.py +324 -0
  681. vllm/model_executor/layers/fused_moe/routing_simulator.py +310 -0
  682. vllm/model_executor/layers/fused_moe/shared_fused_moe.py +96 -0
  683. vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +171 -0
  684. vllm/model_executor/layers/fused_moe/triton_cutlass_moe.py +78 -0
  685. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +75 -0
  686. vllm/model_executor/layers/fused_moe/trtllm_moe.py +144 -0
  687. vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py +403 -0
  688. vllm/model_executor/layers/fused_moe/utils.py +382 -0
  689. vllm/model_executor/layers/fused_moe/zero_expert_fused_moe.py +189 -0
  690. vllm/model_executor/layers/kda.py +442 -0
  691. vllm/model_executor/layers/layernorm.py +451 -0
  692. vllm/model_executor/layers/lightning_attn.py +735 -0
  693. vllm/model_executor/layers/linear.py +1478 -0
  694. vllm/model_executor/layers/logits_processor.py +109 -0
  695. vllm/model_executor/layers/mamba/__init__.py +0 -0
  696. vllm/model_executor/layers/mamba/abstract.py +68 -0
  697. vllm/model_executor/layers/mamba/linear_attn.py +410 -0
  698. vllm/model_executor/layers/mamba/mamba_mixer.py +541 -0
  699. vllm/model_executor/layers/mamba/mamba_mixer2.py +936 -0
  700. vllm/model_executor/layers/mamba/mamba_utils.py +225 -0
  701. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  702. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +1240 -0
  703. vllm/model_executor/layers/mamba/ops/layernorm_gated.py +172 -0
  704. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +586 -0
  705. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +211 -0
  706. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +456 -0
  707. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +700 -0
  708. vllm/model_executor/layers/mamba/ops/ssd_combined.py +230 -0
  709. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +157 -0
  710. vllm/model_executor/layers/mamba/short_conv.py +254 -0
  711. vllm/model_executor/layers/mla.py +179 -0
  712. vllm/model_executor/layers/pooler/__init__.py +5 -0
  713. vllm/model_executor/layers/pooler/abstract.py +39 -0
  714. vllm/model_executor/layers/pooler/activations.py +162 -0
  715. vllm/model_executor/layers/pooler/common.py +32 -0
  716. vllm/model_executor/layers/pooler/seqwise/__init__.py +45 -0
  717. vllm/model_executor/layers/pooler/seqwise/heads.py +151 -0
  718. vllm/model_executor/layers/pooler/seqwise/methods.py +93 -0
  719. vllm/model_executor/layers/pooler/seqwise/poolers.py +127 -0
  720. vllm/model_executor/layers/pooler/special.py +128 -0
  721. vllm/model_executor/layers/pooler/tokwise/__init__.py +39 -0
  722. vllm/model_executor/layers/pooler/tokwise/heads.py +133 -0
  723. vllm/model_executor/layers/pooler/tokwise/methods.py +122 -0
  724. vllm/model_executor/layers/pooler/tokwise/poolers.py +127 -0
  725. vllm/model_executor/layers/quantization/__init__.py +195 -0
  726. vllm/model_executor/layers/quantization/auto_round.py +454 -0
  727. vllm/model_executor/layers/quantization/awq.py +277 -0
  728. vllm/model_executor/layers/quantization/awq_marlin.py +795 -0
  729. vllm/model_executor/layers/quantization/awq_triton.py +337 -0
  730. vllm/model_executor/layers/quantization/base_config.py +170 -0
  731. vllm/model_executor/layers/quantization/bitblas.py +502 -0
  732. vllm/model_executor/layers/quantization/bitsandbytes.py +631 -0
  733. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +3 -0
  734. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +982 -0
  735. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +2368 -0
  736. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +37 -0
  737. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +392 -0
  738. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  739. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +176 -0
  740. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_mxfp4.py +106 -0
  741. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +124 -0
  742. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +218 -0
  743. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +176 -0
  744. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +153 -0
  745. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +138 -0
  746. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +203 -0
  747. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +125 -0
  748. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +230 -0
  749. vllm/model_executor/layers/quantization/compressed_tensors/transform/__init__.py +0 -0
  750. vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +260 -0
  751. vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +173 -0
  752. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/__init__.py +0 -0
  753. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +64 -0
  754. vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
  755. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +224 -0
  756. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  757. vllm/model_executor/layers/quantization/cpu_wna16.py +299 -0
  758. vllm/model_executor/layers/quantization/deepspeedfp.py +218 -0
  759. vllm/model_executor/layers/quantization/experts_int8.py +209 -0
  760. vllm/model_executor/layers/quantization/fbgemm_fp8.py +195 -0
  761. vllm/model_executor/layers/quantization/fp8.py +1224 -0
  762. vllm/model_executor/layers/quantization/fp_quant.py +420 -0
  763. vllm/model_executor/layers/quantization/gguf.py +682 -0
  764. vllm/model_executor/layers/quantization/gptq.py +393 -0
  765. vllm/model_executor/layers/quantization/gptq_bitblas.py +482 -0
  766. vllm/model_executor/layers/quantization/gptq_marlin.py +934 -0
  767. vllm/model_executor/layers/quantization/gptq_marlin_24.py +320 -0
  768. vllm/model_executor/layers/quantization/hqq_marlin.py +372 -0
  769. vllm/model_executor/layers/quantization/inc.py +65 -0
  770. vllm/model_executor/layers/quantization/input_quant_fp8.py +212 -0
  771. vllm/model_executor/layers/quantization/ipex_quant.py +403 -0
  772. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  773. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +94 -0
  774. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +113 -0
  775. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
  776. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +323 -0
  777. vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +98 -0
  778. vllm/model_executor/layers/quantization/kernels/mixed_precision/cpu.py +126 -0
  779. vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +130 -0
  780. vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +111 -0
  781. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +168 -0
  782. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +159 -0
  783. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +200 -0
  784. vllm/model_executor/layers/quantization/kernels/mixed_precision/xpu.py +97 -0
  785. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +76 -0
  786. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +77 -0
  787. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +128 -0
  788. vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +220 -0
  789. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +147 -0
  790. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +88 -0
  791. vllm/model_executor/layers/quantization/kv_cache.py +153 -0
  792. vllm/model_executor/layers/quantization/modelopt.py +1665 -0
  793. vllm/model_executor/layers/quantization/moe_wna16.py +518 -0
  794. vllm/model_executor/layers/quantization/mxfp4.py +1145 -0
  795. vllm/model_executor/layers/quantization/petit.py +319 -0
  796. vllm/model_executor/layers/quantization/ptpc_fp8.py +140 -0
  797. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  798. vllm/model_executor/layers/quantization/quark/quark.py +570 -0
  799. vllm/model_executor/layers/quantization/quark/quark_moe.py +797 -0
  800. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  801. vllm/model_executor/layers/quantization/quark/schemes/quark_ocp_mx.py +343 -0
  802. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  803. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +179 -0
  804. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +139 -0
  805. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  806. vllm/model_executor/layers/quantization/qutlass_utils.py +185 -0
  807. vllm/model_executor/layers/quantization/rtn.py +626 -0
  808. vllm/model_executor/layers/quantization/schema.py +90 -0
  809. vllm/model_executor/layers/quantization/torchao.py +380 -0
  810. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  811. vllm/model_executor/layers/quantization/utils/allspark_utils.py +67 -0
  812. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +229 -0
  813. vllm/model_executor/layers/quantization/utils/configs/N=10240,K=5120,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  814. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  815. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  816. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  817. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  818. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  819. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  820. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  821. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  822. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  823. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  824. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  825. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  826. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  827. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  828. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  829. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  830. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  831. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  832. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  833. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  834. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  835. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  836. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  837. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  838. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  839. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  840. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  841. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  842. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  843. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  844. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  845. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  846. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  847. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  848. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  849. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  850. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  851. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  852. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  853. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  854. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  855. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  856. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  857. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  858. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  859. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  860. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  861. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  862. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  863. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  864. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  865. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  866. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  867. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  868. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  869. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  870. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  871. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  872. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  873. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  874. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  875. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  876. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  877. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  878. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  879. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  880. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  881. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  882. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  883. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  884. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  885. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  886. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  887. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  888. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  889. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  890. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  891. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  892. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  893. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  894. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  895. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  896. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  897. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  898. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  899. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  900. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  901. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  902. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  903. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  904. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  905. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  906. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  907. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  908. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  909. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  910. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  911. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  912. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  913. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  914. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  915. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  916. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  917. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  918. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  919. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  920. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  921. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  922. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  923. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  924. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  925. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  926. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  927. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  928. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  929. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  930. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  931. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  932. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  933. vllm/model_executor/layers/quantization/utils/configs/N=5120,K=25600,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  934. vllm/model_executor/layers/quantization/utils/configs/N=5120,K=8192,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  935. vllm/model_executor/layers/quantization/utils/configs/N=51200,K=5120,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  936. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  937. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  938. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  939. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  940. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  941. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  942. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  943. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  944. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  945. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  946. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  947. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  948. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  949. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  950. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  951. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  952. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  953. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  954. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  955. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  956. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  957. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  958. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  959. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  960. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  961. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  962. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  963. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  964. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  965. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  966. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  967. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  968. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  969. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  970. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  971. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  972. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  973. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  974. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  975. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  976. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  977. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  978. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  979. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  980. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  981. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  982. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  983. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  984. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  985. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  986. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  987. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  988. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  989. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  990. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  991. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  992. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  993. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  994. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  995. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  996. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  997. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  998. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  999. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1000. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1001. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1002. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1003. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  1004. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1005. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  1006. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1007. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1008. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1009. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1010. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1011. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  1012. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1013. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  1014. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1015. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1016. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1017. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1018. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  1019. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1020. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  1021. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1022. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1023. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1024. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1025. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1026. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1027. vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
  1028. vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +514 -0
  1029. vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +370 -0
  1030. vllm/model_executor/layers/quantization/utils/fp8_utils.py +1658 -0
  1031. vllm/model_executor/layers/quantization/utils/gptq_utils.py +158 -0
  1032. vllm/model_executor/layers/quantization/utils/int8_utils.py +477 -0
  1033. vllm/model_executor/layers/quantization/utils/layer_utils.py +41 -0
  1034. vllm/model_executor/layers/quantization/utils/machete_utils.py +56 -0
  1035. vllm/model_executor/layers/quantization/utils/marlin_utils.py +720 -0
  1036. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +565 -0
  1037. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +378 -0
  1038. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +219 -0
  1039. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +467 -0
  1040. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +189 -0
  1041. vllm/model_executor/layers/quantization/utils/mxfp6_utils.py +142 -0
  1042. vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +24 -0
  1043. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +142 -0
  1044. vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +67 -0
  1045. vllm/model_executor/layers/quantization/utils/ocp_mx_utils.py +51 -0
  1046. vllm/model_executor/layers/quantization/utils/petit_utils.py +124 -0
  1047. vllm/model_executor/layers/quantization/utils/quant_utils.py +767 -0
  1048. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +519 -0
  1049. vllm/model_executor/layers/resampler.py +283 -0
  1050. vllm/model_executor/layers/rotary_embedding/__init__.py +291 -0
  1051. vllm/model_executor/layers/rotary_embedding/base.py +282 -0
  1052. vllm/model_executor/layers/rotary_embedding/common.py +289 -0
  1053. vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +184 -0
  1054. vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +218 -0
  1055. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +43 -0
  1056. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +68 -0
  1057. vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +82 -0
  1058. vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
  1059. vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
  1060. vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +83 -0
  1061. vllm/model_executor/layers/rotary_embedding/mrope.py +412 -0
  1062. vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +47 -0
  1063. vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +159 -0
  1064. vllm/model_executor/layers/rotary_embedding/xdrope.py +160 -0
  1065. vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +84 -0
  1066. vllm/model_executor/layers/utils.py +251 -0
  1067. vllm/model_executor/layers/vocab_parallel_embedding.py +564 -0
  1068. vllm/model_executor/model_loader/__init__.py +150 -0
  1069. vllm/model_executor/model_loader/base_loader.py +71 -0
  1070. vllm/model_executor/model_loader/bitsandbytes_loader.py +821 -0
  1071. vllm/model_executor/model_loader/default_loader.py +304 -0
  1072. vllm/model_executor/model_loader/dummy_loader.py +28 -0
  1073. vllm/model_executor/model_loader/gguf_loader.py +371 -0
  1074. vllm/model_executor/model_loader/online_quantization.py +275 -0
  1075. vllm/model_executor/model_loader/runai_streamer_loader.py +115 -0
  1076. vllm/model_executor/model_loader/sharded_state_loader.py +214 -0
  1077. vllm/model_executor/model_loader/tensorizer.py +793 -0
  1078. vllm/model_executor/model_loader/tensorizer_loader.py +151 -0
  1079. vllm/model_executor/model_loader/utils.py +299 -0
  1080. vllm/model_executor/model_loader/weight_utils.py +1183 -0
  1081. vllm/model_executor/models/__init__.py +44 -0
  1082. vllm/model_executor/models/adapters.py +592 -0
  1083. vllm/model_executor/models/afmoe.py +697 -0
  1084. vllm/model_executor/models/aimv2.py +248 -0
  1085. vllm/model_executor/models/apertus.py +567 -0
  1086. vllm/model_executor/models/arcee.py +428 -0
  1087. vllm/model_executor/models/arctic.py +633 -0
  1088. vllm/model_executor/models/aria.py +663 -0
  1089. vllm/model_executor/models/audioflamingo3.py +639 -0
  1090. vllm/model_executor/models/aya_vision.py +448 -0
  1091. vllm/model_executor/models/bagel.py +591 -0
  1092. vllm/model_executor/models/baichuan.py +493 -0
  1093. vllm/model_executor/models/bailing_moe.py +643 -0
  1094. vllm/model_executor/models/bamba.py +511 -0
  1095. vllm/model_executor/models/bee.py +157 -0
  1096. vllm/model_executor/models/bert.py +911 -0
  1097. vllm/model_executor/models/bert_with_rope.py +729 -0
  1098. vllm/model_executor/models/blip.py +350 -0
  1099. vllm/model_executor/models/blip2.py +736 -0
  1100. vllm/model_executor/models/bloom.py +390 -0
  1101. vllm/model_executor/models/chameleon.py +1095 -0
  1102. vllm/model_executor/models/chatglm.py +502 -0
  1103. vllm/model_executor/models/clip.py +1045 -0
  1104. vllm/model_executor/models/cohere2_vision.py +470 -0
  1105. vllm/model_executor/models/commandr.py +469 -0
  1106. vllm/model_executor/models/config.py +571 -0
  1107. vllm/model_executor/models/dbrx.py +484 -0
  1108. vllm/model_executor/models/deepencoder.py +679 -0
  1109. vllm/model_executor/models/deepseek_eagle.py +253 -0
  1110. vllm/model_executor/models/deepseek_mtp.py +447 -0
  1111. vllm/model_executor/models/deepseek_ocr.py +601 -0
  1112. vllm/model_executor/models/deepseek_v2.py +1727 -0
  1113. vllm/model_executor/models/deepseek_vl2.py +642 -0
  1114. vllm/model_executor/models/dots1.py +566 -0
  1115. vllm/model_executor/models/dots_ocr.py +830 -0
  1116. vllm/model_executor/models/ernie45.py +53 -0
  1117. vllm/model_executor/models/ernie45_moe.py +755 -0
  1118. vllm/model_executor/models/ernie45_vl.py +1702 -0
  1119. vllm/model_executor/models/ernie45_vl_moe.py +801 -0
  1120. vllm/model_executor/models/ernie_mtp.py +278 -0
  1121. vllm/model_executor/models/exaone.py +524 -0
  1122. vllm/model_executor/models/exaone4.py +518 -0
  1123. vllm/model_executor/models/exaone_moe.py +579 -0
  1124. vllm/model_executor/models/exaone_moe_mtp.py +255 -0
  1125. vllm/model_executor/models/fairseq2_llama.py +154 -0
  1126. vllm/model_executor/models/falcon.py +543 -0
  1127. vllm/model_executor/models/falcon_h1.py +675 -0
  1128. vllm/model_executor/models/flex_olmo.py +155 -0
  1129. vllm/model_executor/models/fuyu.py +371 -0
  1130. vllm/model_executor/models/gemma.py +425 -0
  1131. vllm/model_executor/models/gemma2.py +435 -0
  1132. vllm/model_executor/models/gemma3.py +520 -0
  1133. vllm/model_executor/models/gemma3_mm.py +664 -0
  1134. vllm/model_executor/models/gemma3n.py +1166 -0
  1135. vllm/model_executor/models/gemma3n_audio_utils.py +57 -0
  1136. vllm/model_executor/models/gemma3n_mm.py +820 -0
  1137. vllm/model_executor/models/glm.py +24 -0
  1138. vllm/model_executor/models/glm4.py +295 -0
  1139. vllm/model_executor/models/glm4_1v.py +1823 -0
  1140. vllm/model_executor/models/glm4_moe.py +725 -0
  1141. vllm/model_executor/models/glm4_moe_mtp.py +365 -0
  1142. vllm/model_executor/models/glm4v.py +783 -0
  1143. vllm/model_executor/models/glmasr.py +1154 -0
  1144. vllm/model_executor/models/glmasr_utils.py +188 -0
  1145. vllm/model_executor/models/gpt2.py +385 -0
  1146. vllm/model_executor/models/gpt_bigcode.py +339 -0
  1147. vllm/model_executor/models/gpt_j.py +346 -0
  1148. vllm/model_executor/models/gpt_neox.py +340 -0
  1149. vllm/model_executor/models/gpt_oss.py +745 -0
  1150. vllm/model_executor/models/granite.py +475 -0
  1151. vllm/model_executor/models/granite_speech.py +919 -0
  1152. vllm/model_executor/models/granitemoe.py +561 -0
  1153. vllm/model_executor/models/granitemoehybrid.py +703 -0
  1154. vllm/model_executor/models/granitemoeshared.py +328 -0
  1155. vllm/model_executor/models/gritlm.py +242 -0
  1156. vllm/model_executor/models/grok1.py +803 -0
  1157. vllm/model_executor/models/h2ovl.py +554 -0
  1158. vllm/model_executor/models/hunyuan_v1.py +1042 -0
  1159. vllm/model_executor/models/hunyuan_vision.py +1034 -0
  1160. vllm/model_executor/models/hyperclovax_vision.py +1163 -0
  1161. vllm/model_executor/models/idefics2_vision_model.py +427 -0
  1162. vllm/model_executor/models/idefics3.py +734 -0
  1163. vllm/model_executor/models/interfaces.py +1180 -0
  1164. vllm/model_executor/models/interfaces_base.py +252 -0
  1165. vllm/model_executor/models/intern_vit.py +454 -0
  1166. vllm/model_executor/models/internlm2.py +451 -0
  1167. vllm/model_executor/models/internlm2_ve.py +139 -0
  1168. vllm/model_executor/models/interns1.py +828 -0
  1169. vllm/model_executor/models/interns1_vit.py +433 -0
  1170. vllm/model_executor/models/internvl.py +1436 -0
  1171. vllm/model_executor/models/iquest_loopcoder.py +595 -0
  1172. vllm/model_executor/models/isaac.py +1503 -0
  1173. vllm/model_executor/models/jais.py +397 -0
  1174. vllm/model_executor/models/jais2.py +508 -0
  1175. vllm/model_executor/models/jamba.py +599 -0
  1176. vllm/model_executor/models/jina_vl.py +145 -0
  1177. vllm/model_executor/models/kanana_v.py +756 -0
  1178. vllm/model_executor/models/keye.py +1709 -0
  1179. vllm/model_executor/models/keye_vl1_5.py +726 -0
  1180. vllm/model_executor/models/kimi_linear.py +659 -0
  1181. vllm/model_executor/models/kimi_vl.py +577 -0
  1182. vllm/model_executor/models/lfm2.py +515 -0
  1183. vllm/model_executor/models/lfm2_moe.py +746 -0
  1184. vllm/model_executor/models/lfm2_vl.py +732 -0
  1185. vllm/model_executor/models/lightonocr.py +197 -0
  1186. vllm/model_executor/models/llama.py +724 -0
  1187. vllm/model_executor/models/llama4.py +860 -0
  1188. vllm/model_executor/models/llama4_eagle.py +225 -0
  1189. vllm/model_executor/models/llama_eagle.py +213 -0
  1190. vllm/model_executor/models/llama_eagle3.py +375 -0
  1191. vllm/model_executor/models/llava.py +879 -0
  1192. vllm/model_executor/models/llava_next.py +583 -0
  1193. vllm/model_executor/models/llava_next_video.py +467 -0
  1194. vllm/model_executor/models/llava_onevision.py +922 -0
  1195. vllm/model_executor/models/longcat_flash.py +767 -0
  1196. vllm/model_executor/models/longcat_flash_mtp.py +348 -0
  1197. vllm/model_executor/models/mamba.py +276 -0
  1198. vllm/model_executor/models/mamba2.py +288 -0
  1199. vllm/model_executor/models/medusa.py +179 -0
  1200. vllm/model_executor/models/midashenglm.py +826 -0
  1201. vllm/model_executor/models/mimo.py +188 -0
  1202. vllm/model_executor/models/mimo_mtp.py +294 -0
  1203. vllm/model_executor/models/mimo_v2_flash.py +718 -0
  1204. vllm/model_executor/models/minicpm.py +660 -0
  1205. vllm/model_executor/models/minicpm3.py +233 -0
  1206. vllm/model_executor/models/minicpm_eagle.py +386 -0
  1207. vllm/model_executor/models/minicpmo.py +768 -0
  1208. vllm/model_executor/models/minicpmv.py +1742 -0
  1209. vllm/model_executor/models/minimax_m2.py +552 -0
  1210. vllm/model_executor/models/minimax_text_01.py +1008 -0
  1211. vllm/model_executor/models/minimax_vl_01.py +395 -0
  1212. vllm/model_executor/models/mistral3.py +638 -0
  1213. vllm/model_executor/models/mistral_large_3.py +63 -0
  1214. vllm/model_executor/models/mistral_large_3_eagle.py +137 -0
  1215. vllm/model_executor/models/mixtral.py +599 -0
  1216. vllm/model_executor/models/mllama4.py +1170 -0
  1217. vllm/model_executor/models/mlp_speculator.py +235 -0
  1218. vllm/model_executor/models/modernbert.py +458 -0
  1219. vllm/model_executor/models/module_mapping.py +74 -0
  1220. vllm/model_executor/models/molmo.py +1592 -0
  1221. vllm/model_executor/models/moonvit.py +601 -0
  1222. vllm/model_executor/models/mpt.py +335 -0
  1223. vllm/model_executor/models/nano_nemotron_vl.py +1725 -0
  1224. vllm/model_executor/models/nemotron.py +499 -0
  1225. vllm/model_executor/models/nemotron_h.py +902 -0
  1226. vllm/model_executor/models/nemotron_nas.py +474 -0
  1227. vllm/model_executor/models/nemotron_parse.py +958 -0
  1228. vllm/model_executor/models/nemotron_vl.py +651 -0
  1229. vllm/model_executor/models/nvlm_d.py +216 -0
  1230. vllm/model_executor/models/olmo.py +412 -0
  1231. vllm/model_executor/models/olmo2.py +454 -0
  1232. vllm/model_executor/models/olmoe.py +498 -0
  1233. vllm/model_executor/models/opencua.py +262 -0
  1234. vllm/model_executor/models/openpangu.py +1378 -0
  1235. vllm/model_executor/models/openpangu_mtp.py +265 -0
  1236. vllm/model_executor/models/opt.py +426 -0
  1237. vllm/model_executor/models/orion.py +365 -0
  1238. vllm/model_executor/models/ouro.py +507 -0
  1239. vllm/model_executor/models/ovis.py +557 -0
  1240. vllm/model_executor/models/ovis2_5.py +661 -0
  1241. vllm/model_executor/models/paddleocr_vl.py +1261 -0
  1242. vllm/model_executor/models/paligemma.py +429 -0
  1243. vllm/model_executor/models/persimmon.py +373 -0
  1244. vllm/model_executor/models/phi.py +363 -0
  1245. vllm/model_executor/models/phi3.py +18 -0
  1246. vllm/model_executor/models/phi3v.py +729 -0
  1247. vllm/model_executor/models/phi4mm.py +1250 -0
  1248. vllm/model_executor/models/phi4mm_audio.py +1296 -0
  1249. vllm/model_executor/models/phi4mm_utils.py +1907 -0
  1250. vllm/model_executor/models/phimoe.py +671 -0
  1251. vllm/model_executor/models/pixtral.py +1437 -0
  1252. vllm/model_executor/models/plamo2.py +993 -0
  1253. vllm/model_executor/models/plamo3.py +437 -0
  1254. vllm/model_executor/models/qwen.py +377 -0
  1255. vllm/model_executor/models/qwen2.py +600 -0
  1256. vllm/model_executor/models/qwen2_5_omni_thinker.py +1200 -0
  1257. vllm/model_executor/models/qwen2_5_vl.py +1598 -0
  1258. vllm/model_executor/models/qwen2_audio.py +478 -0
  1259. vllm/model_executor/models/qwen2_moe.py +604 -0
  1260. vllm/model_executor/models/qwen2_rm.py +120 -0
  1261. vllm/model_executor/models/qwen2_vl.py +1588 -0
  1262. vllm/model_executor/models/qwen3.py +331 -0
  1263. vllm/model_executor/models/qwen3_moe.py +752 -0
  1264. vllm/model_executor/models/qwen3_next.py +1410 -0
  1265. vllm/model_executor/models/qwen3_next_mtp.py +293 -0
  1266. vllm/model_executor/models/qwen3_omni_moe_thinker.py +1814 -0
  1267. vllm/model_executor/models/qwen3_vl.py +2120 -0
  1268. vllm/model_executor/models/qwen3_vl_moe.py +474 -0
  1269. vllm/model_executor/models/qwen_vl.py +821 -0
  1270. vllm/model_executor/models/radio.py +573 -0
  1271. vllm/model_executor/models/registry.py +1218 -0
  1272. vllm/model_executor/models/roberta.py +239 -0
  1273. vllm/model_executor/models/rvl.py +107 -0
  1274. vllm/model_executor/models/seed_oss.py +492 -0
  1275. vllm/model_executor/models/siglip.py +1259 -0
  1276. vllm/model_executor/models/siglip2.py +495 -0
  1277. vllm/model_executor/models/siglip2navit.py +660 -0
  1278. vllm/model_executor/models/skyworkr1v.py +951 -0
  1279. vllm/model_executor/models/smolvlm.py +38 -0
  1280. vllm/model_executor/models/solar.py +484 -0
  1281. vllm/model_executor/models/stablelm.py +354 -0
  1282. vllm/model_executor/models/starcoder2.py +365 -0
  1283. vllm/model_executor/models/step3_text.py +554 -0
  1284. vllm/model_executor/models/step3_vl.py +1147 -0
  1285. vllm/model_executor/models/swin.py +500 -0
  1286. vllm/model_executor/models/tarsier.py +624 -0
  1287. vllm/model_executor/models/telechat2.py +153 -0
  1288. vllm/model_executor/models/teleflm.py +78 -0
  1289. vllm/model_executor/models/terratorch.py +318 -0
  1290. vllm/model_executor/models/transformers/__init__.py +127 -0
  1291. vllm/model_executor/models/transformers/base.py +523 -0
  1292. vllm/model_executor/models/transformers/causal.py +65 -0
  1293. vllm/model_executor/models/transformers/legacy.py +90 -0
  1294. vllm/model_executor/models/transformers/moe.py +329 -0
  1295. vllm/model_executor/models/transformers/multimodal.py +441 -0
  1296. vllm/model_executor/models/transformers/pooling.py +102 -0
  1297. vllm/model_executor/models/transformers/utils.py +253 -0
  1298. vllm/model_executor/models/ultravox.py +786 -0
  1299. vllm/model_executor/models/utils.py +832 -0
  1300. vllm/model_executor/models/vision.py +546 -0
  1301. vllm/model_executor/models/voxtral.py +867 -0
  1302. vllm/model_executor/models/voxtral_streaming.py +304 -0
  1303. vllm/model_executor/models/whisper.py +993 -0
  1304. vllm/model_executor/models/whisper_utils.py +299 -0
  1305. vllm/model_executor/models/zamba2.py +986 -0
  1306. vllm/model_executor/parameter.py +642 -0
  1307. vllm/model_executor/utils.py +113 -0
  1308. vllm/model_executor/warmup/__init__.py +0 -0
  1309. vllm/model_executor/warmup/deep_gemm_warmup.py +371 -0
  1310. vllm/model_executor/warmup/kernel_warmup.py +97 -0
  1311. vllm/model_inspection.py +136 -0
  1312. vllm/multimodal/__init__.py +38 -0
  1313. vllm/multimodal/audio.py +287 -0
  1314. vllm/multimodal/base.py +60 -0
  1315. vllm/multimodal/cache.py +829 -0
  1316. vllm/multimodal/evs.py +294 -0
  1317. vllm/multimodal/hasher.py +123 -0
  1318. vllm/multimodal/image.py +155 -0
  1319. vllm/multimodal/inputs.py +1027 -0
  1320. vllm/multimodal/parse.py +674 -0
  1321. vllm/multimodal/processing.py +2469 -0
  1322. vllm/multimodal/profiling.py +351 -0
  1323. vllm/multimodal/registry.py +375 -0
  1324. vllm/multimodal/utils.py +550 -0
  1325. vllm/multimodal/video.py +512 -0
  1326. vllm/outputs.py +347 -0
  1327. vllm/platforms/__init__.py +277 -0
  1328. vllm/platforms/cpu.py +423 -0
  1329. vllm/platforms/cuda.py +618 -0
  1330. vllm/platforms/interface.py +707 -0
  1331. vllm/platforms/rocm.py +586 -0
  1332. vllm/platforms/tpu.py +20 -0
  1333. vllm/platforms/xpu.py +262 -0
  1334. vllm/plugins/__init__.py +81 -0
  1335. vllm/plugins/io_processors/__init__.py +68 -0
  1336. vllm/plugins/io_processors/interface.py +77 -0
  1337. vllm/plugins/lora_resolvers/__init__.py +0 -0
  1338. vllm/plugins/lora_resolvers/filesystem_resolver.py +52 -0
  1339. vllm/pooling_params.py +229 -0
  1340. vllm/profiler/__init__.py +0 -0
  1341. vllm/profiler/layerwise_profile.py +392 -0
  1342. vllm/profiler/utils.py +151 -0
  1343. vllm/profiler/wrapper.py +241 -0
  1344. vllm/py.typed +2 -0
  1345. vllm/ray/__init__.py +0 -0
  1346. vllm/ray/lazy_utils.py +30 -0
  1347. vllm/ray/ray_env.py +79 -0
  1348. vllm/reasoning/__init__.py +96 -0
  1349. vllm/reasoning/abs_reasoning_parsers.py +318 -0
  1350. vllm/reasoning/basic_parsers.py +175 -0
  1351. vllm/reasoning/deepseek_r1_reasoning_parser.py +67 -0
  1352. vllm/reasoning/deepseek_v3_reasoning_parser.py +69 -0
  1353. vllm/reasoning/ernie45_reasoning_parser.py +165 -0
  1354. vllm/reasoning/glm4_moe_reasoning_parser.py +13 -0
  1355. vllm/reasoning/gptoss_reasoning_parser.py +173 -0
  1356. vllm/reasoning/granite_reasoning_parser.py +363 -0
  1357. vllm/reasoning/holo2_reasoning_parser.py +89 -0
  1358. vllm/reasoning/hunyuan_a13b_reasoning_parser.py +237 -0
  1359. vllm/reasoning/identity_reasoning_parser.py +63 -0
  1360. vllm/reasoning/minimax_m2_reasoning_parser.py +110 -0
  1361. vllm/reasoning/mistral_reasoning_parser.py +154 -0
  1362. vllm/reasoning/olmo3_reasoning_parser.py +302 -0
  1363. vllm/reasoning/qwen3_reasoning_parser.py +67 -0
  1364. vllm/reasoning/seedoss_reasoning_parser.py +27 -0
  1365. vllm/reasoning/step3_reasoning_parser.py +113 -0
  1366. vllm/sampling_params.py +629 -0
  1367. vllm/scalar_type.py +355 -0
  1368. vllm/scripts.py +17 -0
  1369. vllm/sequence.py +64 -0
  1370. vllm/tasks.py +13 -0
  1371. vllm/third_party/__init__.py +0 -0
  1372. vllm/third_party/pynvml.py +6140 -0
  1373. vllm/tokenizers/__init__.py +18 -0
  1374. vllm/tokenizers/deepseek_v32.py +187 -0
  1375. vllm/tokenizers/deepseek_v32_encoding.py +463 -0
  1376. vllm/tokenizers/detokenizer_utils.py +198 -0
  1377. vllm/tokenizers/grok2.py +443 -0
  1378. vllm/tokenizers/hf.py +119 -0
  1379. vllm/tokenizers/mistral.py +543 -0
  1380. vllm/tokenizers/protocol.py +123 -0
  1381. vllm/tokenizers/registry.py +238 -0
  1382. vllm/tool_parsers/__init__.py +158 -0
  1383. vllm/tool_parsers/abstract_tool_parser.py +274 -0
  1384. vllm/tool_parsers/deepseekv31_tool_parser.py +388 -0
  1385. vllm/tool_parsers/deepseekv32_tool_parser.py +591 -0
  1386. vllm/tool_parsers/deepseekv3_tool_parser.py +390 -0
  1387. vllm/tool_parsers/ernie45_tool_parser.py +210 -0
  1388. vllm/tool_parsers/functiongemma_tool_parser.py +321 -0
  1389. vllm/tool_parsers/gigachat3_tool_parser.py +190 -0
  1390. vllm/tool_parsers/glm47_moe_tool_parser.py +23 -0
  1391. vllm/tool_parsers/glm4_moe_tool_parser.py +215 -0
  1392. vllm/tool_parsers/granite_20b_fc_tool_parser.py +273 -0
  1393. vllm/tool_parsers/granite_tool_parser.py +253 -0
  1394. vllm/tool_parsers/hermes_tool_parser.py +495 -0
  1395. vllm/tool_parsers/hunyuan_a13b_tool_parser.py +420 -0
  1396. vllm/tool_parsers/internlm2_tool_parser.py +227 -0
  1397. vllm/tool_parsers/jamba_tool_parser.py +323 -0
  1398. vllm/tool_parsers/kimi_k2_tool_parser.py +598 -0
  1399. vllm/tool_parsers/llama4_pythonic_tool_parser.py +341 -0
  1400. vllm/tool_parsers/llama_tool_parser.py +324 -0
  1401. vllm/tool_parsers/longcat_tool_parser.py +37 -0
  1402. vllm/tool_parsers/minimax_m2_tool_parser.py +776 -0
  1403. vllm/tool_parsers/minimax_tool_parser.py +849 -0
  1404. vllm/tool_parsers/mistral_tool_parser.py +612 -0
  1405. vllm/tool_parsers/olmo3_tool_parser.py +366 -0
  1406. vllm/tool_parsers/openai_tool_parser.py +111 -0
  1407. vllm/tool_parsers/phi4mini_tool_parser.py +120 -0
  1408. vllm/tool_parsers/pythonic_tool_parser.py +332 -0
  1409. vllm/tool_parsers/qwen3coder_tool_parser.py +781 -0
  1410. vllm/tool_parsers/qwen3xml_tool_parser.py +1316 -0
  1411. vllm/tool_parsers/seed_oss_tool_parser.py +744 -0
  1412. vllm/tool_parsers/step3_tool_parser.py +303 -0
  1413. vllm/tool_parsers/utils.py +229 -0
  1414. vllm/tool_parsers/xlam_tool_parser.py +556 -0
  1415. vllm/tracing.py +135 -0
  1416. vllm/transformers_utils/__init__.py +26 -0
  1417. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1418. vllm/transformers_utils/chat_templates/registry.py +73 -0
  1419. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1420. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1421. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1422. vllm/transformers_utils/chat_templates/template_deepseek_ocr.jinja +14 -0
  1423. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1424. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1425. vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
  1426. vllm/transformers_utils/config.py +1169 -0
  1427. vllm/transformers_utils/config_parser_base.py +20 -0
  1428. vllm/transformers_utils/configs/__init__.py +106 -0
  1429. vllm/transformers_utils/configs/afmoe.py +87 -0
  1430. vllm/transformers_utils/configs/arctic.py +216 -0
  1431. vllm/transformers_utils/configs/bagel.py +53 -0
  1432. vllm/transformers_utils/configs/chatglm.py +75 -0
  1433. vllm/transformers_utils/configs/deepseek_vl2.py +126 -0
  1434. vllm/transformers_utils/configs/dotsocr.py +71 -0
  1435. vllm/transformers_utils/configs/eagle.py +90 -0
  1436. vllm/transformers_utils/configs/falcon.py +89 -0
  1437. vllm/transformers_utils/configs/flex_olmo.py +82 -0
  1438. vllm/transformers_utils/configs/hunyuan_vl.py +322 -0
  1439. vllm/transformers_utils/configs/isaac.py +100 -0
  1440. vllm/transformers_utils/configs/jais.py +243 -0
  1441. vllm/transformers_utils/configs/kimi_linear.py +148 -0
  1442. vllm/transformers_utils/configs/kimi_vl.py +38 -0
  1443. vllm/transformers_utils/configs/lfm2_moe.py +163 -0
  1444. vllm/transformers_utils/configs/medusa.py +65 -0
  1445. vllm/transformers_utils/configs/midashenglm.py +103 -0
  1446. vllm/transformers_utils/configs/mistral.py +263 -0
  1447. vllm/transformers_utils/configs/mlp_speculator.py +69 -0
  1448. vllm/transformers_utils/configs/moonvit.py +33 -0
  1449. vllm/transformers_utils/configs/nemotron.py +220 -0
  1450. vllm/transformers_utils/configs/nemotron_h.py +284 -0
  1451. vllm/transformers_utils/configs/olmo3.py +83 -0
  1452. vllm/transformers_utils/configs/ovis.py +182 -0
  1453. vllm/transformers_utils/configs/qwen3_next.py +277 -0
  1454. vllm/transformers_utils/configs/radio.py +98 -0
  1455. vllm/transformers_utils/configs/speculators/__init__.py +2 -0
  1456. vllm/transformers_utils/configs/speculators/algos.py +38 -0
  1457. vllm/transformers_utils/configs/speculators/base.py +114 -0
  1458. vllm/transformers_utils/configs/step3_vl.py +178 -0
  1459. vllm/transformers_utils/configs/tarsier2.py +24 -0
  1460. vllm/transformers_utils/configs/ultravox.py +120 -0
  1461. vllm/transformers_utils/dynamic_module.py +70 -0
  1462. vllm/transformers_utils/gguf_utils.py +280 -0
  1463. vllm/transformers_utils/model_arch_config_convertor.py +402 -0
  1464. vllm/transformers_utils/processor.py +424 -0
  1465. vllm/transformers_utils/processors/__init__.py +25 -0
  1466. vllm/transformers_utils/processors/bagel.py +78 -0
  1467. vllm/transformers_utils/processors/deepseek_ocr.py +438 -0
  1468. vllm/transformers_utils/processors/deepseek_vl2.py +406 -0
  1469. vllm/transformers_utils/processors/hunyuan_vl.py +233 -0
  1470. vllm/transformers_utils/processors/hunyuan_vl_image.py +477 -0
  1471. vllm/transformers_utils/processors/ovis.py +453 -0
  1472. vllm/transformers_utils/processors/ovis2_5.py +468 -0
  1473. vllm/transformers_utils/repo_utils.py +287 -0
  1474. vllm/transformers_utils/runai_utils.py +102 -0
  1475. vllm/transformers_utils/s3_utils.py +95 -0
  1476. vllm/transformers_utils/tokenizer.py +19 -0
  1477. vllm/transformers_utils/utils.py +112 -0
  1478. vllm/triton_utils/__init__.py +20 -0
  1479. vllm/triton_utils/importing.py +103 -0
  1480. vllm/usage/__init__.py +0 -0
  1481. vllm/usage/usage_lib.py +278 -0
  1482. vllm/utils/__init__.py +36 -0
  1483. vllm/utils/argparse_utils.py +491 -0
  1484. vllm/utils/async_utils.py +310 -0
  1485. vllm/utils/cache.py +214 -0
  1486. vllm/utils/collection_utils.py +112 -0
  1487. vllm/utils/counter.py +45 -0
  1488. vllm/utils/deep_gemm.py +424 -0
  1489. vllm/utils/flashinfer.py +602 -0
  1490. vllm/utils/func_utils.py +236 -0
  1491. vllm/utils/gc_utils.py +151 -0
  1492. vllm/utils/hashing.py +117 -0
  1493. vllm/utils/import_utils.py +438 -0
  1494. vllm/utils/jsontree.py +158 -0
  1495. vllm/utils/math_utils.py +32 -0
  1496. vllm/utils/mem_constants.py +13 -0
  1497. vllm/utils/mem_utils.py +285 -0
  1498. vllm/utils/nccl.py +64 -0
  1499. vllm/utils/network_utils.py +331 -0
  1500. vllm/utils/nvtx_pytorch_hooks.py +286 -0
  1501. vllm/utils/platform_utils.py +59 -0
  1502. vllm/utils/profiling.py +56 -0
  1503. vllm/utils/registry.py +51 -0
  1504. vllm/utils/serial_utils.py +214 -0
  1505. vllm/utils/system_utils.py +296 -0
  1506. vllm/utils/tensor_schema.py +255 -0
  1507. vllm/utils/torch_utils.py +781 -0
  1508. vllm/v1/__init__.py +0 -0
  1509. vllm/v1/attention/__init__.py +0 -0
  1510. vllm/v1/attention/backend.py +736 -0
  1511. vllm/v1/attention/backends/__init__.py +0 -0
  1512. vllm/v1/attention/backends/cpu_attn.py +501 -0
  1513. vllm/v1/attention/backends/fa_utils.py +126 -0
  1514. vllm/v1/attention/backends/flash_attn.py +1092 -0
  1515. vllm/v1/attention/backends/flash_attn_diffkv.py +277 -0
  1516. vllm/v1/attention/backends/flashinfer.py +1713 -0
  1517. vllm/v1/attention/backends/flex_attention.py +1024 -0
  1518. vllm/v1/attention/backends/gdn_attn.py +382 -0
  1519. vllm/v1/attention/backends/linear_attn.py +77 -0
  1520. vllm/v1/attention/backends/mamba1_attn.py +28 -0
  1521. vllm/v1/attention/backends/mamba2_attn.py +256 -0
  1522. vllm/v1/attention/backends/mamba_attn.py +313 -0
  1523. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1524. vllm/v1/attention/backends/mla/aiter_triton_mla.py +66 -0
  1525. vllm/v1/attention/backends/mla/common.py +2156 -0
  1526. vllm/v1/attention/backends/mla/cutlass_mla.py +278 -0
  1527. vllm/v1/attention/backends/mla/flashattn_mla.py +348 -0
  1528. vllm/v1/attention/backends/mla/flashinfer_mla.py +175 -0
  1529. vllm/v1/attention/backends/mla/flashmla.py +321 -0
  1530. vllm/v1/attention/backends/mla/flashmla_sparse.py +1021 -0
  1531. vllm/v1/attention/backends/mla/indexer.py +345 -0
  1532. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +284 -0
  1533. vllm/v1/attention/backends/mla/rocm_aiter_mla_sparse.py +321 -0
  1534. vllm/v1/attention/backends/mla/triton_mla.py +171 -0
  1535. vllm/v1/attention/backends/registry.py +258 -0
  1536. vllm/v1/attention/backends/rocm_aiter_fa.py +1000 -0
  1537. vllm/v1/attention/backends/rocm_aiter_unified_attn.py +206 -0
  1538. vllm/v1/attention/backends/rocm_attn.py +405 -0
  1539. vllm/v1/attention/backends/short_conv_attn.py +26 -0
  1540. vllm/v1/attention/backends/tree_attn.py +430 -0
  1541. vllm/v1/attention/backends/triton_attn.py +578 -0
  1542. vllm/v1/attention/backends/utils.py +978 -0
  1543. vllm/v1/attention/ops/__init__.py +0 -0
  1544. vllm/v1/attention/ops/chunked_prefill_paged_decode.py +459 -0
  1545. vllm/v1/attention/ops/common.py +469 -0
  1546. vllm/v1/attention/ops/flashmla.py +254 -0
  1547. vllm/v1/attention/ops/merge_attn_states.py +47 -0
  1548. vllm/v1/attention/ops/paged_attn.py +51 -0
  1549. vllm/v1/attention/ops/pallas_kv_cache_update.py +130 -0
  1550. vllm/v1/attention/ops/prefix_prefill.py +862 -0
  1551. vllm/v1/attention/ops/rocm_aiter_mla_sparse.py +210 -0
  1552. vllm/v1/attention/ops/triton_decode_attention.py +709 -0
  1553. vllm/v1/attention/ops/triton_merge_attn_states.py +116 -0
  1554. vllm/v1/attention/ops/triton_prefill_attention.py +272 -0
  1555. vllm/v1/attention/ops/triton_reshape_and_cache_flash.py +395 -0
  1556. vllm/v1/attention/ops/triton_unified_attention.py +1088 -0
  1557. vllm/v1/attention/ops/vit_attn_wrappers.py +185 -0
  1558. vllm/v1/attention/selector.py +145 -0
  1559. vllm/v1/core/__init__.py +0 -0
  1560. vllm/v1/core/block_pool.py +489 -0
  1561. vllm/v1/core/encoder_cache_manager.py +402 -0
  1562. vllm/v1/core/kv_cache_coordinator.py +560 -0
  1563. vllm/v1/core/kv_cache_manager.py +485 -0
  1564. vllm/v1/core/kv_cache_metrics.py +96 -0
  1565. vllm/v1/core/kv_cache_utils.py +1642 -0
  1566. vllm/v1/core/sched/__init__.py +0 -0
  1567. vllm/v1/core/sched/async_scheduler.py +66 -0
  1568. vllm/v1/core/sched/interface.py +205 -0
  1569. vllm/v1/core/sched/output.py +261 -0
  1570. vllm/v1/core/sched/request_queue.py +208 -0
  1571. vllm/v1/core/sched/scheduler.py +1936 -0
  1572. vllm/v1/core/sched/utils.py +64 -0
  1573. vllm/v1/core/single_type_kv_cache_manager.py +926 -0
  1574. vllm/v1/cudagraph_dispatcher.py +183 -0
  1575. vllm/v1/engine/__init__.py +224 -0
  1576. vllm/v1/engine/async_llm.py +874 -0
  1577. vllm/v1/engine/coordinator.py +396 -0
  1578. vllm/v1/engine/core.py +1614 -0
  1579. vllm/v1/engine/core_client.py +1422 -0
  1580. vllm/v1/engine/detokenizer.py +351 -0
  1581. vllm/v1/engine/exceptions.py +18 -0
  1582. vllm/v1/engine/input_processor.py +713 -0
  1583. vllm/v1/engine/llm_engine.py +415 -0
  1584. vllm/v1/engine/logprobs.py +245 -0
  1585. vllm/v1/engine/output_processor.py +715 -0
  1586. vllm/v1/engine/parallel_sampling.py +150 -0
  1587. vllm/v1/engine/utils.py +1086 -0
  1588. vllm/v1/executor/__init__.py +6 -0
  1589. vllm/v1/executor/abstract.py +352 -0
  1590. vllm/v1/executor/multiproc_executor.py +888 -0
  1591. vllm/v1/executor/ray_distributed_executor.py +8 -0
  1592. vllm/v1/executor/ray_executor.py +623 -0
  1593. vllm/v1/executor/ray_utils.py +468 -0
  1594. vllm/v1/executor/uniproc_executor.py +186 -0
  1595. vllm/v1/kv_cache_interface.py +485 -0
  1596. vllm/v1/kv_offload/__init__.py +0 -0
  1597. vllm/v1/kv_offload/abstract.py +161 -0
  1598. vllm/v1/kv_offload/arc_manager.py +237 -0
  1599. vllm/v1/kv_offload/backend.py +97 -0
  1600. vllm/v1/kv_offload/backends/__init__.py +0 -0
  1601. vllm/v1/kv_offload/backends/cpu.py +62 -0
  1602. vllm/v1/kv_offload/cpu.py +109 -0
  1603. vllm/v1/kv_offload/factory.py +58 -0
  1604. vllm/v1/kv_offload/lru_manager.py +139 -0
  1605. vllm/v1/kv_offload/mediums.py +39 -0
  1606. vllm/v1/kv_offload/spec.py +70 -0
  1607. vllm/v1/kv_offload/worker/__init__.py +0 -0
  1608. vllm/v1/kv_offload/worker/cpu_gpu.py +287 -0
  1609. vllm/v1/kv_offload/worker/worker.py +163 -0
  1610. vllm/v1/metrics/__init__.py +0 -0
  1611. vllm/v1/metrics/loggers.py +1320 -0
  1612. vllm/v1/metrics/perf.py +1244 -0
  1613. vllm/v1/metrics/prometheus.py +82 -0
  1614. vllm/v1/metrics/ray_wrappers.py +194 -0
  1615. vllm/v1/metrics/reader.py +257 -0
  1616. vllm/v1/metrics/stats.py +440 -0
  1617. vllm/v1/outputs.py +242 -0
  1618. vllm/v1/pool/__init__.py +0 -0
  1619. vllm/v1/pool/metadata.py +124 -0
  1620. vllm/v1/request.py +281 -0
  1621. vllm/v1/sample/__init__.py +0 -0
  1622. vllm/v1/sample/logits_processor/__init__.py +352 -0
  1623. vllm/v1/sample/logits_processor/builtin.py +278 -0
  1624. vllm/v1/sample/logits_processor/interface.py +106 -0
  1625. vllm/v1/sample/logits_processor/state.py +165 -0
  1626. vllm/v1/sample/metadata.py +44 -0
  1627. vllm/v1/sample/ops/__init__.py +0 -0
  1628. vllm/v1/sample/ops/bad_words.py +57 -0
  1629. vllm/v1/sample/ops/logprobs.py +25 -0
  1630. vllm/v1/sample/ops/penalties.py +57 -0
  1631. vllm/v1/sample/ops/topk_topp_sampler.py +388 -0
  1632. vllm/v1/sample/rejection_sampler.py +822 -0
  1633. vllm/v1/sample/sampler.py +319 -0
  1634. vllm/v1/sample/tpu/__init__.py +0 -0
  1635. vllm/v1/sample/tpu/metadata.py +120 -0
  1636. vllm/v1/sample/tpu/sampler.py +215 -0
  1637. vllm/v1/serial_utils.py +514 -0
  1638. vllm/v1/spec_decode/__init__.py +0 -0
  1639. vllm/v1/spec_decode/eagle.py +1346 -0
  1640. vllm/v1/spec_decode/medusa.py +73 -0
  1641. vllm/v1/spec_decode/metadata.py +66 -0
  1642. vllm/v1/spec_decode/metrics.py +225 -0
  1643. vllm/v1/spec_decode/ngram_proposer.py +281 -0
  1644. vllm/v1/spec_decode/suffix_decoding.py +95 -0
  1645. vllm/v1/spec_decode/utils.py +109 -0
  1646. vllm/v1/structured_output/__init__.py +337 -0
  1647. vllm/v1/structured_output/backend_guidance.py +291 -0
  1648. vllm/v1/structured_output/backend_lm_format_enforcer.py +177 -0
  1649. vllm/v1/structured_output/backend_outlines.py +324 -0
  1650. vllm/v1/structured_output/backend_types.py +136 -0
  1651. vllm/v1/structured_output/backend_xgrammar.py +378 -0
  1652. vllm/v1/structured_output/request.py +91 -0
  1653. vllm/v1/structured_output/utils.py +457 -0
  1654. vllm/v1/utils.py +466 -0
  1655. vllm/v1/worker/__init__.py +0 -0
  1656. vllm/v1/worker/block_table.py +343 -0
  1657. vllm/v1/worker/cp_utils.py +42 -0
  1658. vllm/v1/worker/cpu_model_runner.py +122 -0
  1659. vllm/v1/worker/cpu_worker.py +192 -0
  1660. vllm/v1/worker/dp_utils.py +240 -0
  1661. vllm/v1/worker/ec_connector_model_runner_mixin.py +85 -0
  1662. vllm/v1/worker/gpu/README.md +4 -0
  1663. vllm/v1/worker/gpu/__init__.py +0 -0
  1664. vllm/v1/worker/gpu/async_utils.py +98 -0
  1665. vllm/v1/worker/gpu/attn_utils.py +183 -0
  1666. vllm/v1/worker/gpu/block_table.py +222 -0
  1667. vllm/v1/worker/gpu/buffer_utils.py +224 -0
  1668. vllm/v1/worker/gpu/cudagraph_utils.py +264 -0
  1669. vllm/v1/worker/gpu/dp_utils.py +31 -0
  1670. vllm/v1/worker/gpu/input_batch.py +526 -0
  1671. vllm/v1/worker/gpu/metrics/__init__.py +0 -0
  1672. vllm/v1/worker/gpu/metrics/logits.py +42 -0
  1673. vllm/v1/worker/gpu/mm/__init__.py +0 -0
  1674. vllm/v1/worker/gpu/mm/mrope_utils.py +127 -0
  1675. vllm/v1/worker/gpu/model_runner.py +1005 -0
  1676. vllm/v1/worker/gpu/sample/__init__.py +0 -0
  1677. vllm/v1/worker/gpu/sample/gumbel.py +106 -0
  1678. vllm/v1/worker/gpu/sample/logit_bias.py +270 -0
  1679. vllm/v1/worker/gpu/sample/logprob.py +167 -0
  1680. vllm/v1/worker/gpu/sample/metadata.py +79 -0
  1681. vllm/v1/worker/gpu/sample/min_p.py +58 -0
  1682. vllm/v1/worker/gpu/sample/output.py +14 -0
  1683. vllm/v1/worker/gpu/sample/penalties.py +155 -0
  1684. vllm/v1/worker/gpu/sample/sampler.py +88 -0
  1685. vllm/v1/worker/gpu/spec_decode/__init__.py +18 -0
  1686. vllm/v1/worker/gpu/spec_decode/eagle.py +566 -0
  1687. vllm/v1/worker/gpu/spec_decode/eagle_cudagraph.py +115 -0
  1688. vllm/v1/worker/gpu/spec_decode/rejection_sample.py +71 -0
  1689. vllm/v1/worker/gpu/states.py +282 -0
  1690. vllm/v1/worker/gpu/structured_outputs.py +100 -0
  1691. vllm/v1/worker/gpu_input_batch.py +1030 -0
  1692. vllm/v1/worker/gpu_model_runner.py +5761 -0
  1693. vllm/v1/worker/gpu_ubatch_wrapper.py +475 -0
  1694. vllm/v1/worker/gpu_worker.py +968 -0
  1695. vllm/v1/worker/kv_connector_model_runner_mixin.py +300 -0
  1696. vllm/v1/worker/lora_model_runner_mixin.py +225 -0
  1697. vllm/v1/worker/tpu_input_batch.py +574 -0
  1698. vllm/v1/worker/tpu_worker.py +18 -0
  1699. vllm/v1/worker/ubatch_utils.py +112 -0
  1700. vllm/v1/worker/ubatching.py +242 -0
  1701. vllm/v1/worker/utils.py +400 -0
  1702. vllm/v1/worker/worker_base.py +372 -0
  1703. vllm/v1/worker/workspace.py +253 -0
  1704. vllm/v1/worker/xpu_model_runner.py +48 -0
  1705. vllm/v1/worker/xpu_worker.py +174 -0
  1706. vllm/version.py +39 -0
  1707. vllm/vllm_flash_attn/.gitkeep +0 -0
  1708. vllm_cpu_avx512bf16-0.14.0.dist-info/METADATA +348 -0
  1709. vllm_cpu_avx512bf16-0.14.0.dist-info/RECORD +1712 -0
  1710. vllm_cpu_avx512bf16-0.14.0.dist-info/WHEEL +5 -0
  1711. vllm_cpu_avx512bf16-0.14.0.dist-info/entry_points.txt +5 -0
  1712. vllm_cpu_avx512bf16-0.14.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,2169 @@
+ # SPDX-License-Identifier: Apache-2.0
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+
+ from collections.abc import Callable, Iterable
+ from contextlib import nullcontext
+ from enum import Enum
+ from typing import Literal, cast, get_args, overload
+
+ import torch
+ import torch.nn.functional as F
+ from torch.nn.parameter import UninitializedParameter
+
+ import vllm.envs as envs
+ from vllm._aiter_ops import rocm_aiter_ops
+ from vllm.config import VllmConfig, get_current_vllm_config
+ from vllm.config.parallel import ExpertPlacementStrategy
+ from vllm.distributed import (
+     get_dp_group,
+     get_ep_group,
+     get_pcp_group,
+     get_tensor_model_parallel_world_size,
+     tensor_model_parallel_all_reduce,
+ )
+ from vllm.distributed.eplb.eplb_state import EplbState
+ from vllm.forward_context import ForwardContext, get_forward_context
+ from vllm.logger import init_logger
+ from vllm.model_executor.custom_op import CustomOp
+ from vllm.model_executor.layers.fused_moe.config import (
+     FusedMoEConfig,
+     FusedMoEParallelConfig,
+     FusedMoEQuantConfig,
+     RoutingMethodType,
+ )
+ from vllm.model_executor.layers.fused_moe.fused_moe_router import FusedMoERouter
+ from vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe import (
+     init_aiter_topK_meta_data,
+ )
+ from vllm.model_executor.layers.fused_moe.routed_experts_capturer import (
+     RoutedExpertsCapturer,
+ )
+ from vllm.model_executor.layers.fused_moe.routing_simulator import RoutingSimulator
+ from vllm.model_executor.layers.quantization.base_config import (
+     QuantizationConfig,
+ )
+ from vllm.platforms import current_platform
+ from vllm.utils.flashinfer import has_flashinfer_trtllm_fused_moe
+ from vllm.utils.math_utils import cdiv, round_up
+ from vllm.utils.torch_utils import (
+     aux_stream,
+     current_stream,
+     direct_register_custom_op,
+ )
+ from vllm.v1.worker.ubatching import dbo_current_ubatch_id
+
+ if current_platform.is_cuda_alike():
+     from .fused_moe import eplb_map_to_physical_and_record
+ else:
+
+     def _eplb_map_to_physical_and_record(
+         topk_ids: torch.Tensor,
+         expert_load_view: torch.Tensor,
+         logical_to_physical_map: torch.Tensor,
+         logical_replica_count: torch.Tensor,
+     ) -> torch.Tensor:
+         # CPU fallback: no EPLB, so just return topk_ids as-is
+         return topk_ids
+
+     eplb_map_to_physical_and_record = _eplb_map_to_physical_and_record
+ from vllm.model_executor.layers.fused_moe.fused_moe import GroupedTopk
+ from vllm.model_executor.layers.fused_moe.fused_moe_method_base import (
+     FusedMoEMethodBase,
+ )
+ from vllm.model_executor.layers.fused_moe.fused_moe_modular_method import (
+     FusedMoEModularMethod,
+ )
+ from vllm.model_executor.layers.fused_moe.unquantized_fused_moe_method import (
+     UnquantizedFusedMoEMethod,
+ )
+
+ logger = init_logger(__name__)
+
+
+ class FusedMoeWeightScaleSupported(Enum):
+     TENSOR = "tensor"
+     CHANNEL = "channel"
+     GROUP = "group"
+     BLOCK = "block"
+
+
+ def determine_expert_map(
+     ep_size: int,
+     ep_rank: int,
+     global_num_experts: int,
+     expert_placement_strategy: ExpertPlacementStrategy = "linear",
+     num_fused_shared_experts: int = 0,
+     return_expert_mask: bool = False,
+ ) -> tuple[int, torch.Tensor | None, torch.Tensor | None]:
+     """
+     Calculates how many experts should be assigned to each rank for EP and
+     creates a mapping from global to local expert index. Experts are
+     distributed as evenly as possible across ranks; when the expert count
+     does not divide evenly, ranks with index below the remainder each
+     receive one extra expert.
+
+     Args:
+         ep_size: The size of the expert parallel group.
+         ep_rank: The rank of the current process in the expert parallel
+             group.
+         global_num_experts: The total number of experts in the model.
+         expert_placement_strategy: The expert placement strategy.
+         num_fused_shared_experts: Number of shared experts fused into the
+             routed experts (used by the ROCm AITER fused-MoE path).
+         return_expert_mask: Whether to also build the expert mask used by
+             the AITER fused-MoE kernels.
+
+     Returns:
+         tuple[int, torch.Tensor | None, torch.Tensor | None]: A tuple
+         containing:
+             - local_num_experts (int): The number of experts assigned
+               to the current rank.
+             - expert_map (torch.Tensor | None): A tensor of shape
+               (global_num_experts,) mapping from global to local index.
+               Contains -1 for experts not assigned to the current rank.
+               Returns None if ep_size is 1.
+             - expert_mask (torch.Tensor | None): A tensor of shape
+               (global_num_experts + num_fused_shared_experts + 1,)
+               containing 1 for experts assigned to the current rank and
+               0 elsewhere, with the final entry reserved as a 0-valued
+               sentinel. Returns None if ep_size is 1. Used only when
+               AITER MoE is enabled.
+     """
+     assert ep_size > 0
+     if ep_size == 1:
+         return (global_num_experts, None, None)
+
+     # Distribute experts as evenly as possible to each rank.
+     base_experts = global_num_experts // ep_size
+     remainder = global_num_experts % ep_size
+     local_num_experts = base_experts + 1 if ep_rank < remainder else base_experts
+
+     # Create a tensor of size num_experts filled with -1
+     expert_map = torch.full((global_num_experts,), -1, dtype=torch.int32)
+     # Create an expert map for the local experts
+     if expert_placement_strategy == "linear":
+         start_idx = ep_rank * base_experts + min(ep_rank, remainder)
+         expert_map[start_idx : start_idx + local_num_experts] = torch.arange(
+             0, local_num_experts, dtype=torch.int32
+         )
+     elif expert_placement_strategy == "round_robin":
+         local_log_experts = torch.arange(
+             ep_rank, global_num_experts, ep_size, dtype=torch.int32
+         )
+
+         expert_map[local_log_experts] = torch.arange(
+             0, local_num_experts, dtype=torch.int32
+         )
+     else:
+         raise ValueError(
+             "Unsupported expert placement strategy "
+             f"'{expert_placement_strategy}', expected one of "
+             f"{get_args(ExpertPlacementStrategy)}"
+         )
+
+     expert_mask = None
+     if return_expert_mask:
+         expert_mask = torch.ones(
+             (global_num_experts + num_fused_shared_experts + 1,), dtype=torch.int32
+         )
+         expert_mask[-1] = 0
+         expert_mask[:global_num_experts] = expert_map > -1
+         expert_map = torch.cat(
+             (
+                 expert_map,
+                 torch.tensor(
+                     [local_num_experts + i for i in range(num_fused_shared_experts)],
+                     dtype=torch.int32,
+                 ),
+             ),
+             dim=0,
+         )
+
+     return (local_num_experts, expert_map, expert_mask)
+
+
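For illustration, a standalone sketch of the "linear" branch above (hypothetical sizes, not taken from this wheel): with 10 global experts over 4 EP ranks, base_experts = 2 and remainder = 2, so ranks 0 and 1 each hold three experts and ranks 2 and 3 hold two.

import torch

# Mirrors the "linear" placement arithmetic of determine_expert_map().
ep_size, global_num_experts = 4, 10
base, rem = global_num_experts // ep_size, global_num_experts % ep_size
for ep_rank in range(ep_size):
    local = base + 1 if ep_rank < rem else base
    expert_map = torch.full((global_num_experts,), -1, dtype=torch.int32)
    start = ep_rank * base + min(ep_rank, rem)
    expert_map[start : start + local] = torch.arange(local, dtype=torch.int32)
    print(ep_rank, expert_map.tolist())
# 0 [0, 1, 2, -1, -1, -1, -1, -1, -1, -1]
# 1 [-1, -1, -1, 0, 1, 2, -1, -1, -1, -1]
# 2 [-1, -1, -1, -1, -1, -1, 0, 1, -1, -1]
# 3 [-1, -1, -1, -1, -1, -1, -1, -1, 0, 1]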
+ def determine_expert_placement_strategy(
+     expert_placement_strategy: ExpertPlacementStrategy,
+     moe_parallel_config: FusedMoEParallelConfig,
+     num_expert_group: int | None,
+     num_redundant_experts: int,
+     enable_eplb: bool,
+ ) -> ExpertPlacementStrategy:
+     if expert_placement_strategy == "round_robin":
+         round_robin_supported = (
+             (num_expert_group is not None and num_expert_group > 1)
+             and num_redundant_experts == 0
+             and not enable_eplb
+         )
+
+         if not round_robin_supported:
+             logger.warning(
+                 "Round-robin expert placement is only supported for "
+                 "models with multiple expert groups, no redundant "
+                 "experts, and EPLB disabled. Falling back to linear "
+                 "expert placement."
+             )
+             return "linear"
+         if (
+             moe_parallel_config.use_all2all_kernels
+             and not moe_parallel_config.use_deepep_ll_kernels
+         ):
+             logger.warning(
+                 "Round-robin expert placement currently only supports "
+                 "the DeepEP low-latency backend, but '%s' was configured. "
+                 "Falling back to linear expert placement.",
+                 moe_parallel_config.all2all_backend,
+             )
+             return "linear"
+
+     return expert_placement_strategy
+
+
+ def get_compressed_expert_map(expert_map: torch.Tensor) -> str:
+     """
+     Compresses the expert map by removing any -1 entries.
+
+     Args:
+         expert_map (torch.Tensor): A tensor of shape (global_num_experts,)
+             mapping from global to local index. Contains -1 for experts not
+             assigned to the current rank.
+
+     Returns:
+         str: A string mapping from local to global index. A string is
+             returned (rather than a tensor) so the result is hashable and
+             can be logged exactly once.
+     """
+     global_indices = torch.where(expert_map != -1)[0]
+     local_indices = expert_map[global_indices]
+     return ", ".join(
+         f"{local_index.item()}->{global_index.item()}"
+         for local_index, global_index in zip(local_indices, global_indices)
+     )
+
+
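Applied to the rank-1 map from the sketch above (a hypothetical input), the compression drops the -1 entries and keeps only the owned experts:

import torch

expert_map = torch.tensor([-1, -1, -1, 0, 1, 2, -1, -1, -1, -1], dtype=torch.int32)
global_indices = torch.where(expert_map != -1)[0]   # same logic as the function above
local_indices = expert_map[global_indices]
print(", ".join(f"{l.item()}->{g.item()}"
                for l, g in zip(local_indices, global_indices)))
# prints: 0->3, 1->4, 2->5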
+ def maybe_roundup_hidden_size(
+     hidden_size: int,
+     act_dtype: torch.dtype,
+     quant_config: QuantizationConfig | None,
+     moe_parallel_config: FusedMoEParallelConfig,
+     is_lora_enabled: bool,
+ ) -> int:
+     """
+     Given layer hidden size and MoE configurations, round up hidden_size
+     if necessary.
+
+     Args:
+         hidden_size: Layer hidden size.
+         act_dtype: Data type of the layer activations.
+         quant_config: Fused MoE quantization configuration.
+         moe_parallel_config: Fused MoE parallelization strategy configuration.
+         is_lora_enabled: True if the engine is enabled with LoRA. This
+             is used in the case of mxfp4 quantization when selecting the
+             Mxfp4Backend.
+
+     Returns:
+         The rounded-up hidden_size if rounding up is required by the
+         configs; the original hidden_size otherwise.
+     """
+     from vllm.model_executor.layers.fused_moe.all2all_utils import (
+         maybe_roundup_layer_hidden_size,
+     )
+
+     hidden_size = maybe_roundup_layer_hidden_size(
+         hidden_size, act_dtype, moe_parallel_config
+     )
+
+     # We pad globally so that EP buffer allocation works.
+     if quant_config and quant_config.get_name() == "mxfp4":
+         from vllm.model_executor.layers.quantization.mxfp4 import (
+             Mxfp4Backend,
+             get_mxfp4_backend,
+         )
+
+         current_mxfp4_backend = get_mxfp4_backend(is_lora_enabled)
+         if (
+             current_mxfp4_backend == Mxfp4Backend.SM90_FI_MXFP4_BF16
+             or current_mxfp4_backend == Mxfp4Backend.SM100_FI_MXFP4_MXFP8_CUTLASS
+         ):
+             hidden_size = round_up(hidden_size, 128)
+         elif (
+             current_platform.is_rocm()
+             or current_mxfp4_backend == Mxfp4Backend.SM100_FI_MXFP4_MXFP8_TRTLLM
+             or current_mxfp4_backend == Mxfp4Backend.SM100_FI_MXFP4_BF16
+         ):
+             hidden_size = round_up(hidden_size, 256)
+
+     return hidden_size
+
+
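round_up here is the ceiling-to-multiple helper from vllm/utils/math_utils.py (imported at the top of the file). A quick sketch of the padding arithmetic, using a hypothetical, unaligned hidden size:

def round_up(x: int, multiple: int) -> int:
    # Ceiling of x to the nearest multiple of `multiple`.
    return ((x + multiple - 1) // multiple) * multiple

print(round_up(2880, 128))  # 2944, for the SM90/SM100 CUTLASS backends above
print(round_up(2880, 256))  # 3072, for the ROCm / TRT-LLM backends above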
+ class FusedMoERouterImpl(FusedMoERouter):
+     def __init__(self, layer: "FusedMoE"):
+         super().__init__()
+         self.layer = layer
+
+     @property
+     def routing_method_type(self) -> RoutingMethodType:
+         return self.layer.routing_method_type
+
+     def select_experts(
+         self,
+         hidden_states: torch.Tensor,
+         router_logits: torch.Tensor,
+     ) -> tuple[torch.Tensor, torch.Tensor]:
+         return self.layer._select_experts(hidden_states, router_logits)
+
+
+ # --8<-- [start:fused_moe]
+ @CustomOp.register("fused_moe")
+ class FusedMoE(CustomOp):
+     """FusedMoE layer for MoE models.
+
+     This layer contains both MergedColumnParallel weights (gate_up_proj /
+     w13) and RowParallelLinear weights (down_proj / w2).
+
+     Note: Mixtral uses w1, w2, and w3 for gate, up, and down_proj. We
+     copy that naming convention here and handle any remapping in the
+     load_weights function in each model implementation.
+
+     Args:
+         num_experts: Number of experts in the model.
+         top_k: Number of experts selected for each token.
+         hidden_size: Input hidden state size of the transformer.
+         intermediate_size: Intermediate size of the experts.
+         params_dtype: Data type for the parameters.
+         reduce_results: Whether to all_reduce on the output of the layer.
+         renormalize: Whether to renormalize the logits in the fused_moe kernel.
+         quant_config: Quantization configuration.
+         enable_eplb: Whether to enable the expert parallelism load balancer.
+         router_logits_dtype: Data type for router logits buffers.
+     """
+
+     # --8<-- [end:fused_moe]
+
+     def __init__(
+         self,
+         num_experts: int,  # Global number of experts
+         top_k: int,
+         hidden_size: int,
+         intermediate_size: int,
+         params_dtype: torch.dtype | None = None,
+         reduce_results: bool = False,
+         renormalize: bool = True,
+         use_grouped_topk: bool = False,
+         num_expert_group: int | None = None,
+         topk_group: int | None = None,
+         quant_config: QuantizationConfig | None = None,
+         tp_size: int | None = None,
+         ep_size: int | None = None,
+         dp_size: int | None = None,
+         pcp_size: int | None = None,
+         prefix: str = "",
+         custom_routing_function: Callable | None = None,
+         scoring_func: str = "softmax",
+         routed_scaling_factor: float = 1.0,
+         e_score_correction_bias: torch.Tensor | None = None,
+         apply_router_weight_on_input: bool = False,
+         activation: str = "silu",
+         is_act_and_mul: bool = True,
+         enable_eplb: bool = False,
+         num_redundant_experts: int = 0,
+         has_bias: bool = False,
+         is_sequence_parallel=False,
+         expert_mapping: list[tuple[str, str, int, str]] | None = None,
+         n_shared_experts: int | None = None,
+         routing_method_type: RoutingMethodType | None = None,
+         router_logits_dtype: torch.dtype | None = None,
+     ):
+         super().__init__()
+
+         # Allow disabling of the separate shared experts stream for
+         # debug purposes.
+         # TODO: Remove this after more extensive testing with TP/DP
+         # and other execution modes.
+         if envs.VLLM_DISABLE_SHARED_EXPERTS_STREAM:
+             logger.debug_once("Disabling MoE shared_experts cuda stream", scope="local")
+             self.shared_experts_stream = None
+         else:
+             # TODO(rob): enable shared expert overlap with non-cuda-alike.
+             # aux_stream() returns None on non-cuda-alike platforms.
+             self.shared_experts_stream = aux_stream()
+             if self.shared_experts_stream is not None:
+                 logger.debug_once(
+                     "Enabled separate cuda stream for MoE shared_experts", scope="local"
+                 )
+
+         if params_dtype is None:
+             params_dtype = torch.get_default_dtype()
+         self.params_dtype = params_dtype
+
+         vllm_config = get_current_vllm_config()
+         self.vllm_config = vllm_config
+
+         # FIXME (varun): We should have a better way of inferring the activation
+         # datatype. This works for now as the tensor datatype entering the MoE
+         # operation is typically unquantized (i.e. float16/bfloat16).
+         if vllm_config.model_config is not None:
+             moe_in_dtype = vllm_config.model_config.dtype
+         else:
+             # TODO (bnell): This is a hack to get test_mixtral_moe to work
+             # since model_config is not set in the pytest test.
+             moe_in_dtype = params_dtype
+
+         tp_size_ = (
+             tp_size if tp_size is not None else get_tensor_model_parallel_world_size()
+         )
+         dp_size_ = dp_size if dp_size is not None else get_dp_group().world_size
+         pcp_size_ = pcp_size if pcp_size is not None else get_pcp_group().world_size
+
+         self.is_sequence_parallel = is_sequence_parallel
+         self.sp_size = tp_size_ if is_sequence_parallel else 1
+
+         self.moe_parallel_config: FusedMoEParallelConfig = FusedMoEParallelConfig.make(
+             tp_size_=tp_size_,
+             pcp_size_=pcp_size_,
+             dp_size_=dp_size_,
+             vllm_parallel_config=vllm_config.parallel_config,
+         )
+
+         self.global_num_experts = num_experts + num_redundant_experts
+         self.logical_num_experts = num_experts
+
+         # Expert mapping used in self.load_weights
+         self.expert_mapping = expert_mapping
+
+         # Round up hidden size if needed.
+         hidden_size = maybe_roundup_hidden_size(
+             hidden_size,
+             moe_in_dtype,
+             quant_config,
+             self.moe_parallel_config,
+             is_lora_enabled=self.vllm_config.lora_config is not None,
+         )
+
+         # For smuggling this layer into the fused moe custom op
+         compilation_config = vllm_config.compilation_config
+         if prefix in compilation_config.static_forward_context:
+             raise ValueError("Duplicate layer name: {}".format(prefix))
+         compilation_config.static_forward_context[prefix] = self
+         self.layer_name = prefix
+
+         self.enable_eplb = enable_eplb
+         self.expert_load_view: torch.Tensor | None = None
+         self.logical_to_physical_map: torch.Tensor | None = None
+         self.logical_replica_count: torch.Tensor | None = None
+         self.expert_placement_strategy: ExpertPlacementStrategy = (
+             vllm_config.parallel_config.expert_placement_strategy
+         )
+
+         # ROCm aiter shared experts fusion
+         self.rocm_aiter_fmoe_enabled = rocm_aiter_ops.is_fused_moe_enabled()
+         self.aiter_fmoe_shared_expert_enabled = (
+             rocm_aiter_ops.is_fusion_moe_shared_experts_enabled()
+         )
+
+         self.num_fused_shared_experts = (
+             n_shared_experts
+             if n_shared_experts is not None and self.aiter_fmoe_shared_expert_enabled
+             else 0
+         )
+         if (
+             not self.aiter_fmoe_shared_expert_enabled
+             and self.num_fused_shared_experts != 0
+         ):
+             raise ValueError(
+                 "n_shared_experts is only supported on ROCm aiter when "
+                 "VLLM_ROCM_USE_AITER_FUSION_SHARED_EXPERTS is enabled"
+             )
+
+         # Determine expert maps
+         if self.use_ep:
+             if self.enable_eplb:
+                 assert self.global_num_experts % self.ep_size == 0, (
+                     "EPLB currently only supports even distribution of "
+                     "experts across ranks."
+                 )
+             else:
+                 assert num_redundant_experts == 0, (
+                     "Redundant experts are only supported with EPLB."
+                 )
+
+             self.expert_placement_strategy = determine_expert_placement_strategy(
+                 expert_placement_strategy=self.expert_placement_strategy,
+                 moe_parallel_config=self.moe_parallel_config,
+                 num_expert_group=num_expert_group,
+                 num_redundant_experts=num_redundant_experts,
+                 enable_eplb=self.enable_eplb,
+             )
+
+             self._expert_map: torch.Tensor | None
+             local_num_experts, expert_map, expert_mask = determine_expert_map(
+                 ep_size=self.ep_size,
+                 ep_rank=self.ep_rank,
+                 global_num_experts=self.global_num_experts,
+                 expert_placement_strategy=self.expert_placement_strategy,
+                 num_fused_shared_experts=self.num_fused_shared_experts,
+                 return_expert_mask=self.rocm_aiter_fmoe_enabled,
+             )
+             self.local_num_experts = local_num_experts
+             self.register_buffer("_expert_map", expert_map)
+             self.register_buffer("expert_mask", expert_mask)
+             self._maybe_init_expert_routing_tables()
+             logger.info_once(
+                 "[EP Rank %s/%s] Expert parallelism is enabled. Expert "
+                 "placement strategy: %s. Local/global"
+                 " number of experts: %s/%s. Experts local to global index map:"
+                 " %s.",
+                 self.ep_rank,
+                 self.ep_size,
+                 self.expert_placement_strategy,
+                 self.local_num_experts,
+                 self.global_num_experts,
+                 get_compressed_expert_map(self._expert_map),
+             )
+         else:
+             self.local_num_experts, self._expert_map, self.expert_mask = (
+                 self.global_num_experts,
+                 None,
+                 None,
+             )
+
+         self.top_k = top_k
+
+         self._init_aiter_shared_experts_topK_buffer(
+             vllm_config=vllm_config, dp_size=dp_size_
+         )
+         if self.use_ep and self.rocm_aiter_fmoe_enabled:
+             assert self.expert_mask is None or torch.all(
+                 (expert_mask == 0) | (expert_mask == 1)
+             ), "Aiter Fused MoE kernel only supports expert_map with 0 and 1s."
+
+         assert intermediate_size % self.tp_size == 0
+         self.hidden_size = hidden_size
+         self.intermediate_size_per_partition = intermediate_size // self.tp_size
+         self.reduce_results = reduce_results
+         self.renormalize = renormalize
+         self.use_grouped_topk = use_grouped_topk
+         if self.use_grouped_topk:
+             assert num_expert_group is not None and topk_group is not None
+         self.num_expert_group = num_expert_group
+         self.topk_group = topk_group
+         self.custom_routing_function = custom_routing_function
+         self.scoring_func = scoring_func
+         self.routed_scaling_factor = routed_scaling_factor
+         self.e_score_correction_bias = e_score_correction_bias
+         self.apply_router_weight_on_input = apply_router_weight_on_input
+         self.activation = activation
+
+         self._grouped_topk_impl: GroupedTopk | None = None
+         if self.use_grouped_topk:
+             assert self.num_expert_group is not None
+             assert self.topk_group is not None
+             self._grouped_topk_impl = GroupedTopk(
+                 topk=self.top_k,
+                 renormalize=self.renormalize,
+                 num_expert_group=self.num_expert_group,
+                 topk_group=self.topk_group,
+                 scoring_func=self.scoring_func,
+                 routed_scaling_factor=self.routed_scaling_factor,
+                 num_fused_shared_experts=self.num_fused_shared_experts,
+             )
+
+         if self.scoring_func != "softmax" and not self.use_grouped_topk:
+             raise ValueError(
+                 "Only softmax scoring function is supported for non-grouped topk."
+             )
+
+         # TODO: Better logic to determine the routing method type
+         if routing_method_type is not None:
+             self.routing_method_type: RoutingMethodType = routing_method_type
+         else:
+             if scoring_func == "sigmoid":
+                 if self.use_grouped_topk:
+                     self.routing_method_type = RoutingMethodType.DeepSeekV3
+                 elif self.top_k == 1:
+                     self.routing_method_type = RoutingMethodType.Llama4
+             elif self.scoring_func == "softmax":
+                 self.routing_method_type = (
+                     RoutingMethodType.Renormalize
+                     if not self.renormalize
+                     else RoutingMethodType.RenormalizeNaive
+                 )
+             else:
+                 self.routing_method_type = RoutingMethodType.TopK
+
+         self.moe_config: FusedMoEConfig = FusedMoEConfig(
+             num_experts=self.global_num_experts,
+             experts_per_token=top_k,
+             hidden_dim=hidden_size,
+             num_local_experts=self.local_num_experts,
+             moe_parallel_config=self.moe_parallel_config,
+             in_dtype=moe_in_dtype,
+             router_logits_dtype=router_logits_dtype,
+             max_num_tokens=envs.VLLM_MOE_DP_CHUNK_SIZE,
+             has_bias=has_bias,
+             is_act_and_mul=is_act_and_mul,
+             is_lora_enabled=vllm_config.lora_config is not None,
+         )
+         self.moe_config_use_flashinfer_cutlass_kernels = (
+             self.moe_config.use_flashinfer_cutlass_kernels
+         )
+
+         self.quant_config = quant_config
+
+         def _get_quant_method() -> FusedMoEMethodBase:
+             """
+             Helper method to ensure self.quant_method is never None and
+             of the proper type.
+             """
+             quant_method = None
+             if self.quant_config is not None:
+                 quant_method = self.quant_config.get_quant_method(self, prefix)
+             if quant_method is None:
+                 quant_method = UnquantizedFusedMoEMethod(self.moe_config)
+             assert isinstance(quant_method, FusedMoEMethodBase)
+             return quant_method
+
+         # Note: get_quant_method will look at the layer's local_num_experts
+         # for heuristic purposes, so it must be initialized first.
+         self.quant_method: FusedMoEMethodBase = _get_quant_method()
+
+         if not self.moe_config.is_act_and_mul:
+             # Avoid circular import
+             from vllm.model_executor.layers.quantization.modelopt import (
+                 ModelOptFp8MoEMethod,
+                 ModelOptNvFp4FusedMoE,
+             )
+
+             if not isinstance(
+                 self.quant_method,
+                 (
+                     UnquantizedFusedMoEMethod,
+                     ModelOptFp8MoEMethod,
+                     ModelOptNvFp4FusedMoE,
+                 ),
+             ):
+                 raise NotImplementedError(
+                     "is_act_and_mul=False is supported only for unquantized, "
+                     "ModelOpt FP8, and ModelOpt NvFp4 checkpoints"
+                 )
+             if not current_platform.is_cuda():
+                 raise NotImplementedError(
+                     "is_act_and_mul=False is supported only for CUDA for now"
+                 )
+
+         if self.enable_eplb and not self.quant_method.supports_eplb:
+             # TODO: Add support for additional quantization methods.
+             # The implementation for other quantization methods does not
+             # contain essential differences, but the current quant API
+             # design causes duplicated work when extending to new
+             # quantization methods, so I'm leaving it for now.
+             # If you plan to add support for more quantization methods,
+             # please refer to the implementation in `Fp8MoEMethod`.
+             raise NotImplementedError(
+                 f"EPLB is not supported for {self.quant_method.__class__.__name__}. "
+                 "EPLB is only supported for FP8 quantization for now."
+             )
+
+         moe_quant_params = {
+             "num_experts": self.local_num_experts,
+             "hidden_size": hidden_size,
+             "intermediate_size_per_partition": self.intermediate_size_per_partition,
+             "params_dtype": params_dtype,
+             "weight_loader": self.weight_loader,
+             "global_num_experts": self.global_num_experts,
+         }
+         # need full intermediate size pre-sharding for WNA16 act order
+         if self.quant_method.__class__.__name__ in (
+             "GPTQMarlinMoEMethod",
+             "CompressedTensorsWNA16MarlinMoEMethod",
+             "CompressedTensorsWNA16MoEMethod",
+         ):
+             moe_quant_params["intermediate_size_full"] = intermediate_size
+
+         self.quant_method.create_weights(layer=self, **moe_quant_params)
+
+         # Chunked all2all staging tensor
+         self.batched_hidden_states: torch.Tensor | None = None
+         self.batched_router_logits: torch.Tensor | None = None
+
+         self.router = FusedMoERouterImpl(self)
+
+     # Note: maybe_init_modular_kernel should only be called by
+     # prepare_communication_buffer_for_model.
+     # This is called after all weight loading and post-processing, so it
+     # should be safe to swap out the quant_method.
+     def maybe_init_modular_kernel(self) -> None:
+         self.ensure_moe_quant_config_init()
+         # routing_tables only needed for round-robin expert placement with
+         # DeepEP all2all backend.
+         routing_tables = self._maybe_init_expert_routing_tables()
+         prepare_finalize = self.quant_method.maybe_make_prepare_finalize(
+             routing_tables=routing_tables
+         )
+         if prepare_finalize is not None:
+             logger.debug(
+                 "%s for %s(%s)", prepare_finalize.__class__.__name__, self, id(self)
+             )
+             self.quant_method = FusedMoEModularMethod.make(
+                 self, self.quant_method, prepare_finalize, self.shared_experts
+             )
+
+     @property
+     def shared_experts(self) -> torch.nn.Module | None:
+         return None
+
+     @property
+     def layer_id(self):
+         # Delayed import to avoid circular dependency
+         from vllm.model_executor.models.utils import extract_layer_index
+
+         return extract_layer_index(self.layer_name)
+
+     @property
+     def gate(self) -> torch.nn.Module | None:
+         return None
+
+     @property
+     def tp_size(self):
+         return self.moe_parallel_config.tp_size
+
+     @property
+     def dp_size(self):
+         return self.moe_parallel_config.dp_size
+
+     @property
+     def pcp_size(self):
+         return self.moe_parallel_config.pcp_size
+
+     @property
+     def ep_size(self):
+         return self.moe_parallel_config.ep_size
+
+     @property
+     def tp_rank(self):
+         return self.moe_parallel_config.tp_rank
+
+     @property
+     def dp_rank(self):
+         return self.moe_parallel_config.dp_rank
+
+     @property
+     def pcp_rank(self):
+         return self.moe_parallel_config.pcp_rank
+
+     @property
+     def ep_rank(self):
+         return self.moe_parallel_config.ep_rank
+
+     @property
+     def use_ep(self):
+         return self.moe_parallel_config.use_ep
+
+     @property
+     def use_pplx_kernels(self):
+         return self.moe_parallel_config.use_pplx_kernels
+
+     @property
+     def use_deepep_ht_kernels(self):
+         return self.moe_parallel_config.use_deepep_ht_kernels
+
+     @property
+     def use_deepep_ll_kernels(self):
+         return self.moe_parallel_config.use_deepep_ll_kernels
+
+     @property
+     def use_flashinfer_cutlass_kernels(self):
+         return (
+             self.moe_quant_config is not None
+             and self.moe_quant_config.quant_dtype == "nvfp4"
+             and self.moe_config_use_flashinfer_cutlass_kernels
+         )
+
+     @property
+     def use_marlin_kernels(self):
+         return getattr(self.quant_method, "use_marlin", False)
+
+     @property
+     def use_dp_chunking(self) -> bool:
+         return (
+             self.moe_parallel_config.use_pplx_kernels
+             or self.moe_parallel_config.use_deepep_ll_kernels
+             or (self.dp_size > 1 and self.use_flashinfer_cutlass_kernels)
+         ) and envs.VLLM_ENABLE_MOE_DP_CHUNK
+
+     @property
+     def is_internal_router(self) -> bool:
+         # By default, router/gate is called before FusedMoE forward pass
+         return False
+
+     def _maybe_init_expert_routing_tables(
+         self,
+     ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor] | None:
+         # Currently routing_tables only needed for round-robin expert placement
+         # with DeepEP-ll all2all backend.
+         if (
+             self.expert_placement_strategy != "round_robin"
+             or not self.use_deepep_ll_kernels
+         ):
+             return None
+
+         if hasattr(self, "expert_global_to_physical"):
+             return cast(
+                 tuple[torch.Tensor, torch.Tensor, torch.Tensor],
+                 (
+                     self.expert_global_to_physical,
+                     self.expert_physical_to_global,
+                     self.expert_local_to_global,
+                 ),
+             )
+
+         if self._expert_map is None:
+             return None
+
+         routing_tables = self.ensure_round_robin_expert_routing_tables(
+             global_num_experts=self.global_num_experts,
+             ep_size=self.ep_size,
+             ep_rank=self.ep_rank,
+             local_num_experts=self.local_num_experts,
+             device=self._expert_map.device,
+         )
+
+         global_to_physical, physical_to_global, local_global = routing_tables
+         self.register_buffer("expert_global_to_physical", global_to_physical)
+         self.register_buffer("expert_physical_to_global", physical_to_global)
+         self.register_buffer("expert_local_to_global", local_global)
+
+         return routing_tables
+
+     @staticmethod
+     def ensure_round_robin_expert_routing_tables(
+         global_num_experts: int,
+         ep_size: int,
+         ep_rank: int,
+         local_num_experts: int,
+         device: torch.device | None = None,
+     ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+         device_kwargs = {"device": device} if device is not None else {}
+         global_indices = torch.arange(
+             global_num_experts, dtype=torch.long, **device_kwargs
+         )
+         owner = torch.remainder(global_indices, ep_size)
+         local_index = torch.div(global_indices, ep_size, rounding_mode="floor")
+         base = global_num_experts // ep_size
+         remainder = global_num_experts % ep_size
+         physical_offset = owner * base
+         if remainder > 0:
+             remainder_tensor = torch.tensor(
+                 remainder, dtype=torch.long, **device_kwargs
+             )
+             physical_offset = physical_offset + torch.minimum(owner, remainder_tensor)
+
+         global_to_physical = physical_offset + local_index
+         physical_to_global = torch.empty_like(global_to_physical)
+         physical_to_global[global_to_physical] = global_indices
+
+         local_global = torch.arange(
+             ep_rank,
+             global_num_experts,
+             ep_size,
+             dtype=torch.long,
+             **device_kwargs,
+         )
+         if local_global.numel() != local_num_experts:
+             local_global = local_global[:local_num_experts]
+
+         return (global_to_physical, physical_to_global, local_global)
+
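A standalone sketch of these tables for a hypothetical 10-expert, 4-rank layout (mirroring the arithmetic above): global_to_physical is a permutation, physical_to_global is its inverse, and the final arange yields the experts owned by the given rank.

import torch

global_num_experts, ep_size, ep_rank = 10, 4, 1
idx = torch.arange(global_num_experts, dtype=torch.long)
owner = idx % ep_size                 # round-robin owner of each global expert
local_index = idx // ep_size          # slot index within the owner rank
base, rem = global_num_experts // ep_size, global_num_experts % ep_size
offset = owner * base + torch.minimum(owner, torch.tensor(rem))
global_to_physical = offset + local_index
physical_to_global = torch.empty_like(global_to_physical)
physical_to_global[global_to_physical] = idx
assert torch.equal(physical_to_global[global_to_physical], idx)  # inverse permutation
print(global_to_physical.tolist())   # [0, 3, 6, 8, 1, 4, 7, 9, 2, 5]
print(torch.arange(ep_rank, global_num_experts, ep_size).tolist())  # rank 1 owns [1, 5, 9]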
+     def update_expert_map(self):
+         # ep_size and ep_rank should already be updated
+         assert self._expert_map is not None
+         with self._expert_map.device:
+             local_num_experts, expert_map, expert_mask = determine_expert_map(
+                 ep_size=self.ep_size,
+                 ep_rank=self.ep_rank,
+                 global_num_experts=self.global_num_experts,
+                 expert_placement_strategy=self.expert_placement_strategy,
+                 num_fused_shared_experts=self.num_fused_shared_experts,
+                 return_expert_mask=self.rocm_aiter_fmoe_enabled,
+             )
+             self.local_num_experts = local_num_experts
+             self.register_buffer("_expert_map", expert_map)
+             self.register_buffer("expert_mask", expert_mask)
+             self._maybe_init_expert_routing_tables()
+             if self.aiter_fmoe_shared_expert_enabled:
+                 self._init_aiter_shared_experts_topK_buffer(
+                     vllm_config=get_current_vllm_config(),
+                     dp_size=get_dp_group().world_size,
+                 )
+
891
+ def _maybe_setup_shared_experts_stream(
892
+ self,
893
+ hidden_states: torch.Tensor,
894
+ has_separate_shared_experts: bool,
895
+ use_chunked_impl: bool,
896
+ ) -> tuple[bool, torch.Tensor | None]:
897
+ use_shared_experts_stream = (
898
+ current_platform.is_cuda()
899
+ and has_separate_shared_experts
900
+ and not use_chunked_impl
901
+ and self.shared_experts_stream is not None
902
+ and (
903
+ hidden_states.shape[0]
904
+ <= envs.VLLM_SHARED_EXPERTS_STREAM_TOKEN_THRESHOLD
905
+ )
906
+ )
907
+
908
+ hidden_states_clone: torch.Tensor | None = None
909
+ if use_shared_experts_stream:
910
+ assert self.shared_experts_stream is not None
911
+
912
+ # Clone BEFORE switching streams to avoid race condition
913
+ # where routed_expert kernel may mutate hidden_states.
914
+ hidden_states_clone = hidden_states.clone()
915
+
916
+ # Record that the clone will be used by shared_experts_stream
917
+ # to avoid gc issue from deallocation of hidden_states_clone
918
+ # For more details: https://docs.pytorch.org/docs/stable/generated/torch.Tensor.record_stream.html # noqa: E501
919
+ # NOTE: We don't need shared_output.record_stream(current_stream())
920
+ # because we sync the streams before using shared_output.
921
+ hidden_states_clone.record_stream(self.shared_experts_stream)
922
+
923
+ # Mark sync start point for the separate shared experts
924
+ # stream here since we want it to run in parallel with the
925
+ # router/gate (next op below)
926
+ assert self.shared_experts_stream is not None
927
+ self.shared_experts_stream.wait_stream(current_stream())
928
+
929
+ return use_shared_experts_stream, hidden_states_clone
930
+
931
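+ # A minimal sketch of the clone-then-overlap pattern set up above,
+ # assuming a CUDA device and a generic shared_fn/routed_fn pair
+ # (illustrative names, not part of this module):
+ #
+ #   s = torch.cuda.Stream()
+ #   x_clone = x.clone()                        # clone on the main stream
+ #   x_clone.record_stream(s)                   # keep the allocator from
+ #                                              # reusing it while s runs
+ #   s.wait_stream(torch.cuda.current_stream()) # s starts after the clone
+ #   with torch.cuda.stream(s):
+ #       shared_out = shared_fn(x_clone)        # overlaps with routed_fn
+ #   routed_out = routed_fn(x)                  # main stream
+ #   torch.cuda.current_stream().wait_stream(s) # join before using shared_out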
+ def _load_per_tensor_weight_scale(
932
+ self,
933
+ shard_id: str,
934
+ param: torch.nn.Parameter,
935
+ loaded_weight: torch.Tensor,
936
+ expert_id: int,
937
+ ):
938
+ param_data = param.data
939
+ # for per tensor weight quantization
940
+ if shard_id in ("w1", "w3"):
941
+ # We have to keep the weight scales of w1 and w3 because
942
+ # we need to re-quantize w1/w3 weights after weight loading.
943
+ idx = 0 if shard_id == "w1" else 1
944
+ param_data[expert_id][idx] = loaded_weight
945
+ # If we are in the row parallel case (down_proj)
946
+ elif shard_id == "w2":
947
+ param_data[expert_id] = loaded_weight
948
+
949
+ def _load_combined_w13_weight_scale(
950
+ self,
951
+ shard_dim: int,
952
+ loaded_weight: torch.Tensor,
953
+ param: torch.Tensor,
954
+ tp_rank: int,
955
+ ):
956
+ """
957
+ Load w13 weight scales assuming that w1 weight scales and w3 weight
958
+ scales are stored in the same loaded_weight tensor.
959
+ """
960
+ shard_size = param.shape[shard_dim]
961
+ loaded_weight = loaded_weight.narrow(
962
+ shard_dim, shard_size * tp_rank, shard_size
963
+ )
964
+ param.copy_(loaded_weight)
965
+
966
+ def _load_model_weight_or_group_weight_scale(
967
+ self,
968
+ shard_dim: int,
969
+ expert_data: torch.Tensor,
970
+ shard_id: str,
971
+ loaded_weight: torch.Tensor,
972
+ tp_rank: int,
973
+ load_full_w2: bool = False,
974
+ ):
975
+ """
976
+ Load grouped weight scales for group quantization or model weights
977
+ :param shard_dim: dimension to shard
978
+ :param expert_data: parameter for a particular expert
979
+ :param shard_id: either w1, w2, or w3
980
+ :param loaded_weight: checkpoint weight to load into the param
981
+ :param tp_rank: tensor parallel rank
982
+ :param load_full_w2: whether to load the full w2 weight without sharding.
983
+ """
984
+ if shard_id == "w2":
985
+ # In the case where we have actorder/g_idx, we do not partition the
986
+ # w2 scales, as indicated by the `load_full` argument, for all TP cases.
987
+ self._load_w2(
988
+ shard_dim=shard_dim,
989
+ loaded_weight=loaded_weight,
990
+ expert_data=expert_data,
991
+ tp_rank=tp_rank,
992
+ load_full=load_full_w2,
993
+ )
994
+ elif shard_id in ("w1", "w3"):
995
+ self._load_w13(
996
+ shard_id=shard_id,
997
+ shard_dim=shard_dim,
998
+ loaded_weight=loaded_weight,
999
+ expert_data=expert_data,
1000
+ tp_rank=tp_rank,
1001
+ )
1002
+
1003
+ def _load_per_channel_weight_scale(
1004
+ self,
1005
+ expert_data: torch.Tensor,
1006
+ shard_dim: int,
1007
+ shard_id: str,
1008
+ loaded_weight: torch.Tensor,
1009
+ tp_rank: int,
1010
+ ):
1011
+ # for per channel weight quantization
1012
+ if shard_id == "w2":
1013
+ expert_data.copy_(loaded_weight)
1014
+ elif shard_id in ("w1", "w3"):
1015
+ self._load_w13(
1016
+ shard_id=shard_id,
1017
+ shard_dim=shard_dim,
1018
+ loaded_weight=loaded_weight,
1019
+ expert_data=expert_data,
1020
+ tp_rank=tp_rank,
1021
+ )
1022
+
1023
+ def _load_w13(
1024
+ self,
1025
+ expert_data: torch.Tensor,
1026
+ shard_dim: int,
1027
+ shard_id: str,
1028
+ loaded_weight: torch.Tensor,
1029
+ tp_rank: int,
1030
+ load_full: bool = False,
1031
+ ):
1032
+ # Index the loaded weight for tp sharding.
1033
+ # gate_up_proj: "MergedColumnParallel", so tp sharding on output_dim
1034
+ if self.moe_config.is_act_and_mul:
1035
+ shard_size = expert_data.shape[shard_dim] // 2
1036
+ else:
1037
+ shard_size = expert_data.shape[shard_dim]
1038
+ if not load_full:
1039
+ loaded_weight = loaded_weight.narrow(
1040
+ shard_dim, shard_size * tp_rank, shard_size
1041
+ )
1042
+ # Narrow parameter and load.
1043
+ # w1, gate_proj: Load into first logical weight of w13.
1044
+ if shard_id == "w1":
1045
+ expert_data = expert_data.narrow(shard_dim, 0, shard_size)
1046
+ # w3, up_proj: Load into second logical weight of w13.
1047
+ else:
1048
+ assert shard_id == "w3"
1049
+ expert_data = expert_data.narrow(shard_dim, shard_size, shard_size)
1050
+ expert_data.copy_(loaded_weight)
1051
+
1052
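+ # A toy sketch of the narrow-based sharding above (illustrative
+ # sizes only): with is_act_and_mul, intermediate size 8 and
+ # tp_size=2, each rank's merged w13 holds 4 rows of w1 followed by
+ # 4 rows of w3:
+ #
+ #   w13 = torch.empty(8, 16)                    # this rank's [w1; w3]
+ #   w1_full = torch.randn(8, 16)                # checkpoint gate_proj
+ #   shard = w1_full.narrow(0, 4 * tp_rank, 4)   # this rank's w1 rows
+ #   w13.narrow(0, 0, 4).copy_(shard)            # w1 -> first half
+ #   # w3 fills the second half: w13.narrow(0, 4, 4).copy_(...)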
+ def _load_w2(
1053
+ self,
1054
+ expert_data: torch.Tensor,
1055
+ shard_dim: int,
1056
+ loaded_weight: torch.Tensor,
1057
+ tp_rank: int,
1058
+ load_full: bool = False,
1059
+ ):
1060
+ # Index the loaded weight for tp sharding.
1061
+ # down_proj: "RowParallel" so tp sharding on input_dim
1062
+ # Narrow parameter and load.
1063
+ shard_size = expert_data.shape[shard_dim]
1064
+ if not load_full:
1065
+ loaded_weight = loaded_weight.narrow(
1066
+ shard_dim, shard_size * tp_rank, shard_size
1067
+ )
1068
+ # w2, down_proj: Load into only logical weight of w2.
1069
+ expert_data.copy_(loaded_weight)
1070
+
1071
+ def _load_single_value(
1072
+ self, param: torch.nn.Parameter, loaded_weight: torch.Tensor, expert_id: int
1073
+ ):
1074
+ param_data = param.data
1075
+
1076
+ # Input scales can be loaded directly and should be equal.
1077
+ param_data[expert_id] = loaded_weight
1078
+
1079
+ def _load_g_idx(
1080
+ self,
1081
+ shard_id: str,
1082
+ expert_data: torch.Tensor,
1083
+ shard_dim: int,
1084
+ loaded_weight: torch.Tensor,
1085
+ tp_rank: int,
1086
+ ):
1087
+ if shard_id == "w2":
1088
+ self._load_w2(
1089
+ shard_dim=shard_dim,
1090
+ loaded_weight=loaded_weight,
1091
+ expert_data=expert_data,
1092
+ tp_rank=tp_rank,
1093
+ )
1094
+ else:
1095
+ assert shard_id in ("w1", "w3")
1096
+ expert_data.copy_(loaded_weight)
1097
+
1098
+ def _map_global_expert_id_to_local_expert_id(self, expert_id: int) -> int:
1099
+ if self._expert_map is None:
1100
+ return expert_id
1101
+ return self._expert_map[expert_id].item()
1102
+
1103
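+ # Illustrative example of the mapping above, assuming the default
+ # linear expert placement from determine_expert_map(): with 8 global
+ # experts, ep_size=4 and ep_rank=1, this rank owns globals 2 and 3,
+ # so _expert_map = [-1, -1, 0, 1, -1, -1, -1, -1] and
+ # _map_global_expert_id_to_local_expert_id(3) == 1, while any
+ # non-local expert id maps to -1.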
+ def _init_aiter_shared_experts_topK_buffer(
1104
+ self, vllm_config: VllmConfig, dp_size: int
1105
+ ):
1106
+ if self.num_fused_shared_experts > 0:
1107
+ init_aiter_topK_meta_data(
1108
+ n_routed_experts=self.global_num_experts,
1109
+ n_shared_experts=self.num_fused_shared_experts,
1110
+ top_k=self.top_k,
1111
+ tp_rank=self.ep_rank if self.use_ep else self.tp_rank,
1112
+ tp_size=self.ep_size if self.use_ep else self.tp_size,
1113
+ shared_experts_score=1.0,
1114
+ max_num_tokens=vllm_config.scheduler_config.max_num_batched_tokens
1115
+ * dp_size,
1116
+ is_EP=self.use_ep,
1117
+ )
1118
+ self.local_num_experts += self.num_fused_shared_experts
1119
+
1120
+ @overload
1121
+ def weight_loader(
1122
+ self,
1123
+ param: torch.nn.Parameter,
1124
+ loaded_weight: torch.Tensor,
1125
+ weight_name: str,
1126
+ shard_id: str,
1127
+ expert_id: int,
1128
+ return_success: Literal[False],
1129
+ ) -> None: ...
1130
+
1131
+ @overload
1132
+ def weight_loader(
1133
+ self,
1134
+ param: torch.nn.Parameter,
1135
+ loaded_weight: torch.Tensor,
1136
+ weight_name: str,
1137
+ shard_id: str,
1138
+ expert_id: int,
1139
+ return_success: Literal[True],
1140
+ ) -> bool: ...
1141
+
1142
+ def weight_loader(
1143
+ self,
1144
+ param: torch.nn.Parameter,
1145
+ loaded_weight: torch.Tensor,
1146
+ weight_name: str,
1147
+ shard_id: str,
1148
+ expert_id: int,
1149
+ return_success: bool = False,
1150
+ ) -> bool | None:
1151
+ if self.quant_config and self.quant_config.get_name() == "mxfp4":
1152
+ # (FIXME) for gpt-oss all experts are combined
1153
+ if "bias" in weight_name:
1154
+ dim1 = loaded_weight.shape[1]
1155
+ param.data[:, :dim1].copy_(loaded_weight)
1156
+ else:
1157
+ dim1 = loaded_weight.shape[1]
1158
+ dim2 = loaded_weight.shape[2]
1159
+ param.data[:, :dim1, :dim2].copy_(loaded_weight)
1160
+ return True if return_success else None
1161
+
1162
+ quant_method_name = self.quant_method.__class__.__name__
1163
+ global_expert_id = expert_id
1164
+ expert_id = self._map_global_expert_id_to_local_expert_id(global_expert_id)
1165
+
1166
+ use_global_sf = (
1167
+ getattr(self.quant_method, "use_global_sf", False)
1168
+ and "input_scale" in weight_name
1169
+ )
1170
+
1171
+ if expert_id == -1 and not use_global_sf:
1172
+ # Failed to load this param since it's not local to this rank
1173
+ return False if return_success else None
1174
+ # Hereafter, `expert_id` is the local physical id.
1175
+
1176
+ # compressed-tensors checkpoints with packed weights are stored flipped
1177
+ # TODO (mgoin): check self.quant_method.quant_config.quant_format
1178
+ # against known CompressionFormat enum values that have this quality
1179
+ if self.quant_method.__class__.__name__ in (
1180
+ "CompressedTensorsWNA16MarlinMoEMethod",
1181
+ "CompressedTensorsWNA16MoEMethod",
1182
+ ):
1183
+ loaded_weight = loaded_weight.t().contiguous()
1184
+
1185
+ if shard_id not in ("w1", "w2", "w3"):
1186
+ raise ValueError(f"shard_id must be ['w1','w2','w3'] but got {shard_id}.")
1187
+
1188
+ # Fetch the dim to shard the parameter/loaded weight
1189
+ # based on the shard id. This will be whichever
1190
+ # dimension intermediate_size_per_partition is on.
1191
+ SHARD_ID_TO_SHARDED_DIM = {"w1": 0, "w2": 1, "w3": 0}
1192
+
1193
+ is_gguf_weight = getattr(param, "is_gguf_weight", False)
1194
+ is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
1195
+ if is_gguf_weight_type:
1196
+ param.weight_type = loaded_weight.item()
1197
+ param.data.copy_(loaded_weight)
1198
+ return True if return_success else None
1199
+
1200
+ # Case for BitsAndBytes
1201
+ use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit", False)
1202
+ if use_bitsandbytes_4bit:
1203
+ shard_dim = 0
1204
+
1205
+ expert_data = param.data[expert_id]
1206
+ if shard_id == "w2":
1207
+ expert_data.copy_(loaded_weight)
1208
+ elif shard_id in ("w1", "w3"):
1209
+ # BNB inflight quantization has already sharded the weights
1210
+ full_load = True
1211
+ self._load_w13(
1212
+ shard_id=shard_id,
1213
+ shard_dim=shard_dim,
1214
+ loaded_weight=loaded_weight,
1215
+ expert_data=expert_data,
1216
+ tp_rank=self.tp_rank,
1217
+ load_full=full_load,
1218
+ )
1219
+ return True if return_success else None
1220
+
1221
+ # is_transposed: whether the dim to shard the weight
1222
+ # should be flipped. Required by GPTQ and compressed-tensors;
1223
+ # the shard dim is whichever dimension intermediate_size_per_partition is on.
1224
+ is_transposed = getattr(param, "is_transposed", False)
1225
+ shard_dim = SHARD_ID_TO_SHARDED_DIM[shard_id]
1226
+ if is_transposed:
1227
+ shard_dim = int(not shard_dim)
1228
+
1229
+ full_load = len(loaded_weight.shape) == 3
1230
+ if full_load:
1231
+ shard_dim += 1
1232
+
1233
+ # Materialize the GGUF UninitializedParameter, accounting for merged weights
1234
+ if is_gguf_weight and isinstance(param, UninitializedParameter):
1235
+ # To materialize a tensor, we must have its full shape, including
1236
+ # the number of experts, so this path requires `full_load`.
1237
+ assert full_load
1238
+ final_shape = list(loaded_weight.shape)
1239
+ # w1 and w3 are merged per expert.
1240
+ if shard_id in {"w1", "w3"}:
1241
+ final_shape[1] *= 2
1242
+ final_shape[shard_dim] = final_shape[shard_dim] // self.tp_size
1243
+ param.materialize(final_shape, dtype=loaded_weight.dtype)
1244
+
1245
+ expert_data = param.data if full_load else param.data[expert_id]
1246
+
1247
+ # Case input scale: input_scale loading is only supported for fp8
1248
+ if "input_scale" in weight_name:
1249
+ # this is needed for compressed-tensors only
1250
+ loaded_weight = loaded_weight.to(param.data.device)
1251
+
1252
+ if (
1253
+ "compressed" in quant_method_name.lower()
1254
+ and param.data[expert_id] != 1
1255
+ and (param.data[expert_id] - loaded_weight).abs() > 1e-5
1256
+ ):
1257
+ raise ValueError(
1258
+ "input_scales of w1 and w3 of a layer "
1259
+ f"must be equal. But got {param.data[expert_id]} "
1260
+ f"vs. {loaded_weight}"
1261
+ )
1262
+
1263
+ self._load_single_value(
1264
+ param=param,
1265
+ loaded_weight=loaded_weight,
1266
+ expert_id=global_expert_id if use_global_sf else expert_id,
1267
+ )
1268
+ return True if return_success else None
1269
+
1270
+ # Case g_idx
1271
+ if "g_idx" in weight_name:
1272
+ self._load_g_idx(
1273
+ shard_dim=0,
1274
+ shard_id=shard_id,
1275
+ loaded_weight=loaded_weight,
1276
+ expert_data=expert_data,
1277
+ tp_rank=self.tp_rank,
1278
+ )
1279
+ return True if return_success else None
1280
+
1281
+ # TODO @dsikka: ModelOpt should follow the proper MoE loading pattern
1282
+ if "ModelOpt" in quant_method_name:
1283
+ # Determine per-tensor weight scale patterns based on variant
1284
+ # Use the dedicated method instead of brittle string matching
1285
+ uses_weight_scale_2 = self.quant_method.uses_weight_scale_2_pattern()
1286
+
1287
+ # Call _load_per_tensor_weight_scale() to load per-tensor (scalar)
1288
+ # weights scales.
1289
+ # Input scales are always per-tensor.
1290
+ # Weight scales: FP4 uses "weight_scale_2" and FP8 uses
1291
+ # "weight_scale" for per-tensor scales.
1292
+ is_per_tensor = (
1293
+ "weight_scale_2" in weight_name
1294
+ if uses_weight_scale_2
1295
+ else "weight_scale" in weight_name
1296
+ ) or "input_scale" in weight_name
1297
+ if is_per_tensor:
1298
+ self._load_per_tensor_weight_scale(
1299
+ shard_id=shard_id,
1300
+ param=param,
1301
+ loaded_weight=loaded_weight,
1302
+ expert_id=expert_id,
1303
+ )
1304
+ return True if return_success else None
1305
+
1306
+ # If the weight is w13_weight_scale and w13_weight_scales are
1307
+ # combined into a single loaded_weight, call
1308
+ # _load_combined_w13_weight_scale() to load it.
1309
+ # This is checked by comparing the hidden_out dims of the
1310
+ # loaded_weight and the param.
1311
+ if "w13_weight_scale" in weight_name:
1312
+ loaded_weight_hidden_out = loaded_weight.shape[-2]
1313
+ param_hidden_out = param.data.shape[-2] * self.tp_size
1314
+ if loaded_weight_hidden_out == param_hidden_out:
1315
+ self._load_combined_w13_weight_scale(
1316
+ shard_dim=shard_dim,
1317
+ loaded_weight=loaded_weight,
1318
+ param=expert_data,
1319
+ tp_rank=self.tp_rank,
1320
+ )
1321
+ return True if return_success else None
1322
+
1323
+ # For other weights, call _load_model_weight_or_group_weight_scale()
1324
+ # to load it.
1325
+ if "weight" in weight_name:
1326
+ self._load_model_weight_or_group_weight_scale(
1327
+ shard_id=shard_id,
1328
+ shard_dim=shard_dim,
1329
+ loaded_weight=loaded_weight,
1330
+ expert_data=expert_data,
1331
+ tp_rank=self.tp_rank,
1332
+ )
1333
+ return True if return_success else None
1334
+
1335
+ # Case weight scales, zero_points and offset, weight/input global scales
1336
+ if "scale" in weight_name or "zero" in weight_name or "offset" in weight_name:
1337
+ # load the weight scales and zp based on the quantization scheme
1338
+ # supported weight scales/zp can be found in
1339
+ # FusedMoeWeightScaleSupported
1340
+ # TODO @dsikka: once hardened, refactor to use vLLM Parameters
1341
+ # specific to each case
1342
+ quant_method = getattr(param, "quant_method", None)
1343
+ if quant_method == FusedMoeWeightScaleSupported.CHANNEL.value:
1344
+ self._load_per_channel_weight_scale(
1345
+ shard_id=shard_id,
1346
+ shard_dim=shard_dim,
1347
+ loaded_weight=loaded_weight,
1348
+ expert_data=expert_data,
1349
+ tp_rank=self.tp_rank,
1350
+ )
1351
+ elif quant_method in [
1352
+ FusedMoeWeightScaleSupported.GROUP.value,
1353
+ FusedMoeWeightScaleSupported.BLOCK.value,
1354
+ ]:
1355
+ self._load_model_weight_or_group_weight_scale(
1356
+ shard_id=shard_id,
1357
+ shard_dim=shard_dim,
1358
+ loaded_weight=loaded_weight,
1359
+ expert_data=expert_data,
1360
+ tp_rank=self.tp_rank,
1361
+ load_full_w2=getattr(param, "load_full_w2", False),
1362
+ )
1363
+ elif quant_method == FusedMoeWeightScaleSupported.TENSOR.value:
1364
+ self._load_per_tensor_weight_scale(
1365
+ shard_id=shard_id,
1366
+ param=param,
1367
+ loaded_weight=loaded_weight,
1368
+ expert_id=expert_id,
1369
+ )
1370
+ else:
1371
+ WEIGHT_SCALE_SUPPORTED = [e.value for e in FusedMoeWeightScaleSupported]
1372
+ raise ValueError(
1373
+ f"quant method must be one of {WEIGHT_SCALE_SUPPORTED}"
1374
+ )
1375
+ return True if return_success else None
1376
+
1377
+ # Case weight_shape
1378
+ if "weight_shape" in weight_name:
1379
+ # only required by compressed-tensors
1380
+ self._load_single_value(
1381
+ param=param, loaded_weight=loaded_weight, expert_id=expert_id
1382
+ )
1383
+ return True if return_success else None
1384
+
1385
+ # Case model weights
1386
+ if "weight" in weight_name:
1387
+ self._load_model_weight_or_group_weight_scale(
1388
+ shard_id=shard_id,
1389
+ shard_dim=shard_dim,
1390
+ loaded_weight=loaded_weight,
1391
+ expert_data=expert_data,
1392
+ tp_rank=self.tp_rank,
1393
+ )
1394
+ return True if return_success else None
1395
+
1396
+ return False if return_success else None
1397
+
1398
+ def load_weights(
1399
+ self, weights: Iterable[tuple[str, torch.Tensor]]
1400
+ ) -> Iterable[str]:
1401
+ if (expert_mapping := self.expert_mapping) is None:
1402
+ raise ValueError(
1403
+ "`self.expert_mapping` must be provided to "
1404
+ "load weights using `self.load_weights`."
1405
+ )
1406
+ for expert_name, loaded_weight in weights:
1407
+ qual_name = f"{self.layer_name}.{expert_name}"
1408
+ for param_name, weight_name, expert_id, shard_id in expert_mapping:
1409
+ if weight_name not in qual_name:
1410
+ continue
1411
+ weight_name = qual_name.replace(weight_name, param_name)
1412
+ param_name = weight_name.removeprefix(f"{self.layer_name}.")
1413
+ param = getattr(self, param_name)
1414
+ success = self.weight_loader(
1415
+ param=param,
1416
+ loaded_weight=loaded_weight,
1417
+ weight_name=weight_name,
1418
+ shard_id=shard_id,
1419
+ expert_id=expert_id,
1420
+ return_success=True,
1421
+ )
1422
+ if success:
1423
+ logger.debug(
1424
+ "Loaded %s for expert %d into %s",
1425
+ param_name,
1426
+ expert_id,
1427
+ self.layer_name,
1428
+ )
1429
+ yield param_name
1430
+
1431
+ def get_expert_weights(self) -> Iterable[torch.Tensor]:
1432
+ def _maybe_make_contiguous(
1433
+ name: str, p: torch.nn.Parameter
1434
+ ) -> torch.nn.Parameter:
1435
+ """
1436
+ In some cases, the last 2 dimensions (the non-expert dimensions)
1437
+ of the weight scale tensor are transposed. This function
1438
+ transforms the tensor (view update) so the tensor is contiguous().
1439
+ Example: A non-contiguous scale tensor,
1440
+ `x` of shape (E, 32, 16) and stride (512, 1, 32) is transformed to
1441
+ `x_` of shape (E, 16, 32) and stride (512, 32, 1).
1442
+ Note that we specifically use torch.transpose() so `x_` refers
1443
+ to the same underlying memory. Because `x` and `x_` point to the
1444
+ same underlying memory, this transformation is safe in the context
1445
+ of EPLB: it is the same memory, and only the view differs.
1447
+ Note: This function handles the "weight_scale" tensors specifically.
1448
+ This could however be generalized to handle similar tensors.
1449
+ """
1450
+ if p.ndim != 3:
1451
+ return p
1452
+ if p.is_contiguous():
1453
+ # Already contiguous. do nothing.
1454
+ return p
1455
+ # p is non-contiguous. We only handle the case where the last 2
1456
+ # dimensions of the scales tensor are transposed. We can handle
1457
+ # other cases when they become relevant.
1458
+ is_transposed_12 = p.stride(1) == 1 and p.stride(2) != 1
1459
+ if "weight_scale" not in name or not is_transposed_12:
1460
+ # do nothing.
1461
+ return p
1462
+
1463
+ # Do not update the layer parameter as the layer's MoE operations would
1464
+ # expect the parameter's tensor to have the same shape / stride. Instead,
1465
+ # make a new torch.nn.Parameter that is used just in the context of
1466
+ # EPLB.
1467
+ return torch.nn.Parameter(
1468
+ torch.transpose(p.data, 1, 2), requires_grad=False
1469
+ )
1470
+
1471
+ weights = list(self.named_parameters())
1472
+ weights = [(name, _maybe_make_contiguous(name, p)) for name, p in weights]
1473
+
1474
+ assert all(
1475
+ weight.is_contiguous()
1476
+ for name, weight in weights
1477
+ if not name.startswith("_shared_experts.")
1478
+ )
1479
+
1480
+ # Filter out the non-expert weights.
1481
+ # `e_score_correction_bias` is a bias for each logical expert,
1482
+ # with shape (num_logical_experts,), not an expert weight.
1483
+ NON_EXPERT_WEIGHTS = {
1484
+ "e_score_correction_bias",
1485
+ }
1486
+
1487
+ return [
1488
+ weight.view(self.local_num_experts, -1)
1489
+ for name, weight in weights
1490
+ if name not in NON_EXPERT_WEIGHTS
1491
+ and weight.shape != torch.Size([])
1492
+ and not name.startswith("_shared_experts.")
1493
+ # exclude parameters from non-expert submodules (e.g. gate/shared)
1494
+ and not name.startswith("_gate.")
1495
+ ]
1496
+
1497
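+ # A small sketch of the transpose trick used above: a scale tensor
+ # whose last two dims were transposed becomes contiguous again under
+ # torch.transpose(), without copying memory (illustrative shapes):
+ #
+ #   x_ = torch.randn(4, 16, 32).transpose(1, 2)  # shape (4, 32, 16),
+ #                                                # stride (512, 1, 32)
+ #   x_.is_contiguous()                           # -> False
+ #   y = torch.transpose(x_, 1, 2)                # shape (4, 16, 32),
+ #                                                # stride (512, 32, 1)
+ #   y.is_contiguous()                            # -> True
+ #   y.data_ptr() == x_.data_ptr()                # -> True: same memory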
+ def set_eplb_state(
1498
+ self,
1499
+ moe_layer_idx: int,
1500
+ expert_load_view: torch.Tensor,
1501
+ logical_to_physical_map: torch.Tensor,
1502
+ logical_replica_count: torch.Tensor,
1503
+ ) -> None:
1504
+ """
1505
+ Register the EPLB state in this layer.
1506
+
1507
+ This is used later in the forward pass, where we get the expert mapping
1508
+ and record the load metrics in `expert_load_view`.
1509
+ """
1510
+ self.expert_load_view = expert_load_view[moe_layer_idx]
1511
+ self.logical_to_physical_map = logical_to_physical_map[moe_layer_idx]
1512
+ self.logical_replica_count = logical_replica_count[moe_layer_idx]
1513
+
1514
+ def ensure_moe_quant_config_init(self):
1515
+ if self.quant_method.moe_quant_config is None:
1516
+ # Note: the moe_quant_config can't be constructed until after
1517
+ # weight loading post processing.
1518
+ self.quant_method.moe_quant_config = (
1519
+ self.quant_method.get_fused_moe_quant_config(self)
1520
+ )
1521
+
1522
+ @property
1523
+ def moe_quant_config(self) -> FusedMoEQuantConfig | None:
1524
+ self.ensure_moe_quant_config_init()
1525
+ return self.quant_method.moe_quant_config
1526
+
1527
+ def ensure_dp_chunking_init(self):
1528
+ if not self.use_dp_chunking or self.batched_hidden_states is not None:
1529
+ return
1530
+
1531
+ states_shape: tuple[int, ...]
1532
+ logits_shape: tuple[int, ...]
1533
+
1534
+ moe = self.moe_config
1535
+
1536
+ if self.vllm_config.parallel_config.enable_dbo:
1537
+ states_shape = (2, moe.max_num_tokens, self.hidden_size)
1538
+ logits_shape = (2, moe.max_num_tokens, self.logical_num_experts)
1539
+ else:
1540
+ states_shape = (moe.max_num_tokens, self.hidden_size)
1541
+ logits_shape = (moe.max_num_tokens, self.logical_num_experts)
1542
+
1543
+ self.batched_hidden_states = torch.zeros(
1544
+ states_shape, dtype=moe.in_dtype, device=torch.cuda.current_device()
1545
+ )
1546
+
1547
+ self.batched_router_logits = torch.zeros(
1548
+ logits_shape,
1549
+ dtype=moe.router_logits_dtype,
1550
+ device=torch.cuda.current_device(),
1551
+ )
1552
+
1553
+ def _select_experts(
1554
+ self,
1555
+ hidden_states: torch.Tensor,
1556
+ router_logits: torch.Tensor,
1557
+ ) -> tuple[torch.Tensor, torch.Tensor]:
1558
+ """
1559
+ Route the input hidden states to the top-k experts based on the
1560
+ router logits.
1561
+
1562
+ Returns:
1563
+ (topk_weights, topk_ids)
1564
+ (tuple[torch.Tensor, torch.Tensor]):
1565
+ The weights and expert ids.
1566
+
1567
+ **Compatibility**: When EPLB is not enabled, the returned ids are
1568
+ equivalent to global logical ids, so they should be compatible with
1569
+ plain MoE implementations without redundant experts.
1570
+ """
1571
+ from vllm.model_executor.layers.fused_moe.fused_moe import (
1572
+ fused_topk,
1573
+ fused_topk_bias,
1574
+ )
1575
+
1576
+ if self.enable_eplb:
1577
+ if self.quant_method.supports_eplb:
1578
+ if self.expert_load_view is None:
1579
+ raise ValueError(
1580
+ "enable_eplb=True requiere expert_load_view != None"
1581
+ )
1582
+ if self.logical_to_physical_map is None:
1583
+ raise ValueError(
1584
+ "enable_eplb=True requiere logical_to_physical_map != None"
1585
+ )
1586
+ if self.logical_replica_count is None:
1587
+ raise ValueError(
1588
+ "enable_eplb=True requiere logical_replica_count != None"
1589
+ )
1590
+ else:
1591
+ raise NotImplementedError(
1592
+ f"EPLB is not supported for {self.quant_method.method_name}."
1593
+ )
1594
+
1595
+ def valid_grouping() -> bool:
1596
+ # Check if num_experts is greater than num_expert_group
1597
+ # and is divisible by num_expert_group
1598
+ num_experts = router_logits.shape[-1]
1599
+ if num_experts <= self.num_expert_group:
1600
+ return False
1601
+ return num_experts % self.num_expert_group == 0
1602
+
1603
+ indices_type = self.quant_method.topk_indices_dtype
1604
+
1605
+ # Check if we should use a routing simulation strategy
1606
+ routing_strategy = envs.VLLM_MOE_ROUTING_SIMULATION_STRATEGY
1607
+ if routing_strategy != "":
1608
+ topk_weights, topk_ids = RoutingSimulator.simulate_routing(
1609
+ hidden_states=hidden_states,
1610
+ router_logits=router_logits,
1611
+ strategy_name=routing_strategy,
1612
+ top_k=self.top_k,
1613
+ indices_type=indices_type,
1614
+ )
1615
+
1616
+ # DeepSeekv2 uses grouped_top_k
1617
+ elif self.use_grouped_topk and valid_grouping():
1618
+ assert self._grouped_topk_impl is not None
1619
+ topk_weights, topk_ids = self._grouped_topk_impl(
1620
+ hidden_states=hidden_states,
1621
+ gating_output=router_logits,
1622
+ e_score_correction_bias=self.e_score_correction_bias,
1623
+ )
1624
+ elif self.e_score_correction_bias is not None:
1625
+ topk_weights, topk_ids = fused_topk_bias(
1626
+ hidden_states=hidden_states,
1627
+ gating_output=router_logits,
1628
+ e_score_correction_bias=self.e_score_correction_bias.data,
1629
+ topk=self.top_k,
1630
+ renormalize=self.renormalize,
1631
+ )
1632
+ if self.routed_scaling_factor != 1.0:
1633
+ topk_weights *= self.routed_scaling_factor
1634
+ elif self.custom_routing_function is None:
1635
+ topk_weights, topk_ids, token_expert_indices = fused_topk(
1636
+ hidden_states=hidden_states,
1637
+ gating_output=router_logits,
1638
+ topk=self.top_k,
1639
+ renormalize=self.renormalize,
1640
+ indices_type=indices_type,
1641
+ )
1642
+ else:
1643
+ topk_weights, topk_ids = self.custom_routing_function(
1644
+ hidden_states=hidden_states,
1645
+ gating_output=router_logits,
1646
+ topk=self.top_k,
1647
+ renormalize=self.renormalize,
1648
+ )
1649
+
1650
+ if self.enable_eplb:
1651
+ topk_ids = eplb_map_to_physical_and_record(
1652
+ topk_ids=topk_ids,
1653
+ expert_load_view=self.expert_load_view,
1654
+ logical_to_physical_map=self.logical_to_physical_map,
1655
+ logical_replica_count=self.logical_replica_count,
1656
+ )
1657
+
1658
+ if (indices_type is not None) and topk_ids.dtype != indices_type:
1659
+ topk_ids = topk_ids.to(dtype=indices_type)
1660
+
1661
+ assert topk_ids.dtype == indices_type or indices_type is None
1662
+
1663
+ if (
1664
+ self.vllm_config.model_config is not None
1665
+ and self.vllm_config.model_config.enable_return_routed_experts
1666
+ ):
1667
+ # In dummy runs, the capturer is not initialized.
1668
+ capturer = RoutedExpertsCapturer.get_instance()
1669
+ if capturer is not None: # may be None in dummy_run
1670
+ capturer.capture( # noqa
1671
+ layer_id=self.layer_id,
1672
+ topk_ids=topk_ids,
1673
+ )
1674
+
1675
+ return topk_weights, topk_ids
1676
+
1677
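+ # A rough sketch of what the default fused_topk path above computes
+ # (softmax over experts, top-k, optional renormalization); the real
+ # kernel is fused, this is just the reference semantics:
+ #
+ #   scores = router_logits.softmax(dim=-1)            # (tokens, experts)
+ #   topk_weights, topk_ids = scores.topk(top_k, dim=-1)
+ #   if renormalize:
+ #       topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True)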
+ def must_reduce_shared_expert_outputs(self) -> bool:
1678
+ """
1679
+ The shared_experts are typically computed using the RowParallelLinear
1680
+ layer. The result of this function is typically used as
1681
+ the reduce_results argument to the module.
1682
+ When only tensor parallelism is used, it is not required to reduce
1683
+ the shared_experts results immediately. Instead, we reduce once
1684
+ at the end of the MoE op. (Refer to the DeepSeekV2MoE module.)
1685
+ With EP and all2all kernels this is no longer viable, as all
1686
+ GPU ranks in DP produce the complete set of hidden_states.
1687
+ Therefore we must reduce the shared_experts output early.
1689
+ """
1690
+ assert self.quant_method is not None
1691
+ return (
1692
+ isinstance(self.quant_method, FusedMoEModularMethod)
1693
+ and self.quant_method.fused_experts.output_is_reduced()
1694
+ )
1695
+
1696
+ def maybe_all_reduce_tensor_model_parallel(self, final_hidden_states: torch.Tensor):
1697
+ """
1698
+ Some combine kernels reduce across GPU ranks by default.
1699
+ """
1700
+ if self.must_reduce_shared_expert_outputs():
1701
+ return final_hidden_states
1702
+ else:
1703
+ return tensor_model_parallel_all_reduce(final_hidden_states)
1704
+
1705
+ def forward_native(
1706
+ self,
1707
+ hidden_states: torch.Tensor,
1708
+ router_logits: torch.Tensor,
1709
+ ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
1710
+ og_hidden_states = hidden_states.shape[-1]
1711
+ if self.hidden_size != og_hidden_states:
1712
+ hidden_states = F.pad(
1713
+ hidden_states,
1714
+ (0, self.hidden_size - og_hidden_states),
1715
+ mode="constant",
1716
+ value=0.0,
1717
+ )
1718
+
1719
+ def reduce_output(states: torch.Tensor) -> torch.Tensor:
1720
+ if (
1721
+ not self.is_sequence_parallel
1722
+ and not self.use_dp_chunking
1723
+ and self.reduce_results
1724
+ and (self.tp_size > 1 or self.ep_size > 1)
1725
+ ):
1726
+ states = self.maybe_all_reduce_tensor_model_parallel(states)
1727
+ return states
1728
+
1729
+ if self.shared_experts is None:
1730
+ if current_platform.is_tpu() or current_platform.is_cpu():
1731
+ # TODO: Once the OOM issue for the TPU backend is resolved, we
1732
+ # will switch to using the moe_forward custom op.
1733
+ # Note: CPU doesn't require wrapped forward_impl.
1734
+ fused_output = self.forward_impl(hidden_states, router_logits)
1735
+ assert not isinstance(fused_output, tuple)
1736
+ else:
1737
+ fused_output = torch.ops.vllm.moe_forward(
1738
+ hidden_states, router_logits, self.layer_name
1739
+ )
1740
+ return reduce_output(fused_output)[..., :og_hidden_states]
1741
+ else:
1742
+ if current_platform.is_tpu() or current_platform.is_cpu():
1743
+ # TODO: Once the OOM issue for the TPU backend is resolved, we
1744
+ # will switch to using the moe_forward custom op.
1745
+ # Note: CPU doesn't require wrapped forward_impl.
1746
+ shared_output, fused_output = self.forward_impl(
1747
+ hidden_states, router_logits
1748
+ )
1749
+ else:
1750
+ shared_output, fused_output = torch.ops.vllm.moe_forward_shared(
1751
+ hidden_states, router_logits, self.layer_name
1752
+ )
1753
+ return (
1754
+ reduce_output(shared_output)[..., :og_hidden_states],
1755
+ reduce_output(fused_output)[..., :og_hidden_states],
1756
+ )
1757
+
1758
+ @property
1759
+ def expert_map(self) -> torch.Tensor | None:
1760
+ return (
1761
+ self._expert_map if not self.rocm_aiter_fmoe_enabled else self.expert_mask
1762
+ )
1763
+
1764
+ def forward_cuda(
1765
+ self,
1766
+ hidden_states: torch.Tensor,
1767
+ router_logits: torch.Tensor,
1768
+ ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
1769
+ return self.forward_native(hidden_states, router_logits)
1770
+
1771
+ def forward_impl_chunked(
1772
+ self,
1773
+ full_hidden_states: torch.Tensor,
1774
+ full_router_logits: torch.Tensor,
1775
+ has_separate_shared_experts: bool,
1776
+ ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
1777
+ assert self.batched_hidden_states is not None
1778
+ assert self.batched_router_logits is not None
1779
+ assert self.batched_hidden_states.dtype == full_hidden_states.dtype
1780
+ assert self.batched_router_logits.dtype == full_router_logits.dtype
1781
+ # Check size compatibility.
1782
+ assert self.batched_hidden_states.size(-1) == full_hidden_states.size(-1)
1783
+ assert self.batched_router_logits.size(-1) == full_router_logits.size(-1)
1784
+
1785
+ full_fused_final_hidden_states = torch.empty_like(full_hidden_states)
1786
+ if self.shared_experts is not None:
1787
+ full_shared_final_hidden_states = torch.empty_like(full_hidden_states)
1788
+
1789
+ def process_chunk(chunk_start, chunk_end, skip_result_store=False):
1790
+ chunk_size = chunk_end - chunk_start
1791
+ hidden_states = full_hidden_states[chunk_start:chunk_end, :]
1792
+ router_logits = full_router_logits[chunk_start:chunk_end, :]
1793
+
1794
+ assert self.batched_hidden_states is not None
1795
+ assert self.batched_router_logits is not None
1796
+ # This is only true when DBO has been enabled in the config.
1797
+ # Both tensors will have an outer dimension for the ubatch id
1798
+ if self.batched_hidden_states.dim() == 3:
1799
+ assert self.batched_router_logits.dim() == 3
1800
+ batch_buffer_idx = dbo_current_ubatch_id()
1801
+ batched_hidden_states = self.batched_hidden_states[batch_buffer_idx, :]
1802
+ batched_router_logits = self.batched_router_logits[batch_buffer_idx, :]
1803
+ else:
1804
+ batched_hidden_states = self.batched_hidden_states
1805
+ batched_router_logits = self.batched_router_logits
1806
+
1807
+ assert (
1808
+ batched_hidden_states.size(0) # type: ignore
1809
+ >= chunk_size
1810
+ )
1811
+ assert (
1812
+ batched_router_logits.size(0) # type: ignore
1813
+ >= chunk_size
1814
+ )
1815
+ staged_hidden_states = batched_hidden_states[:chunk_size, :] # type: ignore
1816
+ staged_router_logits = batched_router_logits[:chunk_size, :] # type: ignore
1817
+ staged_hidden_states.copy_(hidden_states, non_blocking=True)
1818
+ staged_router_logits.copy_(router_logits, non_blocking=True)
1819
+
1820
+ # Matrix multiply.
1821
+ final_hidden_states = self.quant_method.apply(
1822
+ layer=self,
1823
+ router=self.router,
1824
+ x=staged_hidden_states,
1825
+ router_logits=staged_router_logits,
1826
+ )
1827
+
1828
+ if has_separate_shared_experts:
1829
+ assert not isinstance(final_hidden_states, tuple)
1830
+ assert self.shared_experts is not None
1831
+
1832
+ shared_output = self.shared_experts(staged_hidden_states)
1833
+
1834
+ final_hidden_states = (
1835
+ shared_output,
1836
+ final_hidden_states,
1837
+ )
1838
+
1839
+ if not skip_result_store:
1840
+ if self.shared_experts is None:
1841
+ full_fused_final_hidden_states[chunk_start:chunk_end, :].copy_(
1842
+ final_hidden_states, non_blocking=True
1843
+ )
1844
+ else:
1845
+ full_shared_final_hidden_states[chunk_start:chunk_end, :].copy_(
1846
+ final_hidden_states[0], non_blocking=True
1847
+ )
1848
+ full_fused_final_hidden_states[chunk_start:chunk_end, :].copy_(
1849
+ final_hidden_states[1], non_blocking=True
1850
+ )
1851
+
1852
+ ctx = get_forward_context()
1853
+ # flashinfer_cutlass_kernels can handle: optional DP + TP/EP
1854
+ max_tokens_across_dispatchers = ctx.dp_metadata.max_tokens_across_dp_cpu
1855
+ moe_dp_chunk_size_per_rank = self.moe_config.max_num_tokens
1856
+
1857
+ # If the input to the MoE is sequence parallel then divide by sp_size
1858
+ # to find the maximum number of tokens for any individual dispatcher.
1859
+ if self.is_sequence_parallel:
1860
+ max_tokens_across_dispatchers = cdiv(
1861
+ max_tokens_across_dispatchers, self.sp_size
1862
+ )
1863
+
1864
+ num_tokens = full_hidden_states.size(0)
1865
+ for chunk_idx, chunk_start_ in enumerate(
1866
+ range(0, max_tokens_across_dispatchers, moe_dp_chunk_size_per_rank)
1867
+ ):
1868
+ chunk_start = chunk_start_
1869
+ chunk_end = min(
1870
+ chunk_start + moe_dp_chunk_size_per_rank, max_tokens_across_dispatchers
1871
+ )
1872
+ # clamp start and end
1873
+ chunk_start = min(chunk_start, num_tokens - 1)
1874
+ chunk_end = min(chunk_end, num_tokens)
1875
+ with ctx.dp_metadata.chunked_sizes(
1876
+ self.sp_size, moe_dp_chunk_size_per_rank, chunk_idx
1877
+ ):
1878
+ process_chunk(
1879
+ chunk_start, chunk_end, skip_result_store=chunk_start_ >= num_tokens
1880
+ )
1881
+
1882
+ if self.shared_experts is None:
1883
+ return full_fused_final_hidden_states
1884
+ else:
1885
+ return (full_shared_final_hidden_states, full_fused_final_hidden_states)
1886
+
1887
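+ # A worked example of the chunk loop above (illustrative numbers):
+ # with max_tokens_across_dispatchers=300, moe_dp_chunk_size_per_rank=128
+ # and num_tokens=200 on this rank:
+ #
+ #   chunk 0: [0, 128)   -> stored
+ #   chunk 1: [128, 200) -> clamped from [128, 256), stored
+ #   chunk 2: [199, 200) -> dummy pass (chunk_start_=256 >= 200),
+ #                          skip_result_store=True
+ #
+ # Every rank runs the same number of chunks, keeping any collective
+ # ops inside process_chunk() aligned across ranks.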
+ def forward_impl(
1888
+ self,
1889
+ hidden_states: torch.Tensor,
1890
+ router_logits: torch.Tensor,
1891
+ ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
1892
+ assert self.quant_method is not None
1893
+
1894
+ self.ensure_moe_quant_config_init()
1895
+ self.ensure_dp_chunking_init()
1896
+
1897
+ has_separate_shared_experts = (
1898
+ not isinstance(self.quant_method, FusedMoEModularMethod)
1899
+ and self.shared_experts is not None
1900
+ )
1901
+
1902
+ use_chunked_impl = self.use_dp_chunking
1903
+
1904
+ use_shared_experts_stream, hidden_states_clone = (
1905
+ self._maybe_setup_shared_experts_stream(
1906
+ hidden_states, has_separate_shared_experts, use_chunked_impl
1907
+ )
1908
+ )
1909
+
1910
+ # If a router/gate is provided, apply it here.
1911
+ # (Note: This code runs only when "overlapped mode" is on, to allow
1912
+ # parallel execution of the shared experts with the FusedMoE via a
1913
+ # separate CUDA stream.)
1914
+ if self.gate is not None:
1915
+ router_logits, _ = self.gate(hidden_states)
1916
+
1917
+ if use_chunked_impl:
1918
+ return self.forward_impl_chunked(
1919
+ hidden_states, router_logits, has_separate_shared_experts
1920
+ )
1921
+
1922
+ do_naive_dispatch_combine: bool = self.dp_size > 1 and not isinstance(
1923
+ self.quant_method, FusedMoEModularMethod
1924
+ )
1925
+
1926
+ ctx = get_forward_context()
1927
+ sp_ctx = (
1928
+ ctx.dp_metadata.sp_local_sizes(self.sp_size)
1929
+ if ctx.dp_metadata
1930
+ else nullcontext()
1931
+ )
1932
+
1933
+ with sp_ctx:
1934
+ extra_tensors = None
1935
+ if do_naive_dispatch_combine:
1936
+ # Avoid circular import
1937
+ from vllm.model_executor.layers.quantization.modelopt import (
1938
+ ModelOptNvFp4FusedMoE,
1939
+ )
1940
+
1941
+ post_quant_allgather = (
1942
+ self.quant_method is not None
1943
+ and self.dp_size > 1
1944
+ and self.use_ep
1945
+ and isinstance(self.quant_method, ModelOptNvFp4FusedMoE)
1946
+ and has_flashinfer_trtllm_fused_moe()
1947
+ )
1948
+ if post_quant_allgather:
1949
+ hidden_states_to_dispatch, extra_tensors = (
1950
+ self.quant_method.prepare_dp_allgather_tensor(
1951
+ self, hidden_states, router_logits
1952
+ )
1953
+ )
1954
+ else:
1955
+ hidden_states_to_dispatch = hidden_states
1956
+
1957
+ dispatch_res = get_ep_group().dispatch(
1958
+ hidden_states_to_dispatch,
1959
+ router_logits,
1960
+ self.is_sequence_parallel,
1961
+ extra_tensors=extra_tensors,
1962
+ )
1963
+ if extra_tensors is not None:
1964
+ hidden_states_combined, router_logits, extra_tensors_combined = (
1965
+ dispatch_res
1966
+ )
1967
+ hidden_states_combined = (
1968
+ hidden_states_combined,
1969
+ extra_tensors_combined[0],
1970
+ )
1971
+ else:
1972
+ hidden_states_combined, router_logits = dispatch_res
1973
+
1974
+ # Run shared experts before the matrix multiply,
1975
+ # because the matrix multiply may modify hidden_states.
1976
+ if has_separate_shared_experts and not use_shared_experts_stream:
1977
+ assert self.shared_experts is not None
1978
+ shared_output = self.shared_experts(hidden_states)
1979
+
1980
+ # NOTE: Similar to DP, PCP also needs dispatch and combine. For
1981
+ # simplicity, AgRsAll2All was added separately for PCP here. Maybe
1982
+ # we should modify the All2AllManager abstraction to better support PCP.
1983
+ if self.pcp_size > 1:
1984
+ hidden_states = get_pcp_group().all_gather(
1985
+ hidden_states,
1986
+ dim=0,
1987
+ )
1988
+ router_logits = get_pcp_group().all_gather(
1989
+ router_logits,
1990
+ dim=0,
1991
+ )
1992
+
1993
+ # Matrix multiply.
1994
+ final_hidden_states = self.quant_method.apply(
1995
+ layer=self,
1996
+ router=self.router,
1997
+ x=hidden_states_combined
1998
+ if do_naive_dispatch_combine
1999
+ else hidden_states,
2000
+ router_logits=router_logits,
2001
+ )
2002
+
2003
+ if has_separate_shared_experts:
2004
+ assert self.shared_experts is not None
2005
+
2006
+ if use_shared_experts_stream:
2007
+ # Run shared experts in parallel on a separate stream
2008
+ # NOTE: We start the separate stream here and mark the
2009
+ # sync end point immediately after it is done. This is
2010
+ # important to avoid excessive stream allocations by the cuda
2011
+ # graph replay later.
2012
+ with torch.cuda.stream(self.shared_experts_stream):
2013
+ # Note that hidden_states clone() is necessary here to avoid
2014
+ # conflict with the main stream
2015
+ shared_output = self.shared_experts(hidden_states_clone)
2016
+ current_stream().wait_stream(self.shared_experts_stream)
2017
+
2018
+ final_hidden_states = (
2019
+ shared_output,
2020
+ final_hidden_states,
2021
+ )
2022
+
2023
+ def combine_output(states: torch.Tensor) -> torch.Tensor:
2024
+ if do_naive_dispatch_combine:
2025
+ states = get_ep_group().combine(states, self.is_sequence_parallel)
2026
+
2027
+ if self.pcp_size > 1:
2028
+ states = get_pcp_group().reduce_scatter(
2029
+ states,
2030
+ dim=0,
2031
+ )
2032
+
2033
+ return states
2034
+
2035
+ if self.shared_experts is not None:
2036
+ return (
2037
+ final_hidden_states[0],
2038
+ combine_output(final_hidden_states[1]),
2039
+ )
2040
+ else:
2041
+ return combine_output(final_hidden_states)
2042
+
2043
+ @classmethod
2044
+ def make_expert_params_mapping(
2045
+ cls,
2046
+ model: torch.nn.Module,
2047
+ ckpt_gate_proj_name: str,
2048
+ ckpt_down_proj_name: str,
2049
+ ckpt_up_proj_name: str,
2050
+ num_experts: int,
2051
+ num_redundant_experts: int = 0,
2052
+ ) -> list[tuple[str, str, int, str]]:
2053
+ num_physical_experts = num_experts + num_redundant_experts
2054
+
2055
+ # In the returned mapping:
2056
+ # - `expert_id` is the physical expert id
2057
+ # - `weight_name` contains the weight name of the logical expert
2058
+ # So that we should map the expert id to logical in `weight_name`
2059
+ physical_to_logical_map = (
2060
+ EplbState.build_initial_global_physical_to_logical_map(
2061
+ num_experts, num_redundant_experts
2062
+ )
2063
+ )
2064
+
2065
+ base_layer = (
2066
+ "base_layer."
2067
+ if any(".base_layer." in name for name, _ in model.named_parameters())
2068
+ else ""
2069
+ )
2070
+
2071
+ return [
2072
+ # (param_name, weight_name, expert_id, shard_id)
2073
+ (
2074
+ f"experts.{base_layer}w13_"
2075
+ if weight_name in [ckpt_gate_proj_name, ckpt_up_proj_name]
2076
+ else f"experts.{base_layer}w2_",
2077
+ f"experts.{physical_to_logical_map[expert_id]}.{weight_name}.{base_layer}",
2078
+ expert_id,
2079
+ shard_id,
2080
+ )
2081
+ for expert_id in range(num_physical_experts)
2082
+ for shard_id, weight_name in [
2083
+ ("w1", ckpt_gate_proj_name),
2084
+ ("w2", ckpt_down_proj_name),
2085
+ ("w3", ckpt_up_proj_name),
2086
+ ]
2087
+ ]
2088
+
2089
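+ # Example output of the mapping above (assuming checkpoint names
+ # gate_proj/down_proj/up_proj, num_experts=2, no redundant experts,
+ # and no LoRA base_layer); the first three entries are:
+ #
+ #   ("experts.w13_", "experts.0.gate_proj.", 0, "w1")
+ #   ("experts.w2_",  "experts.0.down_proj.", 0, "w2")
+ #   ("experts.w13_", "experts.0.up_proj.",   0, "w3")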
+ def extra_repr(self) -> str:
2090
+ s = (
2091
+ f"global_num_experts={self.global_num_experts}, "
2092
+ f"local_num_experts={self.local_num_experts}, "
2093
+ f"top_k={self.top_k}, "
2094
+ f"intermediate_size_per_partition={self.intermediate_size_per_partition}, " # noqa: E501
2095
+ f"tp_size={self.tp_size},\n"
2096
+ f"ep_size={self.ep_size}, "
2097
+ f"reduce_results={self.reduce_results}, "
2098
+ f"renormalize={self.renormalize}, "
2099
+ f"use_grouped_topk={self.use_grouped_topk}"
2100
+ )
2101
+
2102
+ if self.use_grouped_topk:
2103
+ s += f", num_expert_group={self.num_expert_group}, topk_group={self.topk_group}" # noqa: E501
2104
+
2105
+ s += f", scoring_func='{self.scoring_func}', activation='{self.activation}'" # noqa: E501
2106
+
2107
+ return s
2108
+
2109
+
2110
+ def moe_forward(
2111
+ hidden_states: torch.Tensor,
2112
+ router_logits: torch.Tensor,
2113
+ layer_name: str,
2114
+ ) -> torch.Tensor:
2115
+ forward_context: ForwardContext = get_forward_context()
2116
+ self = forward_context.no_compile_layers[layer_name]
2117
+ assert self.shared_experts is None
2118
+ return self.forward_impl(hidden_states, router_logits)
2119
+
2120
+
2121
+ def moe_forward_fake(
2122
+ hidden_states: torch.Tensor,
2123
+ router_logits: torch.Tensor,
2124
+ layer_name: str,
2125
+ ) -> torch.Tensor:
2126
+ return torch.empty_like(hidden_states)
2127
+
2128
+
2129
+ direct_register_custom_op(
2130
+ op_name="moe_forward",
2131
+ op_func=moe_forward,
2132
+ mutates_args=["hidden_states"],
2133
+ fake_impl=moe_forward_fake,
2134
+ tags=(torch.Tag.needs_fixed_stride_order,),
2135
+ )
2136
+
2137
+
2138
+ def moe_forward_shared(
2139
+ hidden_states: torch.Tensor,
2140
+ router_logits: torch.Tensor,
2141
+ layer_name: str,
2142
+ ) -> tuple[torch.Tensor, torch.Tensor]:
2143
+ forward_context: ForwardContext = get_forward_context()
2144
+ self = forward_context.no_compile_layers[layer_name]
2145
+ assert self.shared_experts is not None
2146
+ return self.forward_impl(hidden_states, router_logits)
2147
+
2148
+
2149
+ def moe_forward_shared_fake(
2150
+ hidden_states: torch.Tensor,
2151
+ router_logits: torch.Tensor,
2152
+ layer_name: str,
2153
+ ) -> tuple[torch.Tensor, torch.Tensor]:
2154
+ shared_out = torch.empty_like(hidden_states)
2155
+ fused_out = torch.empty_like(hidden_states)
2156
+ return shared_out, fused_out
2157
+
2158
+
2159
+ direct_register_custom_op(
2160
+ op_name="moe_forward_shared",
2161
+ op_func=moe_forward_shared,
2162
+ mutates_args=["hidden_states"],
2163
+ fake_impl=moe_forward_shared_fake,
2164
+ tags=(torch.Tag.needs_fixed_stride_order,),
2165
+ )
2166
+
2167
+ # Mark the FusedMoE weight_loader as supporting MoE-specific parameters
2168
+ # to avoid expensive runtime reflection in model loading code
2169
+ FusedMoE.weight_loader.supports_moe_loading = True # type: ignore[attr-defined]