vllm_cpu_avx512bf16-0.14.0-cp313-cp313-manylinux_2_28_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
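For reference, the compatibility tags in the wheel filename above (CPython 3.13 interpreter and ABI, a manylinux glibc 2.28 baseline, x86-64) can be decoded programmatically. A minimal sketch, assuming the third-party packaging library is installed (pip install packaging); the filename string is taken from the title above:

# Decode a wheel filename into name, version, build tag, and compatibility tags.
from packaging.utils import parse_wheel_filename

name, version, build, tags = parse_wheel_filename(
    "vllm_cpu_avx512bf16-0.14.0-cp313-cp313-manylinux_2_28_x86_64.whl"
)
print(name, version)  # vllm-cpu-avx512bf16 0.14.0
for tag in tags:
    # cp313-cp313-manylinux_2_28_x86_64: CPython 3.13, glibc >= 2.28, x86-64
    print(tag.interpreter, tag.abi, tag.platform)

Note that the AVX-512 BF16 CPU variant is encoded in the distribution name itself (vllm-cpu-avx512bf16), not in the platform tag; the tags only describe interpreter, ABI, and OS/architecture compatibility.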
Files changed (1712)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +225 -0
  3. vllm/_aiter_ops.py +1511 -0
  4. vllm/_bc_linter.py +54 -0
  5. vllm/_custom_ops.py +3206 -0
  6. vllm/_ipex_ops.py +445 -0
  7. vllm/_version.py +34 -0
  8. vllm/assets/__init__.py +0 -0
  9. vllm/assets/audio.py +43 -0
  10. vllm/assets/base.py +40 -0
  11. vllm/assets/image.py +62 -0
  12. vllm/assets/video.py +149 -0
  13. vllm/attention/__init__.py +0 -0
  14. vllm/attention/layer.py +913 -0
  15. vllm/attention/utils/__init__.py +0 -0
  16. vllm/attention/utils/kv_sharing_utils.py +33 -0
  17. vllm/attention/utils/kv_transfer_utils.py +60 -0
  18. vllm/beam_search.py +88 -0
  19. vllm/benchmarks/__init__.py +0 -0
  20. vllm/benchmarks/datasets.py +3277 -0
  21. vllm/benchmarks/latency.py +172 -0
  22. vllm/benchmarks/lib/__init__.py +3 -0
  23. vllm/benchmarks/lib/endpoint_request_func.py +777 -0
  24. vllm/benchmarks/lib/ready_checker.py +72 -0
  25. vllm/benchmarks/lib/utils.py +79 -0
  26. vllm/benchmarks/mm_processor.py +363 -0
  27. vllm/benchmarks/serve.py +1761 -0
  28. vllm/benchmarks/startup.py +321 -0
  29. vllm/benchmarks/sweep/__init__.py +0 -0
  30. vllm/benchmarks/sweep/cli.py +41 -0
  31. vllm/benchmarks/sweep/param_sweep.py +159 -0
  32. vllm/benchmarks/sweep/plot.py +675 -0
  33. vllm/benchmarks/sweep/plot_pareto.py +393 -0
  34. vllm/benchmarks/sweep/serve.py +450 -0
  35. vllm/benchmarks/sweep/serve_sla.py +459 -0
  36. vllm/benchmarks/sweep/server.py +114 -0
  37. vllm/benchmarks/sweep/sla_sweep.py +138 -0
  38. vllm/benchmarks/sweep/utils.py +4 -0
  39. vllm/benchmarks/throughput.py +946 -0
  40. vllm/collect_env.py +857 -0
  41. vllm/compilation/__init__.py +0 -0
  42. vllm/compilation/activation_quant_fusion.py +214 -0
  43. vllm/compilation/backends.py +840 -0
  44. vllm/compilation/base_static_graph.py +57 -0
  45. vllm/compilation/caching.py +196 -0
  46. vllm/compilation/collective_fusion.py +1224 -0
  47. vllm/compilation/compiler_interface.py +639 -0
  48. vllm/compilation/counter.py +50 -0
  49. vllm/compilation/cuda_graph.py +309 -0
  50. vllm/compilation/decorators.py +662 -0
  51. vllm/compilation/fix_functionalization.py +266 -0
  52. vllm/compilation/fusion.py +570 -0
  53. vllm/compilation/fusion_attn.py +363 -0
  54. vllm/compilation/fx_utils.py +92 -0
  55. vllm/compilation/inductor_pass.py +145 -0
  56. vllm/compilation/matcher_utils.py +454 -0
  57. vllm/compilation/monitor.py +62 -0
  58. vllm/compilation/noop_elimination.py +130 -0
  59. vllm/compilation/partition_rules.py +75 -0
  60. vllm/compilation/pass_manager.py +164 -0
  61. vllm/compilation/piecewise_backend.py +191 -0
  62. vllm/compilation/post_cleanup.py +21 -0
  63. vllm/compilation/qk_norm_rope_fusion.py +244 -0
  64. vllm/compilation/rocm_aiter_fusion.py +401 -0
  65. vllm/compilation/sequence_parallelism.py +368 -0
  66. vllm/compilation/torch25_custom_graph_pass.py +44 -0
  67. vllm/compilation/vllm_inductor_pass.py +180 -0
  68. vllm/compilation/wrapper.py +329 -0
  69. vllm/config/__init__.py +112 -0
  70. vllm/config/attention.py +114 -0
  71. vllm/config/cache.py +233 -0
  72. vllm/config/compilation.py +1149 -0
  73. vllm/config/device.py +75 -0
  74. vllm/config/ec_transfer.py +110 -0
  75. vllm/config/kv_events.py +56 -0
  76. vllm/config/kv_transfer.py +119 -0
  77. vllm/config/load.py +124 -0
  78. vllm/config/lora.py +102 -0
  79. vllm/config/model.py +2026 -0
  80. vllm/config/model_arch.py +57 -0
  81. vllm/config/multimodal.py +247 -0
  82. vllm/config/observability.py +157 -0
  83. vllm/config/parallel.py +703 -0
  84. vllm/config/pooler.py +188 -0
  85. vllm/config/profiler.py +199 -0
  86. vllm/config/scheduler.py +298 -0
  87. vllm/config/speculative.py +656 -0
  88. vllm/config/speech_to_text.py +39 -0
  89. vllm/config/structured_outputs.py +78 -0
  90. vllm/config/utils.py +374 -0
  91. vllm/config/vllm.py +1487 -0
  92. vllm/connections.py +189 -0
  93. vllm/device_allocator/__init__.py +0 -0
  94. vllm/device_allocator/cumem.py +301 -0
  95. vllm/distributed/__init__.py +6 -0
  96. vllm/distributed/communication_op.py +43 -0
  97. vllm/distributed/device_communicators/__init__.py +0 -0
  98. vllm/distributed/device_communicators/all2all.py +509 -0
  99. vllm/distributed/device_communicators/all_reduce_utils.py +344 -0
  100. vllm/distributed/device_communicators/base_device_communicator.py +303 -0
  101. vllm/distributed/device_communicators/cpu_communicator.py +209 -0
  102. vllm/distributed/device_communicators/cuda_communicator.py +346 -0
  103. vllm/distributed/device_communicators/cuda_wrapper.py +190 -0
  104. vllm/distributed/device_communicators/custom_all_reduce.py +326 -0
  105. vllm/distributed/device_communicators/mnnvl_compat.py +27 -0
  106. vllm/distributed/device_communicators/pynccl.py +386 -0
  107. vllm/distributed/device_communicators/pynccl_allocator.py +191 -0
  108. vllm/distributed/device_communicators/pynccl_wrapper.py +567 -0
  109. vllm/distributed/device_communicators/quick_all_reduce.py +290 -0
  110. vllm/distributed/device_communicators/ray_communicator.py +259 -0
  111. vllm/distributed/device_communicators/shm_broadcast.py +778 -0
  112. vllm/distributed/device_communicators/shm_object_storage.py +697 -0
  113. vllm/distributed/device_communicators/symm_mem.py +156 -0
  114. vllm/distributed/device_communicators/xpu_communicator.py +98 -0
  115. vllm/distributed/ec_transfer/__init__.py +14 -0
  116. vllm/distributed/ec_transfer/ec_connector/__init__.py +0 -0
  117. vllm/distributed/ec_transfer/ec_connector/base.py +247 -0
  118. vllm/distributed/ec_transfer/ec_connector/example_connector.py +201 -0
  119. vllm/distributed/ec_transfer/ec_connector/factory.py +85 -0
  120. vllm/distributed/ec_transfer/ec_transfer_state.py +42 -0
  121. vllm/distributed/eplb/__init__.py +3 -0
  122. vllm/distributed/eplb/async_worker.py +115 -0
  123. vllm/distributed/eplb/eplb_state.py +1192 -0
  124. vllm/distributed/eplb/policy/__init__.py +19 -0
  125. vllm/distributed/eplb/policy/abstract.py +43 -0
  126. vllm/distributed/eplb/policy/default.py +376 -0
  127. vllm/distributed/eplb/rebalance_execute.py +699 -0
  128. vllm/distributed/kv_events.py +505 -0
  129. vllm/distributed/kv_transfer/README.md +29 -0
  130. vllm/distributed/kv_transfer/__init__.py +20 -0
  131. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  132. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  133. vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
  134. vllm/distributed/kv_transfer/kv_connector/factory.py +203 -0
  135. vllm/distributed/kv_transfer/kv_connector/utils.py +459 -0
  136. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +19 -0
  137. vllm/distributed/kv_transfer/kv_connector/v1/base.py +607 -0
  138. vllm/distributed/kv_transfer/kv_connector/v1/decode_bench_connector.py +419 -0
  139. vllm/distributed/kv_transfer/kv_connector/v1/example_connector.py +450 -0
  140. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +344 -0
  141. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/__init__.py +18 -0
  142. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/multi_process_adapter.py +395 -0
  143. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/utils.py +211 -0
  144. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/vllm_v1_adapter.py +1431 -0
  145. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_mp_connector.py +941 -0
  146. vllm/distributed/kv_transfer/kv_connector/v1/metrics.py +186 -0
  147. vllm/distributed/kv_transfer/kv_connector/v1/mooncake_connector.py +916 -0
  148. vllm/distributed/kv_transfer/kv_connector/v1/moriio/__init__.py +0 -0
  149. vllm/distributed/kv_transfer/kv_connector/v1/moriio/moriio_common.py +321 -0
  150. vllm/distributed/kv_transfer/kv_connector/v1/moriio/moriio_connector.py +1515 -0
  151. vllm/distributed/kv_transfer/kv_connector/v1/moriio/moriio_engine.py +609 -0
  152. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +477 -0
  153. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +2688 -0
  154. vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py +557 -0
  155. vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
  156. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +531 -0
  157. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +632 -0
  158. vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +273 -0
  159. vllm/distributed/kv_transfer/kv_transfer_state.py +78 -0
  160. vllm/distributed/parallel_state.py +1809 -0
  161. vllm/distributed/utils.py +545 -0
  162. vllm/engine/__init__.py +0 -0
  163. vllm/engine/arg_utils.py +2137 -0
  164. vllm/engine/async_llm_engine.py +6 -0
  165. vllm/engine/llm_engine.py +6 -0
  166. vllm/engine/protocol.py +194 -0
  167. vllm/entrypoints/__init__.py +0 -0
  168. vllm/entrypoints/anthropic/__init__.py +0 -0
  169. vllm/entrypoints/anthropic/protocol.py +162 -0
  170. vllm/entrypoints/anthropic/serving_messages.py +468 -0
  171. vllm/entrypoints/api_server.py +186 -0
  172. vllm/entrypoints/chat_utils.py +1912 -0
  173. vllm/entrypoints/cli/__init__.py +19 -0
  174. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  175. vllm/entrypoints/cli/benchmark/base.py +25 -0
  176. vllm/entrypoints/cli/benchmark/latency.py +21 -0
  177. vllm/entrypoints/cli/benchmark/main.py +57 -0
  178. vllm/entrypoints/cli/benchmark/mm_processor.py +21 -0
  179. vllm/entrypoints/cli/benchmark/serve.py +21 -0
  180. vllm/entrypoints/cli/benchmark/startup.py +21 -0
  181. vllm/entrypoints/cli/benchmark/sweep.py +21 -0
  182. vllm/entrypoints/cli/benchmark/throughput.py +21 -0
  183. vllm/entrypoints/cli/collect_env.py +38 -0
  184. vllm/entrypoints/cli/main.py +79 -0
  185. vllm/entrypoints/cli/openai.py +260 -0
  186. vllm/entrypoints/cli/run_batch.py +68 -0
  187. vllm/entrypoints/cli/serve.py +253 -0
  188. vllm/entrypoints/cli/types.py +29 -0
  189. vllm/entrypoints/constants.py +12 -0
  190. vllm/entrypoints/context.py +898 -0
  191. vllm/entrypoints/grpc_server.py +531 -0
  192. vllm/entrypoints/launcher.py +175 -0
  193. vllm/entrypoints/llm.py +1807 -0
  194. vllm/entrypoints/logger.py +86 -0
  195. vllm/entrypoints/openai/__init__.py +0 -0
  196. vllm/entrypoints/openai/api_server.py +1390 -0
  197. vllm/entrypoints/openai/cli_args.py +320 -0
  198. vllm/entrypoints/openai/orca_metrics.py +120 -0
  199. vllm/entrypoints/openai/parser/__init__.py +0 -0
  200. vllm/entrypoints/openai/parser/harmony_utils.py +820 -0
  201. vllm/entrypoints/openai/parser/responses_parser.py +176 -0
  202. vllm/entrypoints/openai/protocol.py +2566 -0
  203. vllm/entrypoints/openai/run_batch.py +635 -0
  204. vllm/entrypoints/openai/serving_chat.py +1897 -0
  205. vllm/entrypoints/openai/serving_chat_stream_harmony.py +101 -0
  206. vllm/entrypoints/openai/serving_completion.py +740 -0
  207. vllm/entrypoints/openai/serving_engine.py +1612 -0
  208. vllm/entrypoints/openai/serving_models.py +309 -0
  209. vllm/entrypoints/openai/serving_responses.py +2552 -0
  210. vllm/entrypoints/openai/serving_transcription.py +168 -0
  211. vllm/entrypoints/openai/speech_to_text.py +711 -0
  212. vllm/entrypoints/openai/utils.py +49 -0
  213. vllm/entrypoints/pooling/__init__.py +16 -0
  214. vllm/entrypoints/pooling/classify/__init__.py +0 -0
  215. vllm/entrypoints/pooling/classify/api_router.py +48 -0
  216. vllm/entrypoints/pooling/classify/protocol.py +181 -0
  217. vllm/entrypoints/pooling/classify/serving.py +233 -0
  218. vllm/entrypoints/pooling/embed/__init__.py +0 -0
  219. vllm/entrypoints/pooling/embed/api_router.py +65 -0
  220. vllm/entrypoints/pooling/embed/conftest.py +28 -0
  221. vllm/entrypoints/pooling/embed/protocol.py +217 -0
  222. vllm/entrypoints/pooling/embed/serving.py +684 -0
  223. vllm/entrypoints/pooling/pooling/__init__.py +0 -0
  224. vllm/entrypoints/pooling/pooling/api_router.py +62 -0
  225. vllm/entrypoints/pooling/pooling/protocol.py +146 -0
  226. vllm/entrypoints/pooling/pooling/serving.py +354 -0
  227. vllm/entrypoints/pooling/score/__init__.py +0 -0
  228. vllm/entrypoints/pooling/score/api_router.py +147 -0
  229. vllm/entrypoints/pooling/score/protocol.py +146 -0
  230. vllm/entrypoints/pooling/score/serving.py +511 -0
  231. vllm/entrypoints/renderer.py +411 -0
  232. vllm/entrypoints/responses_utils.py +218 -0
  233. vllm/entrypoints/sagemaker/__init__.py +4 -0
  234. vllm/entrypoints/sagemaker/routes.py +118 -0
  235. vllm/entrypoints/score_utils.py +271 -0
  236. vllm/entrypoints/serve/__init__.py +94 -0
  237. vllm/entrypoints/serve/cache/__init__.py +0 -0
  238. vllm/entrypoints/serve/cache/api_router.py +61 -0
  239. vllm/entrypoints/serve/disagg/__init__.py +0 -0
  240. vllm/entrypoints/serve/disagg/api_router.py +109 -0
  241. vllm/entrypoints/serve/disagg/protocol.py +90 -0
  242. vllm/entrypoints/serve/disagg/serving.py +285 -0
  243. vllm/entrypoints/serve/elastic_ep/__init__.py +0 -0
  244. vllm/entrypoints/serve/elastic_ep/api_router.py +96 -0
  245. vllm/entrypoints/serve/elastic_ep/middleware.py +49 -0
  246. vllm/entrypoints/serve/instrumentator/__init__.py +0 -0
  247. vllm/entrypoints/serve/instrumentator/health.py +33 -0
  248. vllm/entrypoints/serve/instrumentator/metrics.py +45 -0
  249. vllm/entrypoints/serve/instrumentator/offline_docs.py +50 -0
  250. vllm/entrypoints/serve/instrumentator/server_info.py +56 -0
  251. vllm/entrypoints/serve/instrumentator/static/swagger-ui-bundle.js +2 -0
  252. vllm/entrypoints/serve/instrumentator/static/swagger-ui.css +3 -0
  253. vllm/entrypoints/serve/lora/__init__.py +0 -0
  254. vllm/entrypoints/serve/lora/api_router.py +70 -0
  255. vllm/entrypoints/serve/profile/__init__.py +0 -0
  256. vllm/entrypoints/serve/profile/api_router.py +46 -0
  257. vllm/entrypoints/serve/rlhf/__init__.py +0 -0
  258. vllm/entrypoints/serve/rlhf/api_router.py +102 -0
  259. vllm/entrypoints/serve/rpc/__init__.py +0 -0
  260. vllm/entrypoints/serve/rpc/api_router.py +61 -0
  261. vllm/entrypoints/serve/sleep/__init__.py +0 -0
  262. vllm/entrypoints/serve/sleep/api_router.py +56 -0
  263. vllm/entrypoints/serve/tokenize/__init__.py +0 -0
  264. vllm/entrypoints/serve/tokenize/api_router.py +112 -0
  265. vllm/entrypoints/serve/tokenize/serving.py +204 -0
  266. vllm/entrypoints/ssl.py +78 -0
  267. vllm/entrypoints/tool.py +187 -0
  268. vllm/entrypoints/tool_server.py +234 -0
  269. vllm/entrypoints/utils.py +336 -0
  270. vllm/env_override.py +402 -0
  271. vllm/envs.py +1791 -0
  272. vllm/exceptions.py +36 -0
  273. vllm/forward_context.py +375 -0
  274. vllm/grpc/__init__.py +17 -0
  275. vllm/grpc/compile_protos.py +94 -0
  276. vllm/grpc/vllm_engine.proto +195 -0
  277. vllm/grpc/vllm_engine_pb2.py +77 -0
  278. vllm/grpc/vllm_engine_pb2.pyi +213 -0
  279. vllm/grpc/vllm_engine_pb2_grpc.py +330 -0
  280. vllm/inputs/__init__.py +44 -0
  281. vllm/inputs/data.py +359 -0
  282. vllm/inputs/parse.py +147 -0
  283. vllm/inputs/preprocess.py +716 -0
  284. vllm/logger.py +303 -0
  285. vllm/logging_utils/__init__.py +13 -0
  286. vllm/logging_utils/dump_input.py +83 -0
  287. vllm/logging_utils/formatter.py +127 -0
  288. vllm/logging_utils/lazy.py +20 -0
  289. vllm/logging_utils/log_time.py +34 -0
  290. vllm/logits_process.py +121 -0
  291. vllm/logprobs.py +206 -0
  292. vllm/lora/__init__.py +0 -0
  293. vllm/lora/layers/__init__.py +43 -0
  294. vllm/lora/layers/base.py +66 -0
  295. vllm/lora/layers/base_linear.py +172 -0
  296. vllm/lora/layers/column_parallel_linear.py +577 -0
  297. vllm/lora/layers/fused_moe.py +739 -0
  298. vllm/lora/layers/logits_processor.py +203 -0
  299. vllm/lora/layers/replicated_linear.py +70 -0
  300. vllm/lora/layers/row_parallel_linear.py +176 -0
  301. vllm/lora/layers/utils.py +115 -0
  302. vllm/lora/layers/vocal_parallel_embedding.py +140 -0
  303. vllm/lora/lora_model.py +221 -0
  304. vllm/lora/lora_weights.py +227 -0
  305. vllm/lora/model_manager.py +858 -0
  306. vllm/lora/ops/__init__.py +0 -0
  307. vllm/lora/ops/ipex_ops/__init__.py +6 -0
  308. vllm/lora/ops/ipex_ops/lora_ops.py +57 -0
  309. vllm/lora/ops/torch_ops/__init__.py +20 -0
  310. vllm/lora/ops/torch_ops/lora_ops.py +128 -0
  311. vllm/lora/ops/triton_ops/README_TUNING.md +60 -0
  312. vllm/lora/ops/triton_ops/__init__.py +21 -0
  313. vllm/lora/ops/triton_ops/fused_moe_lora_op.py +677 -0
  314. vllm/lora/ops/triton_ops/kernel_utils.py +340 -0
  315. vllm/lora/ops/triton_ops/lora_expand_op.py +310 -0
  316. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +154 -0
  317. vllm/lora/ops/triton_ops/lora_shrink_op.py +287 -0
  318. vllm/lora/ops/triton_ops/utils.py +313 -0
  319. vllm/lora/peft_helper.py +128 -0
  320. vllm/lora/punica_wrapper/__init__.py +10 -0
  321. vllm/lora/punica_wrapper/punica_base.py +493 -0
  322. vllm/lora/punica_wrapper/punica_cpu.py +351 -0
  323. vllm/lora/punica_wrapper/punica_gpu.py +413 -0
  324. vllm/lora/punica_wrapper/punica_selector.py +21 -0
  325. vllm/lora/punica_wrapper/punica_xpu.py +276 -0
  326. vllm/lora/punica_wrapper/utils.py +150 -0
  327. vllm/lora/request.py +60 -0
  328. vllm/lora/resolver.py +88 -0
  329. vllm/lora/utils.py +281 -0
  330. vllm/lora/worker_manager.py +278 -0
  331. vllm/model_executor/__init__.py +9 -0
  332. vllm/model_executor/custom_op.py +203 -0
  333. vllm/model_executor/layers/__init__.py +0 -0
  334. vllm/model_executor/layers/activation.py +628 -0
  335. vllm/model_executor/layers/attention/__init__.py +0 -0
  336. vllm/model_executor/layers/attention/chunked_local_attention.py +130 -0
  337. vllm/model_executor/layers/attention/cross_attention.py +182 -0
  338. vllm/model_executor/layers/attention/encoder_only_attention.py +103 -0
  339. vllm/model_executor/layers/attention/mm_encoder_attention.py +234 -0
  340. vllm/model_executor/layers/attention/static_sink_attention.py +254 -0
  341. vllm/model_executor/layers/attention_layer_base.py +34 -0
  342. vllm/model_executor/layers/batch_invariant.py +1063 -0
  343. vllm/model_executor/layers/conv.py +262 -0
  344. vllm/model_executor/layers/fla/__init__.py +8 -0
  345. vllm/model_executor/layers/fla/ops/__init__.py +17 -0
  346. vllm/model_executor/layers/fla/ops/chunk.py +240 -0
  347. vllm/model_executor/layers/fla/ops/chunk_delta_h.py +344 -0
  348. vllm/model_executor/layers/fla/ops/chunk_o.py +183 -0
  349. vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +154 -0
  350. vllm/model_executor/layers/fla/ops/cumsum.py +280 -0
  351. vllm/model_executor/layers/fla/ops/fused_recurrent.py +390 -0
  352. vllm/model_executor/layers/fla/ops/index.py +41 -0
  353. vllm/model_executor/layers/fla/ops/kda.py +1351 -0
  354. vllm/model_executor/layers/fla/ops/l2norm.py +146 -0
  355. vllm/model_executor/layers/fla/ops/layernorm_guard.py +396 -0
  356. vllm/model_executor/layers/fla/ops/op.py +60 -0
  357. vllm/model_executor/layers/fla/ops/solve_tril.py +556 -0
  358. vllm/model_executor/layers/fla/ops/utils.py +194 -0
  359. vllm/model_executor/layers/fla/ops/wy_fast.py +158 -0
  360. vllm/model_executor/layers/fused_moe/__init__.py +120 -0
  361. vllm/model_executor/layers/fused_moe/all2all_utils.py +173 -0
  362. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +411 -0
  363. vllm/model_executor/layers/fused_moe/config.py +1111 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H100,dtype=fp8_w8a8.json +123 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200.json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_L40S.json +147 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_RTX_PRO_6000_Blackwell_Workstation_Edition,dtype=fp8_w8a8.json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI308X.json +213 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200.json +147 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_L40S.json +147 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=129,N=704,device_name=NVIDIA_RTX_PRO_6000_Blackwell_Workstation_Edition,dtype=fp8_w8a8.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200.json +146 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200.json +146 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  454. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  455. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
  456. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  457. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  458. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI300X.json +201 -0
  459. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
  460. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  461. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_B300_SXM6_AC,dtype=fp8_w8a8.json +147 -0
  462. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  463. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +147 -0
  464. vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
  465. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  466. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
  467. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI355_OAM,dtype=fp8_w8a8.json +164 -0
  468. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=NVIDIA_B300_SXM6_AC,dtype=fp8_w8a8.json +147 -0
  469. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  470. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  471. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  472. vllm/model_executor/layers/fused_moe/configs/E=160,N=768,device_name=NVIDIA_B300_SXM6_AC,dtype=fp8_w8a8.json +147 -0
  473. vllm/model_executor/layers/fused_moe/configs/E=20,N=1536,device_name=NVIDIA_RTX_PRO_6000_Blackwell_Server_Edition,dtype=fp8_w8a8.json +147 -0
  474. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  475. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  476. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  477. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  478. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  479. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  480. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  481. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  482. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  483. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  484. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  485. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  486. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  487. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  488. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  489. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  490. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  491. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  492. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  493. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  494. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  495. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  496. vllm/model_executor/layers/fused_moe/configs/E=256,N=384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  497. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  498. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  499. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  500. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  501. vllm/model_executor/layers/fused_moe/configs/E=32,N=1408,device_name=NVIDIA_B200.json +147 -0
  502. vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  503. vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  504. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  505. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  506. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  507. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  508. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  509. vllm/model_executor/layers/fused_moe/configs/E=40,N=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  510. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  511. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  512. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  513. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
  514. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  515. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
  516. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +147 -0
  517. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  518. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
  519. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
  520. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  521. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
  522. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  523. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  524. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  525. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
  526. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
  527. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  528. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
  529. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  530. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  531. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
  532. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
  533. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
  534. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
  535. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
  536. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
  537. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  538. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  539. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  540. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  541. vllm/model_executor/layers/fused_moe/configs/E=62,N=128,device_name=AMD_Instinct_MI300X.json +200 -0
  542. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=AMD_Instinct_MI300X.json +200 -0
  543. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  544. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=AMD_Instinct_MI300X.json +200 -0
  545. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  546. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  547. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  548. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  549. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  550. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  551. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  552. vllm/model_executor/layers/fused_moe/configs/E=64,N=1408,device_name=NVIDIA_B200.json +147 -0
  553. vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  554. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  555. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  556. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  557. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  558. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
  559. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  560. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  561. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  562. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  563. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  564. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
  565. vllm/model_executor/layers/fused_moe/configs/E=64,N=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  566. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  567. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  568. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  569. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  570. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  571. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  572. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  573. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H100_PCIe,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  574. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  575. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
  576. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  577. vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
  578. vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
  579. vllm/model_executor/layers/fused_moe/configs/E=72,N=192,device_name=AMD_Instinct_MI300X.json +200 -0
  580. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=AMD_Instinct_MI300X.json +200 -0
  581. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  582. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=AMD_Instinct_MI300X.json +200 -0
  583. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  584. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  585. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  586. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  587. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  588. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  589. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  590. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  591. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  592. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  593. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  594. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  595. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  596. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  597. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  598. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  599. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  600. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  601. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  602. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  603. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  604. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  605. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  606. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  607. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  608. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  609. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  610. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  611. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
  612. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  613. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  614. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  615. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  616. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  617. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  618. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  619. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  620. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  621. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  622. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  623. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  624. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  625. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  626. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  627. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  628. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  629. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  630. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  631. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  632. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  633. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  634. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  635. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  636. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  637. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  638. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  639. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  640. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  641. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  642. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  643. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  644. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  645. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  646. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  647. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  648. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  649. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  650. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  651. vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +444 -0
  652. vllm/model_executor/layers/fused_moe/cutlass_moe.py +1086 -0
  653. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +364 -0
  654. vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +427 -0
  655. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +420 -0
  656. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +436 -0
  657. vllm/model_executor/layers/fused_moe/fallback.py +127 -0
  658. vllm/model_executor/layers/fused_moe/flashinfer_cutedsl_moe.py +338 -0
  659. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +310 -0
  660. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +371 -0
  661. vllm/model_executor/layers/fused_moe/flashinfer_trtllm_moe.py +192 -0
  662. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1018 -0
  663. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +824 -0
  664. vllm/model_executor/layers/fused_moe/fused_moe.py +2638 -0
  665. vllm/model_executor/layers/fused_moe/fused_moe_method_base.py +119 -0
  666. vllm/model_executor/layers/fused_moe/fused_moe_modular_method.py +117 -0
  667. vllm/model_executor/layers/fused_moe/fused_moe_router.py +40 -0
  668. vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +531 -0
  669. vllm/model_executor/layers/fused_moe/layer.py +2169 -0
  670. vllm/model_executor/layers/fused_moe/modular_kernel.py +1251 -0
  671. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +192 -0
  672. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +229 -0
  673. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  674. vllm/model_executor/layers/fused_moe/oracle/__init__.py +2 -0
  675. vllm/model_executor/layers/fused_moe/oracle/fp8.py +358 -0
  676. vllm/model_executor/layers/fused_moe/oracle/nvfp4.py +280 -0
  677. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +362 -0
  678. vllm/model_executor/layers/fused_moe/prepare_finalize.py +87 -0
  679. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +347 -0
  680. vllm/model_executor/layers/fused_moe/routed_experts_capturer.py +324 -0
  681. vllm/model_executor/layers/fused_moe/routing_simulator.py +310 -0
  682. vllm/model_executor/layers/fused_moe/shared_fused_moe.py +96 -0
  683. vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +171 -0
  684. vllm/model_executor/layers/fused_moe/triton_cutlass_moe.py +78 -0
  685. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +75 -0
  686. vllm/model_executor/layers/fused_moe/trtllm_moe.py +144 -0
  687. vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py +403 -0
  688. vllm/model_executor/layers/fused_moe/utils.py +382 -0
  689. vllm/model_executor/layers/fused_moe/zero_expert_fused_moe.py +189 -0
  690. vllm/model_executor/layers/kda.py +442 -0
  691. vllm/model_executor/layers/layernorm.py +451 -0
  692. vllm/model_executor/layers/lightning_attn.py +735 -0
  693. vllm/model_executor/layers/linear.py +1478 -0
  694. vllm/model_executor/layers/logits_processor.py +109 -0
  695. vllm/model_executor/layers/mamba/__init__.py +0 -0
  696. vllm/model_executor/layers/mamba/abstract.py +68 -0
  697. vllm/model_executor/layers/mamba/linear_attn.py +410 -0
  698. vllm/model_executor/layers/mamba/mamba_mixer.py +541 -0
  699. vllm/model_executor/layers/mamba/mamba_mixer2.py +936 -0
  700. vllm/model_executor/layers/mamba/mamba_utils.py +225 -0
  701. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  702. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +1240 -0
  703. vllm/model_executor/layers/mamba/ops/layernorm_gated.py +172 -0
  704. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +586 -0
  705. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +211 -0
  706. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +456 -0
  707. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +700 -0
  708. vllm/model_executor/layers/mamba/ops/ssd_combined.py +230 -0
  709. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +157 -0
  710. vllm/model_executor/layers/mamba/short_conv.py +254 -0
  711. vllm/model_executor/layers/mla.py +179 -0
  712. vllm/model_executor/layers/pooler/__init__.py +5 -0
  713. vllm/model_executor/layers/pooler/abstract.py +39 -0
  714. vllm/model_executor/layers/pooler/activations.py +162 -0
  715. vllm/model_executor/layers/pooler/common.py +32 -0
  716. vllm/model_executor/layers/pooler/seqwise/__init__.py +45 -0
  717. vllm/model_executor/layers/pooler/seqwise/heads.py +151 -0
  718. vllm/model_executor/layers/pooler/seqwise/methods.py +93 -0
  719. vllm/model_executor/layers/pooler/seqwise/poolers.py +127 -0
  720. vllm/model_executor/layers/pooler/special.py +128 -0
  721. vllm/model_executor/layers/pooler/tokwise/__init__.py +39 -0
  722. vllm/model_executor/layers/pooler/tokwise/heads.py +133 -0
  723. vllm/model_executor/layers/pooler/tokwise/methods.py +122 -0
  724. vllm/model_executor/layers/pooler/tokwise/poolers.py +127 -0
  725. vllm/model_executor/layers/quantization/__init__.py +195 -0
  726. vllm/model_executor/layers/quantization/auto_round.py +454 -0
  727. vllm/model_executor/layers/quantization/awq.py +277 -0
  728. vllm/model_executor/layers/quantization/awq_marlin.py +795 -0
  729. vllm/model_executor/layers/quantization/awq_triton.py +337 -0
  730. vllm/model_executor/layers/quantization/base_config.py +170 -0
  731. vllm/model_executor/layers/quantization/bitblas.py +502 -0
  732. vllm/model_executor/layers/quantization/bitsandbytes.py +631 -0
  733. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +3 -0
  734. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +982 -0
  735. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +2368 -0
  736. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +37 -0
  737. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +392 -0
  738. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  739. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +176 -0
  740. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_mxfp4.py +106 -0
  741. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +124 -0
  742. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +218 -0
  743. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +176 -0
  744. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +153 -0
  745. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +138 -0
  746. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +203 -0
  747. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +125 -0
  748. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +230 -0
  749. vllm/model_executor/layers/quantization/compressed_tensors/transform/__init__.py +0 -0
  750. vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +260 -0
  751. vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +173 -0
  752. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/__init__.py +0 -0
  753. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +64 -0
  754. vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
  755. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +224 -0
  756. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  757. vllm/model_executor/layers/quantization/cpu_wna16.py +299 -0
  758. vllm/model_executor/layers/quantization/deepspeedfp.py +218 -0
  759. vllm/model_executor/layers/quantization/experts_int8.py +209 -0
  760. vllm/model_executor/layers/quantization/fbgemm_fp8.py +195 -0
  761. vllm/model_executor/layers/quantization/fp8.py +1224 -0
  762. vllm/model_executor/layers/quantization/fp_quant.py +420 -0
  763. vllm/model_executor/layers/quantization/gguf.py +682 -0
  764. vllm/model_executor/layers/quantization/gptq.py +393 -0
  765. vllm/model_executor/layers/quantization/gptq_bitblas.py +482 -0
  766. vllm/model_executor/layers/quantization/gptq_marlin.py +934 -0
  767. vllm/model_executor/layers/quantization/gptq_marlin_24.py +320 -0
  768. vllm/model_executor/layers/quantization/hqq_marlin.py +372 -0
  769. vllm/model_executor/layers/quantization/inc.py +65 -0
  770. vllm/model_executor/layers/quantization/input_quant_fp8.py +212 -0
  771. vllm/model_executor/layers/quantization/ipex_quant.py +403 -0
  772. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  773. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +94 -0
  774. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +113 -0
  775. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
  776. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +323 -0
  777. vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +98 -0
  778. vllm/model_executor/layers/quantization/kernels/mixed_precision/cpu.py +126 -0
  779. vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +130 -0
  780. vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +111 -0
  781. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +168 -0
  782. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +159 -0
  783. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +200 -0
  784. vllm/model_executor/layers/quantization/kernels/mixed_precision/xpu.py +97 -0
  785. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +76 -0
  786. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +77 -0
  787. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +128 -0
  788. vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +220 -0
  789. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +147 -0
  790. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +88 -0
  791. vllm/model_executor/layers/quantization/kv_cache.py +153 -0
  792. vllm/model_executor/layers/quantization/modelopt.py +1665 -0
  793. vllm/model_executor/layers/quantization/moe_wna16.py +518 -0
  794. vllm/model_executor/layers/quantization/mxfp4.py +1145 -0
  795. vllm/model_executor/layers/quantization/petit.py +319 -0
  796. vllm/model_executor/layers/quantization/ptpc_fp8.py +140 -0
  797. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  798. vllm/model_executor/layers/quantization/quark/quark.py +570 -0
  799. vllm/model_executor/layers/quantization/quark/quark_moe.py +797 -0
  800. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  801. vllm/model_executor/layers/quantization/quark/schemes/quark_ocp_mx.py +343 -0
  802. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  803. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +179 -0
  804. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +139 -0
  805. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  806. vllm/model_executor/layers/quantization/qutlass_utils.py +185 -0
  807. vllm/model_executor/layers/quantization/rtn.py +626 -0
  808. vllm/model_executor/layers/quantization/schema.py +90 -0
  809. vllm/model_executor/layers/quantization/torchao.py +380 -0
  810. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  811. vllm/model_executor/layers/quantization/utils/allspark_utils.py +67 -0
  812. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +229 -0
  813. vllm/model_executor/layers/quantization/utils/configs/N=10240,K=5120,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  814. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  815. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  816. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  817. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  818. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  819. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  820. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  821. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  822. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  823. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  824. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  825. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  826. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  827. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  828. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  829. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  830. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  831. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  832. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  833. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  834. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  835. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  836. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  837. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  838. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  839. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  840. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  841. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  842. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  843. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  844. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  845. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  846. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  847. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  848. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  849. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  850. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  851. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  852. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  853. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  854. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  855. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  856. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  857. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  858. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  859. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  860. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  861. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  862. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  863. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  864. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  865. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  866. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  867. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  868. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  869. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  870. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  871. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  872. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  873. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  874. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  875. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  876. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  877. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  878. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  879. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  880. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  881. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  882. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  883. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  884. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  885. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  886. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  887. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  888. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  889. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  890. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  891. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  892. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  893. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  894. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  895. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  896. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  897. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  898. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  899. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  900. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  901. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  902. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  903. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  904. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  905. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  906. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  907. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  908. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  909. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  910. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  911. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  912. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  913. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  914. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  915. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  916. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  917. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  918. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  919. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  920. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  921. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  922. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  923. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  924. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  925. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  926. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  927. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  928. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  929. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  930. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  931. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  932. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  933. vllm/model_executor/layers/quantization/utils/configs/N=5120,K=25600,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  934. vllm/model_executor/layers/quantization/utils/configs/N=5120,K=8192,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  935. vllm/model_executor/layers/quantization/utils/configs/N=51200,K=5120,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  936. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  937. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  938. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  939. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  940. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  941. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  942. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  943. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  944. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  945. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  946. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  947. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  948. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  949. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  950. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  951. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  952. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  953. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  954. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  955. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  956. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  957. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  958. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  959. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  960. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  961. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  962. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  963. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  964. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  965. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  966. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  967. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  968. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  969. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  970. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  971. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  972. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  973. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  974. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  975. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  976. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  977. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  978. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  979. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  980. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  981. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  982. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  983. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  984. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  985. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  986. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  987. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  988. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  989. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  990. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  991. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  992. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  993. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  994. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  995. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  996. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  997. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  998. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  999. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1000. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1001. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1002. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1003. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  1004. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1005. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  1006. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1007. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1008. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1009. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1010. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1011. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  1012. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1013. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  1014. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1015. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1016. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1017. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1018. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  1019. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1020. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  1021. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1022. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1023. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1024. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1025. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1026. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1027. vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
  1028. vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +514 -0
  1029. vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +370 -0
  1030. vllm/model_executor/layers/quantization/utils/fp8_utils.py +1658 -0
  1031. vllm/model_executor/layers/quantization/utils/gptq_utils.py +158 -0
  1032. vllm/model_executor/layers/quantization/utils/int8_utils.py +477 -0
  1033. vllm/model_executor/layers/quantization/utils/layer_utils.py +41 -0
  1034. vllm/model_executor/layers/quantization/utils/machete_utils.py +56 -0
  1035. vllm/model_executor/layers/quantization/utils/marlin_utils.py +720 -0
  1036. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +565 -0
  1037. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +378 -0
  1038. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +219 -0
  1039. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +467 -0
  1040. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +189 -0
  1041. vllm/model_executor/layers/quantization/utils/mxfp6_utils.py +142 -0
  1042. vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +24 -0
  1043. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +142 -0
  1044. vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +67 -0
  1045. vllm/model_executor/layers/quantization/utils/ocp_mx_utils.py +51 -0
  1046. vllm/model_executor/layers/quantization/utils/petit_utils.py +124 -0
  1047. vllm/model_executor/layers/quantization/utils/quant_utils.py +767 -0
  1048. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +519 -0
  1049. vllm/model_executor/layers/resampler.py +283 -0
  1050. vllm/model_executor/layers/rotary_embedding/__init__.py +291 -0
  1051. vllm/model_executor/layers/rotary_embedding/base.py +282 -0
  1052. vllm/model_executor/layers/rotary_embedding/common.py +289 -0
  1053. vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +184 -0
  1054. vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +218 -0
  1055. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +43 -0
  1056. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +68 -0
  1057. vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +82 -0
  1058. vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
  1059. vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
  1060. vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +83 -0
  1061. vllm/model_executor/layers/rotary_embedding/mrope.py +412 -0
  1062. vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +47 -0
  1063. vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +159 -0
  1064. vllm/model_executor/layers/rotary_embedding/xdrope.py +160 -0
  1065. vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +84 -0
  1066. vllm/model_executor/layers/utils.py +251 -0
  1067. vllm/model_executor/layers/vocab_parallel_embedding.py +564 -0
  1068. vllm/model_executor/model_loader/__init__.py +150 -0
  1069. vllm/model_executor/model_loader/base_loader.py +71 -0
  1070. vllm/model_executor/model_loader/bitsandbytes_loader.py +821 -0
  1071. vllm/model_executor/model_loader/default_loader.py +304 -0
  1072. vllm/model_executor/model_loader/dummy_loader.py +28 -0
  1073. vllm/model_executor/model_loader/gguf_loader.py +371 -0
  1074. vllm/model_executor/model_loader/online_quantization.py +275 -0
  1075. vllm/model_executor/model_loader/runai_streamer_loader.py +115 -0
  1076. vllm/model_executor/model_loader/sharded_state_loader.py +214 -0
  1077. vllm/model_executor/model_loader/tensorizer.py +793 -0
  1078. vllm/model_executor/model_loader/tensorizer_loader.py +151 -0
  1079. vllm/model_executor/model_loader/utils.py +299 -0
  1080. vllm/model_executor/model_loader/weight_utils.py +1183 -0
  1081. vllm/model_executor/models/__init__.py +44 -0
  1082. vllm/model_executor/models/adapters.py +592 -0
  1083. vllm/model_executor/models/afmoe.py +697 -0
  1084. vllm/model_executor/models/aimv2.py +248 -0
  1085. vllm/model_executor/models/apertus.py +567 -0
  1086. vllm/model_executor/models/arcee.py +428 -0
  1087. vllm/model_executor/models/arctic.py +633 -0
  1088. vllm/model_executor/models/aria.py +663 -0
  1089. vllm/model_executor/models/audioflamingo3.py +639 -0
  1090. vllm/model_executor/models/aya_vision.py +448 -0
  1091. vllm/model_executor/models/bagel.py +591 -0
  1092. vllm/model_executor/models/baichuan.py +493 -0
  1093. vllm/model_executor/models/bailing_moe.py +643 -0
  1094. vllm/model_executor/models/bamba.py +511 -0
  1095. vllm/model_executor/models/bee.py +157 -0
  1096. vllm/model_executor/models/bert.py +911 -0
  1097. vllm/model_executor/models/bert_with_rope.py +729 -0
  1098. vllm/model_executor/models/blip.py +350 -0
  1099. vllm/model_executor/models/blip2.py +736 -0
  1100. vllm/model_executor/models/bloom.py +390 -0
  1101. vllm/model_executor/models/chameleon.py +1095 -0
  1102. vllm/model_executor/models/chatglm.py +502 -0
  1103. vllm/model_executor/models/clip.py +1045 -0
  1104. vllm/model_executor/models/cohere2_vision.py +470 -0
  1105. vllm/model_executor/models/commandr.py +469 -0
  1106. vllm/model_executor/models/config.py +571 -0
  1107. vllm/model_executor/models/dbrx.py +484 -0
  1108. vllm/model_executor/models/deepencoder.py +679 -0
  1109. vllm/model_executor/models/deepseek_eagle.py +253 -0
  1110. vllm/model_executor/models/deepseek_mtp.py +447 -0
  1111. vllm/model_executor/models/deepseek_ocr.py +601 -0
  1112. vllm/model_executor/models/deepseek_v2.py +1727 -0
  1113. vllm/model_executor/models/deepseek_vl2.py +642 -0
  1114. vllm/model_executor/models/dots1.py +566 -0
  1115. vllm/model_executor/models/dots_ocr.py +830 -0
  1116. vllm/model_executor/models/ernie45.py +53 -0
  1117. vllm/model_executor/models/ernie45_moe.py +755 -0
  1118. vllm/model_executor/models/ernie45_vl.py +1702 -0
  1119. vllm/model_executor/models/ernie45_vl_moe.py +801 -0
  1120. vllm/model_executor/models/ernie_mtp.py +278 -0
  1121. vllm/model_executor/models/exaone.py +524 -0
  1122. vllm/model_executor/models/exaone4.py +518 -0
  1123. vllm/model_executor/models/exaone_moe.py +579 -0
  1124. vllm/model_executor/models/exaone_moe_mtp.py +255 -0
  1125. vllm/model_executor/models/fairseq2_llama.py +154 -0
  1126. vllm/model_executor/models/falcon.py +543 -0
  1127. vllm/model_executor/models/falcon_h1.py +675 -0
  1128. vllm/model_executor/models/flex_olmo.py +155 -0
  1129. vllm/model_executor/models/fuyu.py +371 -0
  1130. vllm/model_executor/models/gemma.py +425 -0
  1131. vllm/model_executor/models/gemma2.py +435 -0
  1132. vllm/model_executor/models/gemma3.py +520 -0
  1133. vllm/model_executor/models/gemma3_mm.py +664 -0
  1134. vllm/model_executor/models/gemma3n.py +1166 -0
  1135. vllm/model_executor/models/gemma3n_audio_utils.py +57 -0
  1136. vllm/model_executor/models/gemma3n_mm.py +820 -0
  1137. vllm/model_executor/models/glm.py +24 -0
  1138. vllm/model_executor/models/glm4.py +295 -0
  1139. vllm/model_executor/models/glm4_1v.py +1823 -0
  1140. vllm/model_executor/models/glm4_moe.py +725 -0
  1141. vllm/model_executor/models/glm4_moe_mtp.py +365 -0
  1142. vllm/model_executor/models/glm4v.py +783 -0
  1143. vllm/model_executor/models/glmasr.py +1154 -0
  1144. vllm/model_executor/models/glmasr_utils.py +188 -0
  1145. vllm/model_executor/models/gpt2.py +385 -0
  1146. vllm/model_executor/models/gpt_bigcode.py +339 -0
  1147. vllm/model_executor/models/gpt_j.py +346 -0
  1148. vllm/model_executor/models/gpt_neox.py +340 -0
  1149. vllm/model_executor/models/gpt_oss.py +745 -0
  1150. vllm/model_executor/models/granite.py +475 -0
  1151. vllm/model_executor/models/granite_speech.py +919 -0
  1152. vllm/model_executor/models/granitemoe.py +561 -0
  1153. vllm/model_executor/models/granitemoehybrid.py +703 -0
  1154. vllm/model_executor/models/granitemoeshared.py +328 -0
  1155. vllm/model_executor/models/gritlm.py +242 -0
  1156. vllm/model_executor/models/grok1.py +803 -0
  1157. vllm/model_executor/models/h2ovl.py +554 -0
  1158. vllm/model_executor/models/hunyuan_v1.py +1042 -0
  1159. vllm/model_executor/models/hunyuan_vision.py +1034 -0
  1160. vllm/model_executor/models/hyperclovax_vision.py +1163 -0
  1161. vllm/model_executor/models/idefics2_vision_model.py +427 -0
  1162. vllm/model_executor/models/idefics3.py +734 -0
  1163. vllm/model_executor/models/interfaces.py +1180 -0
  1164. vllm/model_executor/models/interfaces_base.py +252 -0
  1165. vllm/model_executor/models/intern_vit.py +454 -0
  1166. vllm/model_executor/models/internlm2.py +451 -0
  1167. vllm/model_executor/models/internlm2_ve.py +139 -0
  1168. vllm/model_executor/models/interns1.py +828 -0
  1169. vllm/model_executor/models/interns1_vit.py +433 -0
  1170. vllm/model_executor/models/internvl.py +1436 -0
  1171. vllm/model_executor/models/iquest_loopcoder.py +595 -0
  1172. vllm/model_executor/models/isaac.py +1503 -0
  1173. vllm/model_executor/models/jais.py +397 -0
  1174. vllm/model_executor/models/jais2.py +508 -0
  1175. vllm/model_executor/models/jamba.py +599 -0
  1176. vllm/model_executor/models/jina_vl.py +145 -0
  1177. vllm/model_executor/models/kanana_v.py +756 -0
  1178. vllm/model_executor/models/keye.py +1709 -0
  1179. vllm/model_executor/models/keye_vl1_5.py +726 -0
  1180. vllm/model_executor/models/kimi_linear.py +659 -0
  1181. vllm/model_executor/models/kimi_vl.py +577 -0
  1182. vllm/model_executor/models/lfm2.py +515 -0
  1183. vllm/model_executor/models/lfm2_moe.py +746 -0
  1184. vllm/model_executor/models/lfm2_vl.py +732 -0
  1185. vllm/model_executor/models/lightonocr.py +197 -0
  1186. vllm/model_executor/models/llama.py +724 -0
  1187. vllm/model_executor/models/llama4.py +860 -0
  1188. vllm/model_executor/models/llama4_eagle.py +225 -0
  1189. vllm/model_executor/models/llama_eagle.py +213 -0
  1190. vllm/model_executor/models/llama_eagle3.py +375 -0
  1191. vllm/model_executor/models/llava.py +879 -0
  1192. vllm/model_executor/models/llava_next.py +583 -0
  1193. vllm/model_executor/models/llava_next_video.py +467 -0
  1194. vllm/model_executor/models/llava_onevision.py +922 -0
  1195. vllm/model_executor/models/longcat_flash.py +767 -0
  1196. vllm/model_executor/models/longcat_flash_mtp.py +348 -0
  1197. vllm/model_executor/models/mamba.py +276 -0
  1198. vllm/model_executor/models/mamba2.py +288 -0
  1199. vllm/model_executor/models/medusa.py +179 -0
  1200. vllm/model_executor/models/midashenglm.py +826 -0
  1201. vllm/model_executor/models/mimo.py +188 -0
  1202. vllm/model_executor/models/mimo_mtp.py +294 -0
  1203. vllm/model_executor/models/mimo_v2_flash.py +718 -0
  1204. vllm/model_executor/models/minicpm.py +660 -0
  1205. vllm/model_executor/models/minicpm3.py +233 -0
  1206. vllm/model_executor/models/minicpm_eagle.py +386 -0
  1207. vllm/model_executor/models/minicpmo.py +768 -0
  1208. vllm/model_executor/models/minicpmv.py +1742 -0
  1209. vllm/model_executor/models/minimax_m2.py +552 -0
  1210. vllm/model_executor/models/minimax_text_01.py +1008 -0
  1211. vllm/model_executor/models/minimax_vl_01.py +395 -0
  1212. vllm/model_executor/models/mistral3.py +638 -0
  1213. vllm/model_executor/models/mistral_large_3.py +63 -0
  1214. vllm/model_executor/models/mistral_large_3_eagle.py +137 -0
  1215. vllm/model_executor/models/mixtral.py +599 -0
  1216. vllm/model_executor/models/mllama4.py +1170 -0
  1217. vllm/model_executor/models/mlp_speculator.py +235 -0
  1218. vllm/model_executor/models/modernbert.py +458 -0
  1219. vllm/model_executor/models/module_mapping.py +74 -0
  1220. vllm/model_executor/models/molmo.py +1592 -0
  1221. vllm/model_executor/models/moonvit.py +601 -0
  1222. vllm/model_executor/models/mpt.py +335 -0
  1223. vllm/model_executor/models/nano_nemotron_vl.py +1725 -0
  1224. vllm/model_executor/models/nemotron.py +499 -0
  1225. vllm/model_executor/models/nemotron_h.py +902 -0
  1226. vllm/model_executor/models/nemotron_nas.py +474 -0
  1227. vllm/model_executor/models/nemotron_parse.py +958 -0
  1228. vllm/model_executor/models/nemotron_vl.py +651 -0
  1229. vllm/model_executor/models/nvlm_d.py +216 -0
  1230. vllm/model_executor/models/olmo.py +412 -0
  1231. vllm/model_executor/models/olmo2.py +454 -0
  1232. vllm/model_executor/models/olmoe.py +498 -0
  1233. vllm/model_executor/models/opencua.py +262 -0
  1234. vllm/model_executor/models/openpangu.py +1378 -0
  1235. vllm/model_executor/models/openpangu_mtp.py +265 -0
  1236. vllm/model_executor/models/opt.py +426 -0
  1237. vllm/model_executor/models/orion.py +365 -0
  1238. vllm/model_executor/models/ouro.py +507 -0
  1239. vllm/model_executor/models/ovis.py +557 -0
  1240. vllm/model_executor/models/ovis2_5.py +661 -0
  1241. vllm/model_executor/models/paddleocr_vl.py +1261 -0
  1242. vllm/model_executor/models/paligemma.py +429 -0
  1243. vllm/model_executor/models/persimmon.py +373 -0
  1244. vllm/model_executor/models/phi.py +363 -0
  1245. vllm/model_executor/models/phi3.py +18 -0
  1246. vllm/model_executor/models/phi3v.py +729 -0
  1247. vllm/model_executor/models/phi4mm.py +1250 -0
  1248. vllm/model_executor/models/phi4mm_audio.py +1296 -0
  1249. vllm/model_executor/models/phi4mm_utils.py +1907 -0
  1250. vllm/model_executor/models/phimoe.py +671 -0
  1251. vllm/model_executor/models/pixtral.py +1437 -0
  1252. vllm/model_executor/models/plamo2.py +993 -0
  1253. vllm/model_executor/models/plamo3.py +437 -0
  1254. vllm/model_executor/models/qwen.py +377 -0
  1255. vllm/model_executor/models/qwen2.py +600 -0
  1256. vllm/model_executor/models/qwen2_5_omni_thinker.py +1200 -0
  1257. vllm/model_executor/models/qwen2_5_vl.py +1598 -0
  1258. vllm/model_executor/models/qwen2_audio.py +478 -0
  1259. vllm/model_executor/models/qwen2_moe.py +604 -0
  1260. vllm/model_executor/models/qwen2_rm.py +120 -0
  1261. vllm/model_executor/models/qwen2_vl.py +1588 -0
  1262. vllm/model_executor/models/qwen3.py +331 -0
  1263. vllm/model_executor/models/qwen3_moe.py +752 -0
  1264. vllm/model_executor/models/qwen3_next.py +1410 -0
  1265. vllm/model_executor/models/qwen3_next_mtp.py +293 -0
  1266. vllm/model_executor/models/qwen3_omni_moe_thinker.py +1814 -0
  1267. vllm/model_executor/models/qwen3_vl.py +2120 -0
  1268. vllm/model_executor/models/qwen3_vl_moe.py +474 -0
  1269. vllm/model_executor/models/qwen_vl.py +821 -0
  1270. vllm/model_executor/models/radio.py +573 -0
  1271. vllm/model_executor/models/registry.py +1218 -0
  1272. vllm/model_executor/models/roberta.py +239 -0
  1273. vllm/model_executor/models/rvl.py +107 -0
  1274. vllm/model_executor/models/seed_oss.py +492 -0
  1275. vllm/model_executor/models/siglip.py +1259 -0
  1276. vllm/model_executor/models/siglip2.py +495 -0
  1277. vllm/model_executor/models/siglip2navit.py +660 -0
  1278. vllm/model_executor/models/skyworkr1v.py +951 -0
  1279. vllm/model_executor/models/smolvlm.py +38 -0
  1280. vllm/model_executor/models/solar.py +484 -0
  1281. vllm/model_executor/models/stablelm.py +354 -0
  1282. vllm/model_executor/models/starcoder2.py +365 -0
  1283. vllm/model_executor/models/step3_text.py +554 -0
  1284. vllm/model_executor/models/step3_vl.py +1147 -0
  1285. vllm/model_executor/models/swin.py +500 -0
  1286. vllm/model_executor/models/tarsier.py +624 -0
  1287. vllm/model_executor/models/telechat2.py +153 -0
  1288. vllm/model_executor/models/teleflm.py +78 -0
  1289. vllm/model_executor/models/terratorch.py +318 -0
  1290. vllm/model_executor/models/transformers/__init__.py +127 -0
  1291. vllm/model_executor/models/transformers/base.py +523 -0
  1292. vllm/model_executor/models/transformers/causal.py +65 -0
  1293. vllm/model_executor/models/transformers/legacy.py +90 -0
  1294. vllm/model_executor/models/transformers/moe.py +329 -0
  1295. vllm/model_executor/models/transformers/multimodal.py +441 -0
  1296. vllm/model_executor/models/transformers/pooling.py +102 -0
  1297. vllm/model_executor/models/transformers/utils.py +253 -0
  1298. vllm/model_executor/models/ultravox.py +786 -0
  1299. vllm/model_executor/models/utils.py +832 -0
  1300. vllm/model_executor/models/vision.py +546 -0
  1301. vllm/model_executor/models/voxtral.py +867 -0
  1302. vllm/model_executor/models/voxtral_streaming.py +304 -0
  1303. vllm/model_executor/models/whisper.py +993 -0
  1304. vllm/model_executor/models/whisper_utils.py +299 -0
  1305. vllm/model_executor/models/zamba2.py +986 -0
  1306. vllm/model_executor/parameter.py +642 -0
  1307. vllm/model_executor/utils.py +113 -0
  1308. vllm/model_executor/warmup/__init__.py +0 -0
  1309. vllm/model_executor/warmup/deep_gemm_warmup.py +371 -0
  1310. vllm/model_executor/warmup/kernel_warmup.py +97 -0
  1311. vllm/model_inspection.py +136 -0
  1312. vllm/multimodal/__init__.py +38 -0
  1313. vllm/multimodal/audio.py +287 -0
  1314. vllm/multimodal/base.py +60 -0
  1315. vllm/multimodal/cache.py +829 -0
  1316. vllm/multimodal/evs.py +294 -0
  1317. vllm/multimodal/hasher.py +123 -0
  1318. vllm/multimodal/image.py +155 -0
  1319. vllm/multimodal/inputs.py +1027 -0
  1320. vllm/multimodal/parse.py +674 -0
  1321. vllm/multimodal/processing.py +2469 -0
  1322. vllm/multimodal/profiling.py +351 -0
  1323. vllm/multimodal/registry.py +375 -0
  1324. vllm/multimodal/utils.py +550 -0
  1325. vllm/multimodal/video.py +512 -0
  1326. vllm/outputs.py +347 -0
  1327. vllm/platforms/__init__.py +277 -0
  1328. vllm/platforms/cpu.py +423 -0
  1329. vllm/platforms/cuda.py +618 -0
  1330. vllm/platforms/interface.py +707 -0
  1331. vllm/platforms/rocm.py +586 -0
  1332. vllm/platforms/tpu.py +20 -0
  1333. vllm/platforms/xpu.py +262 -0
  1334. vllm/plugins/__init__.py +81 -0
  1335. vllm/plugins/io_processors/__init__.py +68 -0
  1336. vllm/plugins/io_processors/interface.py +77 -0
  1337. vllm/plugins/lora_resolvers/__init__.py +0 -0
  1338. vllm/plugins/lora_resolvers/filesystem_resolver.py +52 -0
  1339. vllm/pooling_params.py +229 -0
  1340. vllm/profiler/__init__.py +0 -0
  1341. vllm/profiler/layerwise_profile.py +392 -0
  1342. vllm/profiler/utils.py +151 -0
  1343. vllm/profiler/wrapper.py +241 -0
  1344. vllm/py.typed +2 -0
  1345. vllm/ray/__init__.py +0 -0
  1346. vllm/ray/lazy_utils.py +30 -0
  1347. vllm/ray/ray_env.py +79 -0
  1348. vllm/reasoning/__init__.py +96 -0
  1349. vllm/reasoning/abs_reasoning_parsers.py +318 -0
  1350. vllm/reasoning/basic_parsers.py +175 -0
  1351. vllm/reasoning/deepseek_r1_reasoning_parser.py +67 -0
  1352. vllm/reasoning/deepseek_v3_reasoning_parser.py +69 -0
  1353. vllm/reasoning/ernie45_reasoning_parser.py +165 -0
  1354. vllm/reasoning/glm4_moe_reasoning_parser.py +13 -0
  1355. vllm/reasoning/gptoss_reasoning_parser.py +173 -0
  1356. vllm/reasoning/granite_reasoning_parser.py +363 -0
  1357. vllm/reasoning/holo2_reasoning_parser.py +89 -0
  1358. vllm/reasoning/hunyuan_a13b_reasoning_parser.py +237 -0
  1359. vllm/reasoning/identity_reasoning_parser.py +63 -0
  1360. vllm/reasoning/minimax_m2_reasoning_parser.py +110 -0
  1361. vllm/reasoning/mistral_reasoning_parser.py +154 -0
  1362. vllm/reasoning/olmo3_reasoning_parser.py +302 -0
  1363. vllm/reasoning/qwen3_reasoning_parser.py +67 -0
  1364. vllm/reasoning/seedoss_reasoning_parser.py +27 -0
  1365. vllm/reasoning/step3_reasoning_parser.py +113 -0
  1366. vllm/sampling_params.py +629 -0
  1367. vllm/scalar_type.py +355 -0
  1368. vllm/scripts.py +17 -0
  1369. vllm/sequence.py +64 -0
  1370. vllm/tasks.py +13 -0
  1371. vllm/third_party/__init__.py +0 -0
  1372. vllm/third_party/pynvml.py +6140 -0
  1373. vllm/tokenizers/__init__.py +18 -0
  1374. vllm/tokenizers/deepseek_v32.py +187 -0
  1375. vllm/tokenizers/deepseek_v32_encoding.py +463 -0
  1376. vllm/tokenizers/detokenizer_utils.py +198 -0
  1377. vllm/tokenizers/grok2.py +443 -0
  1378. vllm/tokenizers/hf.py +119 -0
  1379. vllm/tokenizers/mistral.py +543 -0
  1380. vllm/tokenizers/protocol.py +123 -0
  1381. vllm/tokenizers/registry.py +238 -0
  1382. vllm/tool_parsers/__init__.py +158 -0
  1383. vllm/tool_parsers/abstract_tool_parser.py +274 -0
  1384. vllm/tool_parsers/deepseekv31_tool_parser.py +388 -0
  1385. vllm/tool_parsers/deepseekv32_tool_parser.py +591 -0
  1386. vllm/tool_parsers/deepseekv3_tool_parser.py +390 -0
  1387. vllm/tool_parsers/ernie45_tool_parser.py +210 -0
  1388. vllm/tool_parsers/functiongemma_tool_parser.py +321 -0
  1389. vllm/tool_parsers/gigachat3_tool_parser.py +190 -0
  1390. vllm/tool_parsers/glm47_moe_tool_parser.py +23 -0
  1391. vllm/tool_parsers/glm4_moe_tool_parser.py +215 -0
  1392. vllm/tool_parsers/granite_20b_fc_tool_parser.py +273 -0
  1393. vllm/tool_parsers/granite_tool_parser.py +253 -0
  1394. vllm/tool_parsers/hermes_tool_parser.py +495 -0
  1395. vllm/tool_parsers/hunyuan_a13b_tool_parser.py +420 -0
  1396. vllm/tool_parsers/internlm2_tool_parser.py +227 -0
  1397. vllm/tool_parsers/jamba_tool_parser.py +323 -0
  1398. vllm/tool_parsers/kimi_k2_tool_parser.py +598 -0
  1399. vllm/tool_parsers/llama4_pythonic_tool_parser.py +341 -0
  1400. vllm/tool_parsers/llama_tool_parser.py +324 -0
  1401. vllm/tool_parsers/longcat_tool_parser.py +37 -0
  1402. vllm/tool_parsers/minimax_m2_tool_parser.py +776 -0
  1403. vllm/tool_parsers/minimax_tool_parser.py +849 -0
  1404. vllm/tool_parsers/mistral_tool_parser.py +612 -0
  1405. vllm/tool_parsers/olmo3_tool_parser.py +366 -0
  1406. vllm/tool_parsers/openai_tool_parser.py +111 -0
  1407. vllm/tool_parsers/phi4mini_tool_parser.py +120 -0
  1408. vllm/tool_parsers/pythonic_tool_parser.py +332 -0
  1409. vllm/tool_parsers/qwen3coder_tool_parser.py +781 -0
  1410. vllm/tool_parsers/qwen3xml_tool_parser.py +1316 -0
  1411. vllm/tool_parsers/seed_oss_tool_parser.py +744 -0
  1412. vllm/tool_parsers/step3_tool_parser.py +303 -0
  1413. vllm/tool_parsers/utils.py +229 -0
  1414. vllm/tool_parsers/xlam_tool_parser.py +556 -0
  1415. vllm/tracing.py +135 -0
  1416. vllm/transformers_utils/__init__.py +26 -0
  1417. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1418. vllm/transformers_utils/chat_templates/registry.py +73 -0
  1419. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1420. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1421. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1422. vllm/transformers_utils/chat_templates/template_deepseek_ocr.jinja +14 -0
  1423. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1424. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1425. vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
  1426. vllm/transformers_utils/config.py +1169 -0
  1427. vllm/transformers_utils/config_parser_base.py +20 -0
  1428. vllm/transformers_utils/configs/__init__.py +106 -0
  1429. vllm/transformers_utils/configs/afmoe.py +87 -0
  1430. vllm/transformers_utils/configs/arctic.py +216 -0
  1431. vllm/transformers_utils/configs/bagel.py +53 -0
  1432. vllm/transformers_utils/configs/chatglm.py +75 -0
  1433. vllm/transformers_utils/configs/deepseek_vl2.py +126 -0
  1434. vllm/transformers_utils/configs/dotsocr.py +71 -0
  1435. vllm/transformers_utils/configs/eagle.py +90 -0
  1436. vllm/transformers_utils/configs/falcon.py +89 -0
  1437. vllm/transformers_utils/configs/flex_olmo.py +82 -0
  1438. vllm/transformers_utils/configs/hunyuan_vl.py +322 -0
  1439. vllm/transformers_utils/configs/isaac.py +100 -0
  1440. vllm/transformers_utils/configs/jais.py +243 -0
  1441. vllm/transformers_utils/configs/kimi_linear.py +148 -0
  1442. vllm/transformers_utils/configs/kimi_vl.py +38 -0
  1443. vllm/transformers_utils/configs/lfm2_moe.py +163 -0
  1444. vllm/transformers_utils/configs/medusa.py +65 -0
  1445. vllm/transformers_utils/configs/midashenglm.py +103 -0
  1446. vllm/transformers_utils/configs/mistral.py +263 -0
  1447. vllm/transformers_utils/configs/mlp_speculator.py +69 -0
  1448. vllm/transformers_utils/configs/moonvit.py +33 -0
  1449. vllm/transformers_utils/configs/nemotron.py +220 -0
  1450. vllm/transformers_utils/configs/nemotron_h.py +284 -0
  1451. vllm/transformers_utils/configs/olmo3.py +83 -0
  1452. vllm/transformers_utils/configs/ovis.py +182 -0
  1453. vllm/transformers_utils/configs/qwen3_next.py +277 -0
  1454. vllm/transformers_utils/configs/radio.py +98 -0
  1455. vllm/transformers_utils/configs/speculators/__init__.py +2 -0
  1456. vllm/transformers_utils/configs/speculators/algos.py +38 -0
  1457. vllm/transformers_utils/configs/speculators/base.py +114 -0
  1458. vllm/transformers_utils/configs/step3_vl.py +178 -0
  1459. vllm/transformers_utils/configs/tarsier2.py +24 -0
  1460. vllm/transformers_utils/configs/ultravox.py +120 -0
  1461. vllm/transformers_utils/dynamic_module.py +70 -0
  1462. vllm/transformers_utils/gguf_utils.py +280 -0
  1463. vllm/transformers_utils/model_arch_config_convertor.py +402 -0
  1464. vllm/transformers_utils/processor.py +424 -0
  1465. vllm/transformers_utils/processors/__init__.py +25 -0
  1466. vllm/transformers_utils/processors/bagel.py +78 -0
  1467. vllm/transformers_utils/processors/deepseek_ocr.py +438 -0
  1468. vllm/transformers_utils/processors/deepseek_vl2.py +406 -0
  1469. vllm/transformers_utils/processors/hunyuan_vl.py +233 -0
  1470. vllm/transformers_utils/processors/hunyuan_vl_image.py +477 -0
  1471. vllm/transformers_utils/processors/ovis.py +453 -0
  1472. vllm/transformers_utils/processors/ovis2_5.py +468 -0
  1473. vllm/transformers_utils/repo_utils.py +287 -0
  1474. vllm/transformers_utils/runai_utils.py +102 -0
  1475. vllm/transformers_utils/s3_utils.py +95 -0
  1476. vllm/transformers_utils/tokenizer.py +19 -0
  1477. vllm/transformers_utils/utils.py +112 -0
  1478. vllm/triton_utils/__init__.py +20 -0
  1479. vllm/triton_utils/importing.py +103 -0
  1480. vllm/usage/__init__.py +0 -0
  1481. vllm/usage/usage_lib.py +278 -0
  1482. vllm/utils/__init__.py +36 -0
  1483. vllm/utils/argparse_utils.py +491 -0
  1484. vllm/utils/async_utils.py +310 -0
  1485. vllm/utils/cache.py +214 -0
  1486. vllm/utils/collection_utils.py +112 -0
  1487. vllm/utils/counter.py +45 -0
  1488. vllm/utils/deep_gemm.py +424 -0
  1489. vllm/utils/flashinfer.py +602 -0
  1490. vllm/utils/func_utils.py +236 -0
  1491. vllm/utils/gc_utils.py +151 -0
  1492. vllm/utils/hashing.py +117 -0
  1493. vllm/utils/import_utils.py +438 -0
  1494. vllm/utils/jsontree.py +158 -0
  1495. vllm/utils/math_utils.py +32 -0
  1496. vllm/utils/mem_constants.py +13 -0
  1497. vllm/utils/mem_utils.py +285 -0
  1498. vllm/utils/nccl.py +64 -0
  1499. vllm/utils/network_utils.py +331 -0
  1500. vllm/utils/nvtx_pytorch_hooks.py +286 -0
  1501. vllm/utils/platform_utils.py +59 -0
  1502. vllm/utils/profiling.py +56 -0
  1503. vllm/utils/registry.py +51 -0
  1504. vllm/utils/serial_utils.py +214 -0
  1505. vllm/utils/system_utils.py +296 -0
  1506. vllm/utils/tensor_schema.py +255 -0
  1507. vllm/utils/torch_utils.py +781 -0
  1508. vllm/v1/__init__.py +0 -0
  1509. vllm/v1/attention/__init__.py +0 -0
  1510. vllm/v1/attention/backend.py +736 -0
  1511. vllm/v1/attention/backends/__init__.py +0 -0
  1512. vllm/v1/attention/backends/cpu_attn.py +501 -0
  1513. vllm/v1/attention/backends/fa_utils.py +126 -0
  1514. vllm/v1/attention/backends/flash_attn.py +1092 -0
  1515. vllm/v1/attention/backends/flash_attn_diffkv.py +277 -0
  1516. vllm/v1/attention/backends/flashinfer.py +1713 -0
  1517. vllm/v1/attention/backends/flex_attention.py +1024 -0
  1518. vllm/v1/attention/backends/gdn_attn.py +382 -0
  1519. vllm/v1/attention/backends/linear_attn.py +77 -0
  1520. vllm/v1/attention/backends/mamba1_attn.py +28 -0
  1521. vllm/v1/attention/backends/mamba2_attn.py +256 -0
  1522. vllm/v1/attention/backends/mamba_attn.py +313 -0
  1523. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1524. vllm/v1/attention/backends/mla/aiter_triton_mla.py +66 -0
  1525. vllm/v1/attention/backends/mla/common.py +2156 -0
  1526. vllm/v1/attention/backends/mla/cutlass_mla.py +278 -0
  1527. vllm/v1/attention/backends/mla/flashattn_mla.py +348 -0
  1528. vllm/v1/attention/backends/mla/flashinfer_mla.py +175 -0
  1529. vllm/v1/attention/backends/mla/flashmla.py +321 -0
  1530. vllm/v1/attention/backends/mla/flashmla_sparse.py +1021 -0
  1531. vllm/v1/attention/backends/mla/indexer.py +345 -0
  1532. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +284 -0
  1533. vllm/v1/attention/backends/mla/rocm_aiter_mla_sparse.py +321 -0
  1534. vllm/v1/attention/backends/mla/triton_mla.py +171 -0
  1535. vllm/v1/attention/backends/registry.py +258 -0
  1536. vllm/v1/attention/backends/rocm_aiter_fa.py +1000 -0
  1537. vllm/v1/attention/backends/rocm_aiter_unified_attn.py +206 -0
  1538. vllm/v1/attention/backends/rocm_attn.py +405 -0
  1539. vllm/v1/attention/backends/short_conv_attn.py +26 -0
  1540. vllm/v1/attention/backends/tree_attn.py +430 -0
  1541. vllm/v1/attention/backends/triton_attn.py +578 -0
  1542. vllm/v1/attention/backends/utils.py +978 -0
  1543. vllm/v1/attention/ops/__init__.py +0 -0
  1544. vllm/v1/attention/ops/chunked_prefill_paged_decode.py +459 -0
  1545. vllm/v1/attention/ops/common.py +469 -0
  1546. vllm/v1/attention/ops/flashmla.py +254 -0
  1547. vllm/v1/attention/ops/merge_attn_states.py +47 -0
  1548. vllm/v1/attention/ops/paged_attn.py +51 -0
  1549. vllm/v1/attention/ops/pallas_kv_cache_update.py +130 -0
  1550. vllm/v1/attention/ops/prefix_prefill.py +862 -0
  1551. vllm/v1/attention/ops/rocm_aiter_mla_sparse.py +210 -0
  1552. vllm/v1/attention/ops/triton_decode_attention.py +709 -0
  1553. vllm/v1/attention/ops/triton_merge_attn_states.py +116 -0
  1554. vllm/v1/attention/ops/triton_prefill_attention.py +272 -0
  1555. vllm/v1/attention/ops/triton_reshape_and_cache_flash.py +395 -0
  1556. vllm/v1/attention/ops/triton_unified_attention.py +1088 -0
  1557. vllm/v1/attention/ops/vit_attn_wrappers.py +185 -0
  1558. vllm/v1/attention/selector.py +145 -0
  1559. vllm/v1/core/__init__.py +0 -0
  1560. vllm/v1/core/block_pool.py +489 -0
  1561. vllm/v1/core/encoder_cache_manager.py +402 -0
  1562. vllm/v1/core/kv_cache_coordinator.py +560 -0
  1563. vllm/v1/core/kv_cache_manager.py +485 -0
  1564. vllm/v1/core/kv_cache_metrics.py +96 -0
  1565. vllm/v1/core/kv_cache_utils.py +1642 -0
  1566. vllm/v1/core/sched/__init__.py +0 -0
  1567. vllm/v1/core/sched/async_scheduler.py +66 -0
  1568. vllm/v1/core/sched/interface.py +205 -0
  1569. vllm/v1/core/sched/output.py +261 -0
  1570. vllm/v1/core/sched/request_queue.py +208 -0
  1571. vllm/v1/core/sched/scheduler.py +1936 -0
  1572. vllm/v1/core/sched/utils.py +64 -0
  1573. vllm/v1/core/single_type_kv_cache_manager.py +926 -0
  1574. vllm/v1/cudagraph_dispatcher.py +183 -0
  1575. vllm/v1/engine/__init__.py +224 -0
  1576. vllm/v1/engine/async_llm.py +874 -0
  1577. vllm/v1/engine/coordinator.py +396 -0
  1578. vllm/v1/engine/core.py +1614 -0
  1579. vllm/v1/engine/core_client.py +1422 -0
  1580. vllm/v1/engine/detokenizer.py +351 -0
  1581. vllm/v1/engine/exceptions.py +18 -0
  1582. vllm/v1/engine/input_processor.py +713 -0
  1583. vllm/v1/engine/llm_engine.py +415 -0
  1584. vllm/v1/engine/logprobs.py +245 -0
  1585. vllm/v1/engine/output_processor.py +715 -0
  1586. vllm/v1/engine/parallel_sampling.py +150 -0
  1587. vllm/v1/engine/utils.py +1086 -0
  1588. vllm/v1/executor/__init__.py +6 -0
  1589. vllm/v1/executor/abstract.py +352 -0
  1590. vllm/v1/executor/multiproc_executor.py +888 -0
  1591. vllm/v1/executor/ray_distributed_executor.py +8 -0
  1592. vllm/v1/executor/ray_executor.py +623 -0
  1593. vllm/v1/executor/ray_utils.py +468 -0
  1594. vllm/v1/executor/uniproc_executor.py +186 -0
  1595. vllm/v1/kv_cache_interface.py +485 -0
  1596. vllm/v1/kv_offload/__init__.py +0 -0
  1597. vllm/v1/kv_offload/abstract.py +161 -0
  1598. vllm/v1/kv_offload/arc_manager.py +237 -0
  1599. vllm/v1/kv_offload/backend.py +97 -0
  1600. vllm/v1/kv_offload/backends/__init__.py +0 -0
  1601. vllm/v1/kv_offload/backends/cpu.py +62 -0
  1602. vllm/v1/kv_offload/cpu.py +109 -0
  1603. vllm/v1/kv_offload/factory.py +58 -0
  1604. vllm/v1/kv_offload/lru_manager.py +139 -0
  1605. vllm/v1/kv_offload/mediums.py +39 -0
  1606. vllm/v1/kv_offload/spec.py +70 -0
  1607. vllm/v1/kv_offload/worker/__init__.py +0 -0
  1608. vllm/v1/kv_offload/worker/cpu_gpu.py +287 -0
  1609. vllm/v1/kv_offload/worker/worker.py +163 -0
  1610. vllm/v1/metrics/__init__.py +0 -0
  1611. vllm/v1/metrics/loggers.py +1320 -0
  1612. vllm/v1/metrics/perf.py +1244 -0
  1613. vllm/v1/metrics/prometheus.py +82 -0
  1614. vllm/v1/metrics/ray_wrappers.py +194 -0
  1615. vllm/v1/metrics/reader.py +257 -0
  1616. vllm/v1/metrics/stats.py +440 -0
  1617. vllm/v1/outputs.py +242 -0
  1618. vllm/v1/pool/__init__.py +0 -0
  1619. vllm/v1/pool/metadata.py +124 -0
  1620. vllm/v1/request.py +281 -0
  1621. vllm/v1/sample/__init__.py +0 -0
  1622. vllm/v1/sample/logits_processor/__init__.py +352 -0
  1623. vllm/v1/sample/logits_processor/builtin.py +278 -0
  1624. vllm/v1/sample/logits_processor/interface.py +106 -0
  1625. vllm/v1/sample/logits_processor/state.py +165 -0
  1626. vllm/v1/sample/metadata.py +44 -0
  1627. vllm/v1/sample/ops/__init__.py +0 -0
  1628. vllm/v1/sample/ops/bad_words.py +57 -0
  1629. vllm/v1/sample/ops/logprobs.py +25 -0
  1630. vllm/v1/sample/ops/penalties.py +57 -0
  1631. vllm/v1/sample/ops/topk_topp_sampler.py +388 -0
  1632. vllm/v1/sample/rejection_sampler.py +822 -0
  1633. vllm/v1/sample/sampler.py +319 -0
  1634. vllm/v1/sample/tpu/__init__.py +0 -0
  1635. vllm/v1/sample/tpu/metadata.py +120 -0
  1636. vllm/v1/sample/tpu/sampler.py +215 -0
  1637. vllm/v1/serial_utils.py +514 -0
  1638. vllm/v1/spec_decode/__init__.py +0 -0
  1639. vllm/v1/spec_decode/eagle.py +1346 -0
  1640. vllm/v1/spec_decode/medusa.py +73 -0
  1641. vllm/v1/spec_decode/metadata.py +66 -0
  1642. vllm/v1/spec_decode/metrics.py +225 -0
  1643. vllm/v1/spec_decode/ngram_proposer.py +281 -0
  1644. vllm/v1/spec_decode/suffix_decoding.py +95 -0
  1645. vllm/v1/spec_decode/utils.py +109 -0
  1646. vllm/v1/structured_output/__init__.py +337 -0
  1647. vllm/v1/structured_output/backend_guidance.py +291 -0
  1648. vllm/v1/structured_output/backend_lm_format_enforcer.py +177 -0
  1649. vllm/v1/structured_output/backend_outlines.py +324 -0
  1650. vllm/v1/structured_output/backend_types.py +136 -0
  1651. vllm/v1/structured_output/backend_xgrammar.py +378 -0
  1652. vllm/v1/structured_output/request.py +91 -0
  1653. vllm/v1/structured_output/utils.py +457 -0
  1654. vllm/v1/utils.py +466 -0
  1655. vllm/v1/worker/__init__.py +0 -0
  1656. vllm/v1/worker/block_table.py +343 -0
  1657. vllm/v1/worker/cp_utils.py +42 -0
  1658. vllm/v1/worker/cpu_model_runner.py +122 -0
  1659. vllm/v1/worker/cpu_worker.py +192 -0
  1660. vllm/v1/worker/dp_utils.py +240 -0
  1661. vllm/v1/worker/ec_connector_model_runner_mixin.py +85 -0
  1662. vllm/v1/worker/gpu/README.md +4 -0
  1663. vllm/v1/worker/gpu/__init__.py +0 -0
  1664. vllm/v1/worker/gpu/async_utils.py +98 -0
  1665. vllm/v1/worker/gpu/attn_utils.py +183 -0
  1666. vllm/v1/worker/gpu/block_table.py +222 -0
  1667. vllm/v1/worker/gpu/buffer_utils.py +224 -0
  1668. vllm/v1/worker/gpu/cudagraph_utils.py +264 -0
  1669. vllm/v1/worker/gpu/dp_utils.py +31 -0
  1670. vllm/v1/worker/gpu/input_batch.py +526 -0
  1671. vllm/v1/worker/gpu/metrics/__init__.py +0 -0
  1672. vllm/v1/worker/gpu/metrics/logits.py +42 -0
  1673. vllm/v1/worker/gpu/mm/__init__.py +0 -0
  1674. vllm/v1/worker/gpu/mm/mrope_utils.py +127 -0
  1675. vllm/v1/worker/gpu/model_runner.py +1005 -0
  1676. vllm/v1/worker/gpu/sample/__init__.py +0 -0
  1677. vllm/v1/worker/gpu/sample/gumbel.py +106 -0
  1678. vllm/v1/worker/gpu/sample/logit_bias.py +270 -0
  1679. vllm/v1/worker/gpu/sample/logprob.py +167 -0
  1680. vllm/v1/worker/gpu/sample/metadata.py +79 -0
  1681. vllm/v1/worker/gpu/sample/min_p.py +58 -0
  1682. vllm/v1/worker/gpu/sample/output.py +14 -0
  1683. vllm/v1/worker/gpu/sample/penalties.py +155 -0
  1684. vllm/v1/worker/gpu/sample/sampler.py +88 -0
  1685. vllm/v1/worker/gpu/spec_decode/__init__.py +18 -0
  1686. vllm/v1/worker/gpu/spec_decode/eagle.py +566 -0
  1687. vllm/v1/worker/gpu/spec_decode/eagle_cudagraph.py +115 -0
  1688. vllm/v1/worker/gpu/spec_decode/rejection_sample.py +71 -0
  1689. vllm/v1/worker/gpu/states.py +282 -0
  1690. vllm/v1/worker/gpu/structured_outputs.py +100 -0
  1691. vllm/v1/worker/gpu_input_batch.py +1030 -0
  1692. vllm/v1/worker/gpu_model_runner.py +5761 -0
  1693. vllm/v1/worker/gpu_ubatch_wrapper.py +475 -0
  1694. vllm/v1/worker/gpu_worker.py +968 -0
  1695. vllm/v1/worker/kv_connector_model_runner_mixin.py +300 -0
  1696. vllm/v1/worker/lora_model_runner_mixin.py +225 -0
  1697. vllm/v1/worker/tpu_input_batch.py +574 -0
  1698. vllm/v1/worker/tpu_worker.py +18 -0
  1699. vllm/v1/worker/ubatch_utils.py +112 -0
  1700. vllm/v1/worker/ubatching.py +242 -0
  1701. vllm/v1/worker/utils.py +400 -0
  1702. vllm/v1/worker/worker_base.py +372 -0
  1703. vllm/v1/worker/workspace.py +253 -0
  1704. vllm/v1/worker/xpu_model_runner.py +48 -0
  1705. vllm/v1/worker/xpu_worker.py +174 -0
  1706. vllm/version.py +39 -0
  1707. vllm/vllm_flash_attn/.gitkeep +0 -0
  1708. vllm_cpu_avx512bf16-0.14.0.dist-info/METADATA +348 -0
  1709. vllm_cpu_avx512bf16-0.14.0.dist-info/RECORD +1712 -0
  1710. vllm_cpu_avx512bf16-0.14.0.dist-info/WHEEL +5 -0
  1711. vllm_cpu_avx512bf16-0.14.0.dist-info/entry_points.txt +5 -0
  1712. vllm_cpu_avx512bf16-0.14.0.dist-info/top_level.txt +1 -0
--- /dev/null
+++ b/vllm/v1/attention/backends/mla/common.py
@@ -0,0 +1,2156 @@
+ # SPDX-License-Identifier: Apache-2.0
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+ """
+ # MLA Common Components
+
+ This file implements common components for MLA implementations.
+
+ First we define:
+
+ Sq      as Q sequence length
+ Skv     as KV sequence length
+
+ MLA has two possible ways of computing: a data-movement friendly approach and
+ a compute friendly approach. We generally want to use the compute friendly
+ approach for "prefill" (i.e. the ratio Sq / Skv is "small", near 1)
+ and the data-movement friendly approach for "decode" (i.e. the ratio
+ Sq / Skv is "large").
+
+ NOTE: what we deem small and large is currently determined by whether the
+ scheduler labels a request as prefill or decode, but this is something we
+ should probably tune.
+
+ Main reference: DeepseekV2 paper, and FlashInfer Implementation
+ (https://arxiv.org/abs/2405.04434 and https://github.com/flashinfer-ai/flashinfer/pull/551).
+
+ Deepseek's MLA attention works the following way:
+ * Use a single latent vector to represent the per-token entry of the KV cache.
+ * For decode (i.e. the memory friendly approach) the attention "simulates" a
+   multi-head attention, while the compute is similar to multi-query attention.
+
+ Below is an example of both paths assuming batch size = 1
+
+ ## Further Definitions:
+
+ C       Context length, `Skv - Sq`
+ H       hidden size
+ N       number of attention heads
+ Lq      latent dimension for Q              1536 in DSV3
+ Lkv     latent dimension for K/V            512 in DSV3
+ P       nope dimension, no rope.            128 in DSV3
+ R       rope dimension, goes through rope.  64 in DSV3
+ V       V head dim.                         128 in DSV3
+
+ ## Vector/Matrix Definitions
+
+ h_t         hidden states (input to attention)  shape [Sq, H]
+ q_c         latent/compressed Q                 shape [Sq, Lq]
+ q_nope      uncompressed Q (no-rope)            shape [Sq, N, P]
+ q_pe        uncompressed Q (rope)               shape [Sq, N, R]
+ kv_c        latent/compressed KV                shape [Skv, Lkv]
+ k_pe        decoupled k position embeddings     shape [Skv, R]
+ new_kv_c    new kv_c from current iter          shape [Sq, Lkv]
+ new_k_pe    new k_pe from current iter          shape [Sq, R]
+ cache_kv_c  cached kv_c from previous iters     shape [C, Lkv]
+ cache_k_pe  cached k_pe from previous iters     shape [C, R]
+ W_DQ        project h_t to q_c                  shape [H, Lq]
+ W_UQ        project q_c to q_nope               shape [Lq, N * P]
+ W_QR        project q_c to q_pe                 shape [Lq, N * R]
+ W_DKV       project h_t to kv_c                 shape [H, Lkv]
+ W_UK        project kv_c to k_nope              shape [Lkv, N, P]
+ W_KR        project h_t to k_pe                 shape [H, R]
+ W_UV        project kv_c to v                   shape [Lkv, N, V]
+ W_O         project v to h_t                    shape [N * V, H]
+
+
+ ## Compute Friendly Approach (i.e. "_forward_prefill"):
+
+ q_c      = h_t @ W_DQ
+ q_nope   = (q_c @ W_UQ).view(Sq, N, P)
+ q_pe     = RoPE(q_c @ W_QR).view(Sq, N, R)
+ new_kv_c = h_t @ W_DKV
+ new_k_pe = RoPE(h_t @ W_KR)
+ kv_c     = torch.cat([new_kv_c, cache_kv_c], dim=0)
+ k_pe     = torch.cat([new_k_pe, cache_k_pe], dim=0)
+ k_nope   = (kv_c @ W_UK.view(Lkv, N * P)).view(Skv, N, P)
+ v        = (kv_c @ W_UV.view(Lkv, N * V)).view(Skv, N, V)
+
+ // MHA with QK headdim = P + R
+ //  V headdim = V
+ //  spda_o shape [Sq, N, V]
+ spda_o = scaled_dot_product_attention(
+     torch.cat([q_nope, q_pe], dim=-1),
+     torch.cat([k_nope, k_pe.unsqueeze(1).expand(-1, N, -1)], dim=-1),
+     v
+ )
+ return spda_o @ W_O
+
+ NOTE: in the actual code,
+     `kv_b_proj` is [W_UK; W_UV] concatenated per head
+     `q_b_proj`  is [W_UQ; W_QR] concatenated per head
+     `out_proj`  is W_O
+
+
+ ## Data-Movement Friendly Approach (i.e. "_forward_decode"):
+
+ Runtime
+ q_c      = h_t @ W_DQ
+ q_nope   = (q_c @ W_UQ).view(-1, N, P)
+ ql_nope  = einsum("snh,lnh->snl", q_nope, W_UK)
+ q_pe     = RoPE(q_c @ W_QR).view(Sq, N, R)
+ new_kv_c = h_t @ W_DKV
+ new_k_pe = RoPE(h_t @ W_KR)
+ kv_c     = torch.cat([new_kv_c, cache_kv_c], dim=0)
+ k_pe     = torch.cat([new_k_pe, cache_k_pe], dim=0)
+
+ // MQA with QK headdim = Lkv + R
+ //  V headdim = Lkv
+ //  spda_o shape [Sq, N, Lkv]
+ // NOTE: this is less compute-friendly since Lkv > P
+ //       but is more data-movement friendly since it's MQA vs MHA
+ spda_o = scaled_dot_product_attention(
+     torch.cat([ql_nope, q_pe], dim=-1),
+     torch.cat([kv_c, k_pe], dim=-1),
+     kv_c
+ )
+
+ o = einsum("snl,lnv->snv", spda_o.reshape(-1, N, Lkv), W_UV)
+ return o.view(-1, N * V) @ W_O
+
+
+ ## Chunked Prefill
+
+ For chunked prefill we want to use the compute friendly algorithm. We are
+ assuming a sufficiently large Sq / Skv ratio; in the future we may want to
+ switch to the data-movement friendly approach if the chunk (i.e. `Sq`) is
+ small.
+
+ However, the compute-friendly approach can potentially run out of memory if
+ Skv is large due to: `k_nope = (kv_c @ W_UK).view(Skv, N, P)`
+
+ To mitigate this, we chunk the computation of attention with respect to the
+ current context (i.e. `cache_kv_c` and `cache_k_pe`) so that we can use a
+ fixed workspace size.
+
+ The chunked prefill approach is as follows:
+
+ MCC   Max chunk of context to process per iter, computed dynamically,
+       used to bound the memory usage
+
+ q_c        = h_t @ W_DQ
+ q_nope     = (q_c @ W_UQ).view(Sq, N, P)
+ q_pe       = RoPE(q_c @ W_QR).view(Sq, N, R)
+ new_kv_c   = h_t @ W_DKV
+ new_k_pe   = RoPE(h_t @ W_KR)
+ new_k_nope = (new_kv_c @ W_UK.view(Lkv, N * P)).view(Sq, N, P)
+ new_v      = (new_kv_c @ W_UV.view(Lkv, N * V)).view(Sq, N, V)
+
+ // MHA between queries and new KV
+ //  with QK headdim = P + R
+ //  V headdim = V
+ //  curr_o   shape [Sq, N, V]
+ //  curr_lse shape [N, Sq], this is just the order FA returns
+ curr_o, curr_lse = scaled_dot_product_attention(
+     torch.cat([q_nope, q_pe], dim=-1),
+     torch.cat([new_k_nope, new_k_pe.unsqueeze(1).expand(-1, N, -1)], dim=-1),
+     new_v,
+     causal=True,
+     return_softmax_lse=True
+ )
+
+ // Compute attention with the already existing context
+ for chunk_idx in range(cdiv(C, MCC)):
+     chunk_start = chunk_idx * MCC
+     chunk_end   = min(chunk_start + MCC, C)
+     Sc          = chunk_end - chunk_start
+     cache_kv_c_chunk   = cache_kv_c[chunk_start:chunk_end]
+     cache_k_pe_chunk   = cache_k_pe[chunk_start:chunk_end]
+     cache_k_nope_chunk = (cache_kv_c_chunk @ W_UK).view(-1, N, P)
+     cache_v_chunk      = (cache_kv_c_chunk @ W_UV).view(-1, N, V)
+
+     chunk_o, chunk_lse = scaled_dot_product_attention(
+         torch.cat([q_nope, q_pe], dim=-1),
+         torch.cat([cache_k_nope_chunk,
+                    cache_k_pe_chunk.unsqueeze(1).expand(-1, N, -1)],
+                   dim=-1),
+         cache_v_chunk,
+         causal=False,
+         return_softmax_lse=True
+     )
+
+     curr_o, curr_lse = merge_attn_states(
+         suffix_output=curr_o,
+         suffix_lse=curr_lse,
+         prefix_output=chunk_o,
+         prefix_lse=chunk_lse,
+     )
+
+ return curr_o @ W_O
+ """
+
+ import functools
+ from abc import abstractmethod
+ from dataclasses import dataclass, field
+ from enum import Enum
+ from typing import ClassVar, Generic, TypeVar
+
+ import torch
+ from tqdm import tqdm
+
+ from vllm import _custom_ops as ops
+ from vllm import envs
+ from vllm._aiter_ops import rocm_aiter_ops
+ from vllm.config import ModelConfig, VllmConfig, get_current_vllm_config
+ from vllm.distributed.parallel_state import get_dcp_group, is_global_first_rank
+ from vllm.logger import init_logger
+ from vllm.model_executor.layers.batch_invariant import (
+     vllm_is_batch_invariant,
+ )
+ from vllm.model_executor.layers.linear import (
+     ColumnParallelLinear,
+     LinearBase,
+     UnquantizedLinearMethod,
+ )
+ from vllm.platforms import current_platform
+ from vllm.utils.flashinfer import has_nvidia_artifactory
+ from vllm.utils.math_utils import cdiv, round_down
+ from vllm.v1.attention.backend import (
+     AttentionBackend,
+     AttentionLayer,
+     AttentionMetadata,
+     AttentionMetadataBuilder,
+     CommonAttentionMetadata,
+     MLAAttentionImpl,
+ )
+ from vllm.v1.attention.backends.fa_utils import get_flash_attn_version
+ from vllm.v1.attention.backends.utils import (
+     get_dcp_local_seq_lens,
+     get_per_layer_parameters,
+     infer_global_hyperparameters,
+     split_decodes_and_prefills,
+ )
+ from vllm.v1.attention.ops.common import cp_lse_ag_out_rs
+ from vllm.v1.attention.ops.merge_attn_states import merge_attn_states
+ from vllm.v1.kv_cache_interface import AttentionSpec
+
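The `merge_attn_states` import above is what stitches the per-chunk partial results together in the chunked-prefill loop sketched in the module docstring. The underlying log-sum-exp identity can be verified in a few lines; a minimal sketch, assuming only PyTorch (the real op is a fused kernel with a different layout):

    import torch

    def merge_two(o_a, lse_a, o_b, lse_b):
        # Each o_* is a softmax-weighted value sum over a disjoint key set and
        # lse_* its log-sum-exp normalizer; rescaling by the merged normalizer
        # recovers exact attention over the union of the key sets.
        lse = torch.logaddexp(lse_a, lse_b)
        w_a = torch.exp(lse_a - lse).unsqueeze(-1)
        w_b = torch.exp(lse_b - lse).unsqueeze(-1)
        return o_a * w_a + o_b * w_b, lse

    def attn(q, k, v):
        s = q @ k.T
        return torch.softmax(s, dim=-1) @ v, torch.logsumexp(s, dim=-1)

    q = torch.randn(3, 64, dtype=torch.float64)
    k = torch.randn(10, 64, dtype=torch.float64)
    v = torch.randn(10, 64, dtype=torch.float64)

    o_a, lse_a = attn(q, k[:6], v[:6])  # "prefix" (context chunk)
    o_b, lse_b = attn(q, k[6:], v[6:])  # "suffix" (current tokens)
    merged, _ = merge_two(o_a, lse_a, o_b, lse_b)
    reference, _ = attn(q, k, v)
    torch.testing.assert_close(merged, reference)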
+
+ class QueryLenSupport(Enum):
+     """Defines the level of query length support for an attention backend's
+     decode pipeline.
+
+     - SINGLE_ONLY: Decode pipeline only supports single-token queries
+       (query_len=1)
+     - UNIFORM: Decode pipeline supports uniform multi-token queries
+       (all requests must have same query_len > 1)
+     - VARLEN: Decode pipeline supports variable-length queries
+       (mixed query lengths in same batch)
+     """
+
+     SINGLE_ONLY = "single_only"
+     UNIFORM = "uniform"
+     VARLEN = "varlen"
+
+
+ try:
+     from vllm.vllm_flash_attn import (  # type: ignore[attr-defined]
+         flash_attn_varlen_func,
+     )
+
+     is_vllm_fa = True
+ except ImportError:
+     # For rocm use upstream flash attention
+     if current_platform.is_rocm():
+         from flash_attn import flash_attn_varlen_func  # type: ignore[no-redef]
+     is_vllm_fa = False
+
+ try:
+     from flashinfer import BatchPrefillWithRaggedKVCacheWrapper
+     from flashinfer.prefill import cudnn_batch_prefill_with_kv_cache  # noqa: F401
+
+     flashinfer_available = True
+ except ImportError:
+     BatchPrefillWithRaggedKVCacheWrapper = object
+
+     flashinfer_available = False
+
+
+ def dynamic_per_batched_tensor_quant(
+     x: torch.Tensor, dtype: torch.dtype = torch.float8_e4m3fn
+ ):
+     DTYPE_MAX = torch.finfo(dtype).max
+     min_val, max_val = x.aminmax()
+     amax = torch.maximum(min_val.abs(), max_val.abs()).clamp(min=1e-10)
+     scale = DTYPE_MAX / amax
+     x_scl_sat = (x * scale).clamp(min=-DTYPE_MAX, max=DTYPE_MAX)
+     return x_scl_sat.to(dtype).contiguous(), scale.float().reciprocal()
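A quick round trip through `dynamic_per_batched_tensor_quant` shows the contract: it returns the fp8 tensor plus the dequantization scale (the reciprocal of the scale it applied). A minimal sketch, assuming the helper above is in scope and a torch build with float8 support; shapes and tolerances are arbitrary:

    import torch

    x = torch.randn(128, 576, dtype=torch.bfloat16)
    x_fp8, dequant_scale = dynamic_per_batched_tensor_quant(x)

    assert x_fp8.dtype == torch.float8_e4m3fn
    x_back = x_fp8.to(torch.float32) * dequant_scale
    # fp8 e4m3 carries only ~2 significant digits, so expect coarse agreement.
    print((x.float() - x_back).abs().max())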
+
+
+ logger = init_logger(__name__)
+
+ CUDNN_WORKSPACE_SIZE = 12800
+
+
+ class MLACommonBackend(AttentionBackend):
+     accept_output_buffer: bool = True
+
+     @staticmethod
+     def get_name() -> str:
+         return "TRITON_MLA"
+
+     @staticmethod
+     def get_builder_cls() -> type["MLACommonMetadataBuilder"]:
+         return MLACommonMetadataBuilder
+
+     @staticmethod
+     def get_kv_cache_shape(
+         num_blocks: int,
+         block_size: int,
+         num_kv_heads: int,  # assumed to be 1 for MLA
+         head_size: int,
+         cache_dtype_str: str = "auto",
+     ) -> tuple[int, ...]:
+         return (num_blocks, block_size, head_size)
+
+     @staticmethod
+     def get_kv_cache_stride_order(
+         include_num_layers_dimension: bool = False,
+     ) -> tuple[int, ...]:
+         # `stride_order` indicates the permutation that gets
+         # us from `get_kv_cache_shape` to the actual memory layout we want.
+         # (num_blocks, num_layers, block_size, head_size)
+         return (1, 0, 2, 3) if include_num_layers_dimension else (0, 1, 2)
+
+     @classmethod
+     def get_supported_head_sizes(cls) -> list[int]:
+         return [576]
+
+     @classmethod
+     def is_mla(cls) -> bool:
+         return True
+
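The single supported head size, 576, is the width of one latent cache entry: kv_lora_rank (512, `Lkv`) plus qk_rope_head_dim (64, `R`) in DeepSeek-V3. A minimal sketch of the resulting cache layout, using only the static methods above:

    # One latent vector per token; num_kv_heads is 1 and does not appear
    # in the shape.
    shape = MLACommonBackend.get_kv_cache_shape(
        num_blocks=1024, block_size=16, num_kv_heads=1, head_size=512 + 64
    )
    assert shape == (1024, 16, 576)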
+
+ @dataclass
+ class MLACommonPrefillMetadata:
+     """Prefill Specific Metadata"""
+
+     @dataclass
+     class ChunkedContextMetadata:
+         # New for MLA (compared to FlashAttention)
+         # For handling chunked prefill
+         cu_seq_lens: torch.Tensor
+         starts: torch.Tensor
+         seq_tot: list[int]
+         max_seq_lens: list[int]
+         seq_lens: torch.Tensor
+         workspace: torch.Tensor
+         token_to_seq: torch.Tensor
+         chunk_total_token: list[int]
+
+         # for mla DCP
+         padded_local_chunk_seq_lens: list[list[int]] | None = None
+         local_context_lens_allranks: list[list[int]] | None = None
+         padded_local_cu_seq_lens: torch.Tensor | None = None
+         cu_seq_lens_lst: list[list[int]] | None = None
+         chunk_size: int | None = None
+
+     block_table: torch.Tensor
+     query_start_loc: torch.Tensor
+     max_query_len: int
+     chunked_context: ChunkedContextMetadata | None = None
+     query_seq_lens: torch.Tensor | None = None
+     workspace_buffer: torch.Tensor | None = None
+     q_data_type: torch.dtype | None = None
+
+
+ @dataclass
+ class FlashInferPrefillMetadata(MLACommonPrefillMetadata):
+     prefill_main: BatchPrefillWithRaggedKVCacheWrapper | None = None
+     prefill_chunks: list[BatchPrefillWithRaggedKVCacheWrapper] = field(
+         default_factory=list
+     )
+
+
+ @dataclass
+ class CudnnPrefillMetadata(MLACommonPrefillMetadata):
+     class ChunkedContextMetadata(MLACommonPrefillMetadata.ChunkedContextMetadata):
+         seq_lens: torch.Tensor
+
+     cudnn_workspace: torch.Tensor | None = None
+
+
+ @dataclass
+ class MLACommonDecodeMetadata:
+     block_table: torch.Tensor
+     seq_lens: torch.Tensor
+     dcp_tot_seq_lens: torch.Tensor | None
+
+
+ D = TypeVar("D", bound=MLACommonDecodeMetadata)
+
+
+ @dataclass
+ class MLACommonMetadata(AttentionMetadata, Generic[D]):
+     """Metadata for MLACommon.
+
+     NOTE: Please read the comment at the top of the file before trying to
+     understand this class
+     """
+
+     # NOTE(sang): Definition of context_len, query_len, and seq_len.
+     #   |---------- N-1 iteration --------|
+     #   |---------------- N iteration ---------------------|
+     #   |- tokenA -|......................|-- newTokens ---|
+     #   |---------- context_len ----------|
+     #   |-------------------- seq_len ---------------------|
+     #                                     |-- query_len ---|
+
+     num_reqs: int
+     max_query_len: int
+     max_seq_len: int
+
+     num_actual_tokens: int  # Number of tokens excluding padding.
+     query_start_loc: torch.Tensor
+     slot_mapping: torch.Tensor
+
+     # New for MLA (compared to FlashAttention)
+     # For handling prefill decode split
+     num_decodes: int
+     num_decode_tokens: int
+     num_prefills: int
+
+     # The dimension of the attention heads
+     head_dim: int | None = None
+
+     decode: D | None = None
+     prefill: (
+         MLACommonPrefillMetadata
+         | FlashInferPrefillMetadata
+         | CudnnPrefillMetadata
+         | None
+     ) = None
+
+     def __post_init__(self):
+         if self.head_dim is not None and not MLACommonBackend.supports_head_size(
+             self.head_dim
+         ):
+             raise ValueError(f"Head dimension {self.head_dim} is not supported by MLA.")
+
+
+ M = TypeVar("M", bound=MLACommonMetadata)
+ A = TypeVar("A", bound=AttentionMetadata)
+
+
+ def use_flashinfer_prefill() -> bool:
+     # For blackwell default to flashinfer prefill if it's available since
+     # it is faster than FA2.
+     from vllm.config import get_current_vllm_config
+
+     vllm_config = get_current_vllm_config()
+     return (
+         not vllm_config.attention_config.disable_flashinfer_prefill
+         and flashinfer_available
+         and not vllm_config.attention_config.use_cudnn_prefill
+         and not vllm_config.attention_config.use_trtllm_ragged_deepseek_prefill
+         and current_platform.is_device_capability_family(100)
+     )
+
+
+ def use_cudnn_prefill() -> bool:
+     from vllm.config import get_current_vllm_config
+
+     vllm_config = get_current_vllm_config()
+     return (
+         flashinfer_available
+         and vllm_config.attention_config.use_cudnn_prefill
+         and current_platform.is_device_capability_family(100)
+         and has_nvidia_artifactory()
+     )
+
+
+ def use_trtllm_ragged_deepseek_prefill() -> bool:
+     """Check if TRT-LLM ragged DeepSeek prefill should be used."""
+     from vllm.config import get_current_vllm_config
+
+     vllm_config = get_current_vllm_config()
+     return (
+         flashinfer_available
+         and vllm_config.attention_config.use_trtllm_ragged_deepseek_prefill
+         and current_platform.is_device_capability_family(100)
+     )
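Taken together, the three helpers above define a precedence for the prefill path on Blackwell-family devices (capability family 100): cuDNN and the TRT-LLM ragged path are explicit opt-in flags, and FlashInfer is the default when available and not disabled. A minimal distillation of that logic (a sketch, not a function in this file; `cfg` stands in for `attention_config`, and the "default" label for the FA fallback is an assumption):

    def select_prefill_path(cfg, blackwell: bool, fi_available: bool,
                            artifactory: bool) -> str:
        if fi_available and blackwell and cfg.use_cudnn_prefill and artifactory:
            return "cudnn"
        if fi_available and blackwell and cfg.use_trtllm_ragged_deepseek_prefill:
            return "trtllm_ragged"
        if (fi_available and blackwell
                and not cfg.disable_flashinfer_prefill
                and not cfg.use_cudnn_prefill
                and not cfg.use_trtllm_ragged_deepseek_prefill):
            return "flashinfer"
        return "default"  # FA-based prefill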
+
+
+ @dataclass
+ class MLADims:
+     q_lora_rank: int | None
+     kv_lora_rank: int
+     qk_nope_head_dim: int
+     qk_rope_head_dim: int
+     v_head_dim: int
+
+
+ def get_mla_dims(model_config: ModelConfig) -> MLADims:
+     hf_text_config = model_config.hf_text_config
+
+     return MLADims(
+         q_lora_rank=getattr(hf_text_config, "q_lora_rank", None),
+         kv_lora_rank=hf_text_config.kv_lora_rank,
+         qk_nope_head_dim=hf_text_config.qk_nope_head_dim,
+         qk_rope_head_dim=hf_text_config.qk_rope_head_dim,
+         v_head_dim=hf_text_config.v_head_dim,
+     )
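For DeepSeek-V3 the HF config fields read here correspond to the dimensions listed in the module docstring. A minimal sketch of the resulting value (numbers taken from the docstring table, not read from a real config):

    dsv3 = MLADims(
        q_lora_rank=1536,      # Lq
        kv_lora_rank=512,      # Lkv
        qk_nope_head_dim=128,  # P
        qk_rope_head_dim=64,   # R
        v_head_dim=128,        # V
    )
    # Latent cache width = kv_lora_rank + qk_rope_head_dim = 576,
    # matching MLACommonBackend.get_supported_head_sizes().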
+
+
+ class MLACommonMetadataBuilder(AttentionMetadataBuilder[M]):
+     """
+     NOTE: Please read the comment at the top of the file before trying to
+     understand this class
+     """
+
+     # Defines the level of query length support for this backend.
+     # - SINGLE_ONLY: Only single-token queries (no spec decode support)
+     # - UNIFORM: Supports uniform multi-token queries (spec decode with uniform lengths)
+     # - VARLEN: Supports variable-length queries (spec decode with mixed lengths)
+     # If set to UNIFORM or VARLEN, this will increase `reorder_batch_threshold` when
+     # speculative decoding is enabled.
+     query_len_support: ClassVar[QueryLenSupport] = QueryLenSupport.SINGLE_ONLY
+
+     # The threshold for reordering the batch into decode and prefill requests.
+     # If > 1, the batch will be reordered such that requests with
+     # query length <= threshold are classified as decode requests.
+     # Use `query_len_support` (above) to set this automatically
+     # when speculative decoding is enabled.
+     reorder_batch_threshold: int = 1
+
+     @staticmethod
+     def determine_chunked_prefill_workspace_size(vllm_config: VllmConfig) -> int:
+         scheduler_config = vllm_config.scheduler_config
+         cache_config = vllm_config.cache_config
+         model_config = vllm_config.model_config
+
+         chunked_prefill_workspace_size = min(
+             # Try for 8 full-length requests or at least 4 pages per request
+             max(
+                 8 * model_config.max_model_len,
+                 4 * scheduler_config.max_num_seqs * cache_config.block_size,
+             ),
+             # For long-context models, try not to over-allocate (limiting
+             # kv-cache space) by capping the workspace at 64k tokens,
+             # which results in the workspace being:
+             #   2*(576)*(64*1024) = 72MiB
+             # (assuming 576 MLA head dim, and fp16)
+             # and the up-projected context being:
+             #   2*(192*128)*(64*1024) = 3GiB
+             # (assuming 192 QK head dim, 128 heads, and fp16)
+             64 * 1024,
+         )
+
+         # Enforce that we have enough for at least 1 page per request
+         chunked_prefill_workspace_size = max(
+             chunked_prefill_workspace_size,
+             scheduler_config.max_num_seqs * cache_config.block_size,
+         )
+
+         return chunked_prefill_workspace_size
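To make the sizing policy concrete, a worked example under assumed settings (max_model_len=32768, max_num_seqs=128, block_size=16; a sketch, not part of the file):

    max_model_len, max_num_seqs, block_size = 32768, 128, 16
    size = min(
        max(
            8 * max_model_len,              # 262144 tokens
            4 * max_num_seqs * block_size,  # 8192 tokens
        ),
        64 * 1024,                          # 65536-token cap wins here
    )
    size = max(size, max_num_seqs * block_size)  # >= 1 page per request (2048)
    assert size == 65536  # tokens; ~72MiB of fp16 latent workspace at width 576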
+
+     def __init__(
+         self,
+         kv_cache_spec: AttentionSpec,
+         layer_names: list[str],
+         vllm_config: VllmConfig,
+         device: torch.device,
+         metadata_cls: type[M] | None = None,
+         supports_dcp_with_varlen: bool = False,
+     ):
+         self.metadata_cls = (
+             metadata_cls if metadata_cls is not None else MLACommonMetadata
+         )
+         self.kv_cache_spec = kv_cache_spec
+         scheduler_config = vllm_config.scheduler_config
+         self.model_config = vllm_config.model_config
+         parallel_config = vllm_config.parallel_config
+         self.compilation_config = vllm_config.compilation_config
+         self.vllm_config = vllm_config
+         self.device = device
+
+         self.num_heads = self.model_config.get_num_attention_heads(parallel_config)
+         self.mla_dims = get_mla_dims(self.model_config)
+         self.aot_schedule = current_platform.is_cuda()
+         try:
+             self.dcp_world_size = get_dcp_group().world_size
+             self.dcp_rank = get_dcp_group().rank_in_group
+         except AssertionError:
+             # DCP might not be initialized in testing
+             self.dcp_world_size = 1
+             self.dcp_rank = 0
+         self.dcp_local_block_size = parallel_config.cp_kv_cache_interleave_size
+         self.dcp_virtual_block_size = self.dcp_local_block_size * self.dcp_world_size
+         self.cp_kv_cache_interleave_size = parallel_config.cp_kv_cache_interleave_size
+
+         # Don't try to access the runner on AMD
+         if self.aot_schedule:
+             self.page_size = self.kv_cache_spec.block_size
+
+         self.chunked_prefill_workspace_size = (
+             self.determine_chunked_prefill_workspace_size(vllm_config)
+         )
+
+         if self.dcp_world_size > 1:
+             # Note(hc): The local kvcache is incomplete when DCP is triggered,
+             # an additional kvcache allgather across the DCP group is therefore
+             # required, so the workspace has to be enlarged by 1/DCP relative
+             # to the original TP allocation.
+             assert self.chunked_prefill_workspace_size % self.dcp_world_size == 0
+             self.chunked_prefill_workspace = torch.empty(
+                 (
+                     self.chunked_prefill_workspace_size
+                     + self.chunked_prefill_workspace_size // self.dcp_world_size,
+                     self.model_config.get_head_size(),
+                 ),
+                 dtype=self.model_config.dtype,
+                 device=device,
+             )
+         else:
+             self.chunked_prefill_workspace = torch.empty(
+                 (
+                     self.chunked_prefill_workspace_size,
+                     self.model_config.get_head_size(),
+                 ),
+                 dtype=self.model_config.dtype,
+                 device=device,
+             )
+
+         self._use_cudnn_prefill = use_cudnn_prefill()
+         self._use_fi_prefill = use_flashinfer_prefill()
+         self._use_trtllm_ragged_prefill = use_trtllm_ragged_deepseek_prefill()
+         self.prefill_metadata_cls = (
+             FlashInferPrefillMetadata
+             if self._use_fi_prefill
+             else CudnnPrefillMetadata
+             if self._use_cudnn_prefill
+             else MLACommonPrefillMetadata
+         )
+
+         if self._use_fi_prefill:
+             self._workspace_buffer = torch.empty(
+                 envs.VLLM_FLASHINFER_WORKSPACE_BUFFER_SIZE,
+                 dtype=torch.uint8,
+                 device=device,
+             )
+
+             self._fi_prefill_main: BatchPrefillWithRaggedKVCacheWrapper | None = None
+             self._fi_prefill_chunks: list[BatchPrefillWithRaggedKVCacheWrapper] = []
+
+             self._global_hyperparameters = infer_global_hyperparameters(
+                 get_per_layer_parameters(vllm_config, layer_names, MLACommonImpl)  # type: ignore[type-abstract]
+             )
+
+         if self._use_trtllm_ragged_prefill:
+             self._workspace_buffer = torch.empty(
+                 envs.VLLM_FLASHINFER_WORKSPACE_BUFFER_SIZE,
+                 dtype=torch.uint8,
+                 device=device,
+             )
+
+         if self._use_cudnn_prefill:
+             self.cudnn_workspace = torch.empty(
+                 CUDNN_WORKSPACE_SIZE * scheduler_config.max_num_seqs,
+                 dtype=torch.int8,
+                 device=device,
+             )
+
+         supports_spec_decode = self.query_len_support != QueryLenSupport.SINGLE_ONLY
+         self._init_reorder_batch_threshold(
+             self.reorder_batch_threshold, supports_spec_decode, supports_dcp_with_varlen
+         )
+
+         # Validate consistency between query_len_support and reorder_batch_threshold
+         if self.query_len_support == QueryLenSupport.SINGLE_ONLY:
+             assert self.reorder_batch_threshold == 1, (
+                 f"reorder_batch_threshold must be 1 when query_len_support is "
+                 f"SINGLE_ONLY, got {self.reorder_batch_threshold}"
+             )
+
+     def _build_fi_prefill_wrappers(self, prefill: FlashInferPrefillMetadata):
+         qo_indptr = prefill.query_start_loc
+
+         has_context = False
+         if prefill.chunked_context is not None:
+             chunked_context = prefill.chunked_context
+             has_context = True
+
+         if self._fi_prefill_main is None:
+             self._fi_prefill_main = BatchPrefillWithRaggedKVCacheWrapper(
+                 self._workspace_buffer, "NHD", backend="cutlass"
+             )
+
+         if has_context:
+             num_chunks = chunked_context.cu_seq_lens.shape[0]
+             # Allocate more prefill chunk wrappers if needed
+             if len(self._fi_prefill_chunks) < num_chunks:
+                 for _ in range(len(self._fi_prefill_chunks), num_chunks):
+                     self._fi_prefill_chunks.append(
+                         BatchPrefillWithRaggedKVCacheWrapper(
+                             self._workspace_buffer, "NHD", backend="cutlass"
+                         )
+                     )
+             assert num_chunks <= len(self._fi_prefill_chunks)
+
+         # In MLA, the non-latent num_qo_heads == num_kv_heads
+         num_qo_heads = self.num_heads
+         num_kv_heads = num_qo_heads
+
+         # Sanity: Verify that num_kv_heads == 1 since it is latent space
+         assert self.kv_cache_spec.num_kv_heads == 1
+
+         # Get non-latent head_dim_qk and head_dim_vo
+         head_dim_qk = self.mla_dims.qk_nope_head_dim + self.mla_dims.qk_rope_head_dim
+         head_dim_vo = self.mla_dims.v_head_dim
+
+         # For main run, qo_indptr == kv_indptr
+         kv_indptr = qo_indptr.clone()
+
+         # Prepare main prefill
+         self._fi_prefill_main.plan(
+             qo_indptr=qo_indptr,
+             kv_indptr=kv_indptr,
+             num_qo_heads=num_qo_heads,
+             num_kv_heads=num_kv_heads,
+             head_dim_qk=head_dim_qk,
+             head_dim_vo=head_dim_vo,
+             causal=True,  # This is main run
+             sm_scale=self._global_hyperparameters.sm_scale,
+             window_left=self._global_hyperparameters.window_left,
+             logits_soft_cap=self._global_hyperparameters.logits_soft_cap,
+             q_data_type=self.model_config.dtype,
+         )
+
+         # Prepare context prefills
+         if has_context:
+             for i in range(num_chunks):
+                 kv_indptr_chunk = chunked_context.cu_seq_lens[i]
+
+                 self._fi_prefill_chunks[i].plan(
+                     qo_indptr=qo_indptr,
+                     kv_indptr=kv_indptr_chunk,
+                     num_qo_heads=num_qo_heads,
+                     num_kv_heads=num_kv_heads,
+                     head_dim_qk=head_dim_qk,
+                     head_dim_vo=head_dim_vo,
+                     causal=False,  # This is context run
+                     sm_scale=self._global_hyperparameters.sm_scale,
+                     window_left=self._global_hyperparameters.window_left,
+                     logits_soft_cap=self._global_hyperparameters.logits_soft_cap,
+                     q_data_type=self.model_config.dtype,
+                 )
+
+         prefill.prefill_main = self._fi_prefill_main
+         prefill.prefill_chunks = self._fi_prefill_chunks
+
+     def _build_decode(
+         self,
+         block_table_tensor: torch.Tensor,
+         seq_lens_device: torch.Tensor,
+         max_seq_len: int,
+         query_start_loc_cpu: torch.Tensor,
+         query_start_loc_device: torch.Tensor,
+         num_decode_tokens: int,
+         dcp_tot_seq_lens_device: torch.Tensor | None,
+     ) -> MLACommonDecodeMetadata:
+         return MLACommonDecodeMetadata(
+             block_table=block_table_tensor,
+             seq_lens=seq_lens_device,
+             dcp_tot_seq_lens=dcp_tot_seq_lens_device,
+         )
+
764
+ def build_for_cudagraph_capture(
765
+ self, common_attn_metadata: CommonAttentionMetadata
766
+ ) -> M:
767
+ """
768
+ This method builds the metadata for full cudagraph capture.
769
+ Currently, only decode is supported for full cudagraphs with MLA.
770
+ """
771
+ m = common_attn_metadata
772
+ assert m.num_reqs <= (m.num_actual_tokens * self.reorder_batch_threshold), (
773
+ "MLA only supports decode-only full CUDAGraph capture. "
774
+ "Make sure all cudagraph capture sizes <= max_num_seq."
775
+ )
776
+
777
+ assert m.max_query_len <= self.reorder_batch_threshold # decode only
778
+
779
+ return self.build(0, m)
780
+
781
+ def build(
782
+ self,
783
+ common_prefix_len: int,
784
+ common_attn_metadata: CommonAttentionMetadata,
785
+ fast_build: bool = False,
786
+ ) -> M:
787
+ num_reqs = common_attn_metadata.num_reqs
788
+ num_tokens = common_attn_metadata.num_actual_tokens
789
+ max_query_len = common_attn_metadata.max_query_len
790
+ max_seq_len = common_attn_metadata.max_seq_len
791
+
792
+ # Note(simon): be careful about the CPU <> GPU memory movement in this
793
+ # function. We should avoid GPU -> CPU sync as much as possible because
794
+ # it blocks on all previous kernels.
795
+ device = self.device
796
+ block_table_tensor = common_attn_metadata.block_table_tensor
797
+ slot_mapping = common_attn_metadata.slot_mapping
798
+
799
+ query_start_loc = common_attn_metadata.query_start_loc
800
+ query_start_loc_cpu = common_attn_metadata.query_start_loc_cpu
801
+ seq_lens = common_attn_metadata.seq_lens
802
+ dcp_local_seq_lens = common_attn_metadata.dcp_local_seq_lens
803
+
804
+ num_decodes, num_prefills, num_decode_tokens, num_prefill_tokens = (
805
+ split_decodes_and_prefills(
806
+ common_attn_metadata,
807
+ decode_threshold=self.reorder_batch_threshold,
808
+ require_uniform=(self.query_len_support != QueryLenSupport.VARLEN),
809
+ )
810
+ )
811
+
812
+ assert num_decodes + num_prefills == num_reqs
813
+ assert num_decode_tokens + num_prefill_tokens == num_tokens
814
+
815
+ prefill_metadata = None
816
+ if num_prefills > 0:
817
+ num_computed_tokens_cpu = (
818
+ common_attn_metadata.compute_num_computed_tokens().cpu()
819
+ )
820
+
821
+ reqs_start = num_decodes # prefill_start
822
+
823
+ context_lens_cpu = num_computed_tokens_cpu[reqs_start:num_reqs]
824
+ max_context_len_cpu = context_lens_cpu.max().item()
825
+ num_prefills_with_context_cpu = (context_lens_cpu > 0).sum().item()
826
+ prefill_query_start_loc = (
827
+ query_start_loc[reqs_start:] - query_start_loc[reqs_start]
828
+ )
829
+
830
+ chunked_context_metadata = None
831
+ if max_context_len_cpu > 0:
832
+ # NOTE: it is recommend you read the `Chunked Prefill` section
833
+ # in the comment at the top of the file before trying to
834
+ # understand the following code
835
+
836
+ # currently we allocate an equal amount of workspace for each
837
+ # prefill in the batch, we could probably use a more advanced
838
+ # algorithm here and allocate more workspace to prefills with
839
+ # longer context lengths
840
+ max_context_chunk = (
841
+ self.chunked_prefill_workspace_size // num_prefills_with_context_cpu
842
+ )
843
+
844
+ if self.aot_schedule:
845
+ # align max_context_chunk to page_size by rounding down,
846
+ # currently the `gather_and_maybe_dequant_cache` kernel
847
+ # cannot handle `context_chunk_starts` that are not aligned
848
+ # to page_size
849
+ max_context_chunk = round_down(max_context_chunk, self.page_size)
850
+
851
+ assert max_context_chunk > 0
852
+ num_chunks = cdiv(max_context_len_cpu, max_context_chunk)
853
+
854
+ # if `max_context_chunk = 256`, `num_chunks = 3`, and
855
+ # `num_prefills_with_context = 4`, create a tensor that looks
856
+ # like
857
+ # [[0, 0, 0, 0], [256, 256, 256, 256], [512, 512, 512, 512]]
858
+ # Note(simon): this is done in CPU because of downstream's
859
+ # of `to_list`.
860
+ chunk_starts = (
861
+ torch.arange(num_chunks, dtype=torch.int32)
862
+ .unsqueeze(1)
863
+ .expand(-1, num_prefills)
864
+ * max_context_chunk
865
+ )
866
+ chunk_ends = torch.min(
867
+ context_lens_cpu.unsqueeze(0), chunk_starts + max_context_chunk
868
+ )
869
+ chunk_seq_lens = (chunk_ends - chunk_starts).clamp(min=0)
870
+
871
+ cu_seq_lens_cpu = torch.zeros(
872
+ num_chunks, num_prefills + 1, dtype=torch.int32, pin_memory=True
873
+ )
874
+ torch.cumsum(
875
+ chunk_seq_lens, dim=1, out=cu_seq_lens_cpu[:, 1:], dtype=torch.int32
876
+ )
877
+ chunk_total_token = cu_seq_lens_cpu[:, -1]
878
+
879
+ max_token_num_over_chunk = chunk_total_token.max().item()
880
+ token_to_seq_tensor_cpu = torch.zeros(
881
+ [num_chunks, max_token_num_over_chunk], dtype=torch.int32
882
+ )
883
+ range_idx = torch.arange(num_prefills, dtype=torch.int32)
884
+ for i in range(num_chunks):
885
+ chunk_token_to_seq_tensor = torch.repeat_interleave(
886
+ range_idx, chunk_seq_lens[i]
887
+ )
888
+ chunk_len = chunk_token_to_seq_tensor.shape[0]
889
+ token_to_seq_tensor_cpu[i, :chunk_len] = chunk_token_to_seq_tensor
890
+
891
+ if self.dcp_world_size > 1:
892
+ local_context_lens_allranks = get_dcp_local_seq_lens(
893
+ context_lens_cpu,
894
+ self.dcp_world_size,
895
+ None,
896
+ self.dcp_local_block_size,
897
+ )
898
+ # Note(qcs): The max local context lengths
899
+ # padded to `dcp_local_block_size`.
900
+ padded_local_context_lens_cpu: torch.Tensor = (
901
+ cdiv(
902
+ context_lens_cpu,
903
+ self.dcp_virtual_block_size,
904
+ )
905
+ * self.dcp_local_block_size
906
+ )
907
+ # Note(hc): The above max_context_chunk already enforces
908
+ # block_size alignment, DCP just need the block_size can
909
+ # be divisible by dcp_world_size, because DCP use
910
+ # cp_gather_cache which not require `cp_chunk_starts`
911
+ # aligned to page_size.
912
+ assert max_context_chunk % self.dcp_world_size == 0
913
+ padded_local_max_context_chunk_across_ranks = (
914
+ cdiv(
915
+ max_context_chunk,
916
+ self.dcp_virtual_block_size,
917
+ )
918
+ * self.dcp_local_block_size
919
+ )
+                     local_chunk_starts = (
+                         torch.arange(num_chunks, dtype=torch.int32)
+                         .unsqueeze(1)
+                         .expand(-1, num_prefills)
+                         * padded_local_max_context_chunk_across_ranks
+                     )
+                     local_chunk_ends = torch.min(
+                         padded_local_context_lens_cpu.unsqueeze(0),
+                         local_chunk_starts
+                         + padded_local_max_context_chunk_across_ranks,
+                     )
+                     padded_local_chunk_seq_lens = (
+                         local_chunk_ends - local_chunk_starts
+                     ).clamp(min=0)
+
+                     padded_local_cu_chunk_seq_lens_cpu = torch.zeros(
+                         num_chunks, num_prefills + 1, dtype=torch.int32, pin_memory=True
+                     )
+                     torch.cumsum(
+                         padded_local_chunk_seq_lens,
+                         dim=1,
+                         out=padded_local_cu_chunk_seq_lens_cpu[:, 1:],
+                         dtype=torch.int32,
+                     )
+
+                 chunked_context_metadata_cls = (
+                     CudnnPrefillMetadata.ChunkedContextMetadata
+                     if self._use_cudnn_prefill
+                     else MLACommonPrefillMetadata.ChunkedContextMetadata
+                 )
+                 if self.dcp_world_size > 1:
+                     chunked_context_metadata = chunked_context_metadata_cls(
+                         cu_seq_lens=cu_seq_lens_cpu.to(device, non_blocking=True),
+                         starts=local_chunk_starts.to(device, non_blocking=True),
+                         seq_tot=padded_local_chunk_seq_lens.sum(dim=1).tolist(),
+                         max_seq_lens=chunk_seq_lens.max(dim=1).values.tolist(),
+                         seq_lens=chunk_seq_lens,
+                         token_to_seq=token_to_seq_tensor_cpu.to(
+                             device, non_blocking=True
+                         ),
+                         chunk_total_token=chunk_total_token.tolist(),
+                         workspace=self.chunked_prefill_workspace,
+                         padded_local_chunk_seq_lens=padded_local_chunk_seq_lens.tolist(),
+                         local_context_lens_allranks=local_context_lens_allranks.tolist(),
+                         padded_local_cu_seq_lens=padded_local_cu_chunk_seq_lens_cpu.to(
+                             device, non_blocking=True
+                         ),
+                         cu_seq_lens_lst=cu_seq_lens_cpu.tolist(),
+                         chunk_size=padded_local_max_context_chunk_across_ranks,
+                     )
+                 else:
+                     chunked_context_metadata = chunked_context_metadata_cls(
+                         cu_seq_lens=cu_seq_lens_cpu.to(device, non_blocking=True),
+                         starts=chunk_starts.to(device, non_blocking=True),
+                         seq_tot=chunk_seq_lens.sum(dim=1).tolist(),
+                         max_seq_lens=chunk_seq_lens.max(dim=1).values.tolist(),
+                         seq_lens=chunk_seq_lens,
+                         token_to_seq=token_to_seq_tensor_cpu.to(
+                             device, non_blocking=True
+                         ),
+                         chunk_total_token=chunk_total_token,
+                         workspace=self.chunked_prefill_workspace,
+                     )
+
+                 if self._use_cudnn_prefill:
+                     chunked_context_metadata.seq_lens = chunk_seq_lens
+
+                 assert (
+                     max(chunked_context_metadata.max_seq_lens)
+                     <= self.chunked_prefill_workspace_size
+                 )
+
+             prefill_metadata = self.prefill_metadata_cls(
+                 block_table=block_table_tensor[reqs_start:, ...],
+                 query_start_loc=prefill_query_start_loc,
+                 max_query_len=max_query_len,
+                 chunked_context=chunked_context_metadata,
+             )
+
+             if self._use_cudnn_prefill:
+                 assert isinstance(prefill_metadata, CudnnPrefillMetadata)
+                 prefill_metadata.query_seq_lens = (
+                     prefill_query_start_loc[1:] - prefill_query_start_loc[:-1]
+                 )
+                 prefill_metadata.cudnn_workspace = self.cudnn_workspace
+
+             if self._use_trtllm_ragged_prefill:
+                 prefill_metadata.query_seq_lens = (
+                     prefill_query_start_loc[1:] - prefill_query_start_loc[:-1]
+                 )
+                 prefill_metadata.workspace_buffer = self._workspace_buffer
+
+         decode_metadata = None
+         if num_decodes > 0:
+             dcp_tot_seq_lens_device = None
+             if self.dcp_world_size > 1:
+                 dcp_tot_seq_lens_device = seq_lens[:num_decodes]
+                 seq_lens = dcp_local_seq_lens
+
+                 # After DCP distribution, the maximum number of tokens on any
+                 # rank is ceil(L / (N * I)) * I, where L is max_seq_len, N is
+                 # dcp_world_size, and I is cp_kv_cache_interleave_size.
+                 # This eliminates a GPU->CPU sync while minimizing workspace
+                 # over-allocation.
+                 num_partitions = self.dcp_world_size * self.cp_kv_cache_interleave_size
+                 max_seq_len = (
+                     (max_seq_len + num_partitions - 1) // num_partitions
+                 ) * self.cp_kv_cache_interleave_size
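+                 # Example (illustrative values): L = 1000, N = 8, I = 64 ->
+                 # num_partitions = 512 and
+                 # max_seq_len = ceil(1000 / 512) * 64 = 128 tokens per rank.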
+
+             decode_metadata = self._build_decode(
+                 block_table_tensor=block_table_tensor[:num_decodes, ...],
+                 seq_lens_device=seq_lens[:num_decodes],
+                 max_seq_len=max_seq_len,
+                 query_start_loc_cpu=query_start_loc_cpu[: num_decodes + 1],
+                 query_start_loc_device=query_start_loc[: num_decodes + 1],
+                 num_decode_tokens=num_decode_tokens,
+                 dcp_tot_seq_lens_device=dcp_tot_seq_lens_device,
+             )
+
+         attn_metadata = self.metadata_cls(
+             num_reqs=common_attn_metadata.num_reqs,
+             max_query_len=common_attn_metadata.max_query_len,
+             max_seq_len=max_seq_len,
+             num_actual_tokens=num_tokens,
+             query_start_loc=query_start_loc,
+             slot_mapping=slot_mapping,
+             head_dim=self.model_config.get_head_size(),
+             # MLACommonMetadata chunked-prefill specific
+             num_decodes=num_decodes,
+             num_decode_tokens=num_decode_tokens,
+             num_prefills=num_prefills,
+             prefill=prefill_metadata,
+             decode=decode_metadata,
+         )
+
+         if self._use_fi_prefill and num_prefills > 0:
+             assert isinstance(attn_metadata.prefill, FlashInferPrefillMetadata)
+             self._build_fi_prefill_wrappers(attn_metadata.prefill)
+
+         return attn_metadata
+
+
+ def reorg_kvcache(
+     allgatered_kv_c_normed: torch.Tensor,
+     allgatered_k_pe: torch.Tensor,
+     padded_local_chunk_seq_lens_lst: list[int],
+     local_context_lens_allranks: list[list[int]],
+     sum_seq_len: int,
+     max_seq_len: int,
+     chunk_size: int,
+     chunk_idx: int,
+     toks: int,
+ ) -> tuple[torch.Tensor, torch.Tensor]:
+     """
+     Reorganize and unpad the KV cache after the CP local gather into the TP
+     layout expected by the attention kernel.
+     e.g.
+     allgatered_kv_c_normed = [T0_0, T0_1, T0_2, T0_3, T1_0, T1_1, ...,
+                               T0_4, T0_5, pad, pad, T1_2, pad, ...]
+     -> reorganized_kv_c_normed = [T0_0, T0_1, T0_2, T0_3, T0_4, T0_5,
+                                   T1_0, T1_1, T1_2, ...]
+     Args:
+         padded_local_chunk_seq_lens_lst: local chunk context lengths
+             on the current CP rank.
+         local_context_lens_allranks: local context lengths on each CP rank.
+         sum_seq_len: the sum of cp_chunk_seq_lens_lst.
+         max_seq_len: the max value of cp_chunk_seq_lens_lst.
+         chunk_size: the padded local max context chunk from
+             chunked_context_metadata building.
+         chunk_idx: chunk index of chunked_prefill.
+         toks: the number of tokens for the local gather cache.
+     """
1091
+ kv_c_segments = []
1092
+ k_pe_segments = []
1093
+ src_token_idx = 0
1094
+ max_seq_len_check = 0
1095
+ for padded_local_chunk_seq_len, local_context_lens in zip(
1096
+ padded_local_chunk_seq_lens_lst, local_context_lens_allranks
1097
+ ):
1098
+ cur_seq_len = 0
1099
+ for rank, local_context_len in enumerate(local_context_lens):
1100
+ # Note(qcs): We split the context into multiple chunks,
1101
+ # depending on the size of the workspace.
1102
+ # local_context in dcp0: |-----------------|
1103
+ # local_context in dcp1: |--------------|
1104
+ # n*padded_local_chunk: |-----|-----|-----|
1105
+ # local_chunk_len in dcp1: |-----|-----|--|
1106
+ # so we need update the last chunk length in dcp1.
1107
+ local_chunk_len = min(
1108
+ max(0, local_context_len - chunk_idx * chunk_size),
1109
+ padded_local_chunk_seq_len,
1110
+ )
1111
+ if local_chunk_len != 0:
1112
+ kv_c_segment = allgatered_kv_c_normed[
1113
+ rank * toks + src_token_idx : rank * toks
1114
+ + src_token_idx
1115
+ + local_chunk_len
1116
+ ]
1117
+ k_pe_segment = allgatered_k_pe[
1118
+ rank * toks + src_token_idx : rank * toks
1119
+ + src_token_idx
1120
+ + local_chunk_len
1121
+ ]
1122
+ kv_c_segments.append(kv_c_segment)
1123
+ k_pe_segments.append(k_pe_segment)
1124
+ cur_seq_len += local_chunk_len
1125
+ max_seq_len_check = max(max_seq_len_check, cur_seq_len)
1126
+ src_token_idx += padded_local_chunk_seq_len
1127
+ reorganized_kv_c_normed = torch.cat(kv_c_segments, dim=0)
1128
+ reorganized_k_pe = torch.cat(k_pe_segments, dim=0)
1129
+ assert reorganized_kv_c_normed.shape[0] == sum_seq_len
1130
+ assert reorganized_k_pe.shape[0] == sum_seq_len
1131
+ assert max_seq_len_check == max_seq_len
1132
+ return reorganized_kv_c_normed, reorganized_k_pe
+
+
+ # TODO(Lucas): rename MLACommonBaseImpl -> MLACommonImpl,
+ # and MLACommonImpl -> MLACommonDenseImpl or something like that
+ class MLACommonBaseImpl(MLAAttentionImpl[A], Generic[A]):
+     """
+     NOTE: Please read the comment at the top of the file before trying to
+     understand this class
+     """
+
+     def __init__(
+         self,
+         num_heads: int,
+         head_size: int,
+         scale: float,
+         num_kv_heads: int,
+         alibi_slopes: list[float] | None,
+         sliding_window: int | None,
+         kv_cache_dtype: str,
+         logits_soft_cap: float | None,
+         attn_type: str,
+         kv_sharing_target_layer_name: str | None,
+         # MLA Specific Arguments
+         q_lora_rank: int | None,
+         kv_lora_rank: int,
+         qk_nope_head_dim: int,
+         qk_rope_head_dim: int,
+         qk_head_dim: int,
+         v_head_dim: int,
+         kv_b_proj: ColumnParallelLinear,
+         indexer=None,
+         q_pad_num_heads: int | None = None,
+     ) -> None:
+         if kv_sharing_target_layer_name is not None:
+             raise NotImplementedError("KV sharing is not supported for MLA")
+
+         self.num_heads = num_heads
+         self.head_size = head_size
+         self.scale = float(scale)
+         self.num_kv_heads = num_kv_heads
+         self.kv_cache_dtype = kv_cache_dtype
+
+         self.q_lora_rank = q_lora_rank
+         self.kv_lora_rank = kv_lora_rank
+         self.qk_nope_head_dim = qk_nope_head_dim
+         self.qk_rope_head_dim = qk_rope_head_dim
+         self.qk_head_dim = qk_head_dim
+         self.v_head_dim = v_head_dim
+         self.kv_b_proj = kv_b_proj
+         self.indexer = indexer
+         self.q_pad_num_heads = q_pad_num_heads
+         self.is_aiter_triton_fp8_bmm_enabled = rocm_aiter_ops.is_fp8bmm_enabled()
+
+     def process_weights_after_loading(self, act_dtype: torch.dtype):
+         def get_layer_weight(layer):
+             WEIGHT_NAMES = ("weight", "qweight", "weight_packed")
+             for attr in WEIGHT_NAMES:
+                 if hasattr(layer, attr):
+                     return getattr(layer, attr)
+             raise AttributeError(
+                 f"Layer '{layer}' has no recognized weight attribute: {WEIGHT_NAMES}."
+             )
+
+         def get_and_maybe_dequant_weights(layer: LinearBase):
+             if layer.quant_method is not None and not isinstance(
+                 layer.quant_method, UnquantizedLinearMethod
+             ):
+                 # NOTE: This should only be used offline, since it's O(N^3)
+                 eye = torch.eye(
+                     layer.input_size_per_partition,
+                     dtype=act_dtype,
+                     device=get_layer_weight(layer).device,
+                 )
+                 dequant_weights = layer.quant_method.apply(layer, eye, bias=None)
+                 del eye
+                 # standardize to (output, input)
+                 return dequant_weights.T
+             return layer.weight
+
+         # We currently do not have quantized BMMs, which would be needed for
+         # `W_UV` and `W_UK_T`; instead we store fp16/bf16 copies and perform
+         # the BMMs in 16-bit. The extra memory overhead of this is fairly low.
+         kv_b_proj_weight = get_and_maybe_dequant_weights(self.kv_b_proj).T
+         assert kv_b_proj_weight.shape == (
+             self.kv_lora_rank,
+             self.num_heads * (self.qk_nope_head_dim + self.v_head_dim),
+         ), (
+             f"{kv_b_proj_weight.shape=}, "
+             f"{self.kv_lora_rank=}, "
+             f"{self.num_heads=}, "
+             f"{self.qk_nope_head_dim=}, "
+             f"{self.v_head_dim=}"
+         )
+         kv_b_proj_weight = kv_b_proj_weight.view(
+             self.kv_lora_rank,
+             self.num_heads,
+             self.qk_nope_head_dim + self.v_head_dim,
+         )
+
+         W_UK, W_UV = kv_b_proj_weight.split(
+             [self.qk_nope_head_dim, self.v_head_dim], dim=-1
+         )
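+         # Shape sketch (illustrative, DeepSeek-R1-like dims): with
+         # kv_lora_rank L = 512, per-rank num_heads N = 16,
+         # qk_nope_head_dim P = 128 and v_head_dim V = 128, the weight is
+         # viewed as (512, 16, 256) and split into W_UK (512, 16, 128)
+         # and W_UV (512, 16, 128).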
+
+         if self.is_aiter_triton_fp8_bmm_enabled:
+             W_K = W_UK.transpose(0, 1)  # (N, L, P), e.g. (16, 512, 128)
+             W_V = W_UV.permute(1, 2, 0)  # (N, V, L), e.g. (16, 128, 512)
+             self.W_K, self.W_K_scale = dynamic_per_batched_tensor_quant(
+                 W_K, dtype=current_platform.fp8_dtype()
+             )
+             self.W_V, self.W_V_scale = dynamic_per_batched_tensor_quant(
+                 W_V, dtype=current_platform.fp8_dtype()
+             )
+
+             # The kernel operates on non-padded inputs, so we pre-compile the
+             # Triton kernel to avoid runtime compilation for unseen batch
+             # sizes. Pre-compile for batch sizes 1 to 1024 to cover most
+             # use-cases. On DS-R1, this adds roughly 50s to model loading.
+             max_batch_size = 1024  # [ToDo] Find the optimal upper limit
+             pre_compilation_list = list(range(1, max_batch_size + 1))
+             if is_global_first_rank():
+                 pre_compilation_list = tqdm(
+                     pre_compilation_list,
+                     desc="[Aiter Triton] Pre-compiling fp8 BMM kernel",
+                     total=max_batch_size,
+                 )
+
+             for m in pre_compilation_list:
+                 x = torch.empty(
+                     (self.W_K.shape[0], m, self.W_K.shape[2]),
+                     dtype=torch.bfloat16,
+                     device=self.W_K.device,
+                 )
+                 rocm_aiter_ops.triton_fp8_bmm(
+                     x, self.W_K, self.W_K_scale, group_size=128, transpose_bm=True
+                 )
+
+                 x = torch.empty(
+                     (self.W_V.shape[0], m, self.W_V.shape[2]),
+                     dtype=torch.bfloat16,
+                     device=self.W_V.device,
+                 )
+                 rocm_aiter_ops.triton_fp8_bmm(
+                     x, self.W_V, self.W_V_scale, group_size=128, transpose_bm=True
+                 )
+         else:
+             # Convert from (L, N, V) to (N, L, V)
+             self.W_UV = W_UV.transpose(0, 1)
+             # Convert from (L, N, P) to (N, P, L)
+             self.W_UK_T = W_UK.permute(1, 2, 0)
+
+     def _v_up_proj(self, x: torch.Tensor, out: torch.Tensor):
+         # Convert from (B, N, L) to (N, B, L)
+         x = x.view(-1, self.num_heads, self.kv_lora_rank).transpose(0, 1)
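+         # Shape walkthrough (illustrative): with B = 2 tokens, N = 16 heads,
+         # L = 512 and V = 128, x goes (2, 16, 512) -> (16, 2, 512), the BMM
+         # with W_UV (16, 512, 128) gives (16, 2, 128), and the final output
+         # buffer is (2, 16 * 128).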
+
+         if self.is_aiter_triton_fp8_bmm_enabled:
+             out = out.view(-1, self.num_heads, self.v_head_dim)
+             # Multiply + Transpose (N, B, L) x (N, L, V) -> (N, B, V) -> (B, N, V)
+             x = rocm_aiter_ops.triton_fp8_bmm(
+                 x, self.W_V, self.W_V_scale, group_size=128, transpose_bm=True, YQ=out
+             )
+         else:
+             # Convert from (B, N * V) to (N, B, V)
+             out = out.view(-1, self.num_heads, self.v_head_dim).transpose(0, 1)
+
+             # Multiply (N, B, L) x (N, L, V) -> (N, B, V)
+             torch.bmm(x, self.W_UV, out=out)  # Reuse "out" to make it "hot"
+
+             # Convert from (N, B, V) to (B, N * V)
+             out_new = out.transpose(0, 1).reshape(-1, self.num_heads * self.v_head_dim)
+
+             # Adjust the output buffer shape back to the original (B, N * V)
+             N, B, V = out.shape
+             out.resize_((B, N * V))
+             out.copy_(out_new)  # Copy result
+
+
+ class MLACommonImpl(MLACommonBaseImpl[M], Generic[M]):
+     """
+     NOTE: Please read the comment at the top of the file before trying to
+     understand this class
+     """
+
+     def __init__(self, *args, **kwargs) -> None:
+         super().__init__(*args, **kwargs)
+
+         if use_flashinfer_prefill():
+             logger.debug_once("Using FlashInfer prefill for MLA")
+             self._run_prefill_context_chunk = self._run_prefill_context_chunk_fi
+             self._run_prefill_new_tokens = self._run_prefill_new_tokens_fi
+             self._pad_v = False
+         elif use_trtllm_ragged_deepseek_prefill():
+             logger.debug_once("Using TRT-LLM ragged DeepSeek prefill for MLA")
+             self._run_prefill_context_chunk = (
+                 self._run_prefill_context_chunk_trtllm_ragged
+             )
+             self._run_prefill_new_tokens = self._run_prefill_new_tokens_trtllm_ragged
+             self._pad_v = False
+         elif use_cudnn_prefill():
+             logger.debug_once("Using CUDNN prefill for MLA")
+             self._run_prefill_context_chunk = self._run_prefill_context_chunk_cudnn
+             self._run_prefill_new_tokens = self._run_prefill_new_tokens_cudnn
+             self._pad_v = False
+         else:  # Use FlashAttention
+             logger.debug_once("Using FlashAttention prefill for MLA")
+             self._run_prefill_context_chunk = self._run_prefill_context_chunk_fa
+             self._run_prefill_new_tokens = self._run_prefill_new_tokens_fa
+
+             # Handle the differences between flash_attn_varlen from
+             # flash_attn and the one from vllm_flash_attn: the former is
+             # used on ROCm, and the latter has an additional parameter to
+             # control FA2 vs FA3.
+             self.flash_attn_varlen_func = flash_attn_varlen_func
+             self.vllm_flash_attn_version = get_flash_attn_version()
+             if self.vllm_flash_attn_version is not None:
+                 self.flash_attn_varlen_func = functools.partial(
+                     flash_attn_varlen_func, fa_version=self.vllm_flash_attn_version
+                 )
+
+             # For MLA the v head dim is smaller than the qk head dim, so we
+             # pad out v with 0s to match the qk head dim for attention
+             # backends that do not support different head dims.
+             # We don't need to pad v if we are on a Hopper system with FA3.
+             device_capability = current_platform.get_device_capability()
+             self._pad_v = self.vllm_flash_attn_version is None or not (
+                 self.vllm_flash_attn_version == 3
+                 and device_capability is not None
+                 and device_capability[0] == 9
+             )
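+             # Example (illustrative, DeepSeek-like dims where the qk head dim
+             # is 192 and the v head dim is 128): with FA3 on Hopper
+             # (capability 9.x), _pad_v is False; with FA2 it is True and v is
+             # zero-padded up to 192.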
+
+         self.dcp_world_size: int = -1
+
+         self.chunked_prefill_workspace_size = (
+             MLACommonMetadataBuilder.determine_chunked_prefill_workspace_size(
+                 get_current_vllm_config()
+             )
+         )
+         self.cp_kv_cache_interleave_size: int = (
+             get_current_vllm_config().parallel_config.cp_kv_cache_interleave_size
+         )
+
+     def _flash_attn_varlen_diff_headdims(
+         self, q, k, v, return_softmax_lse=False, softmax_scale=None, **kwargs
+     ):
+         maybe_padded_v = v
+         if self._pad_v:
+             maybe_padded_v = torch.nn.functional.pad(
+                 v, [0, q.shape[-1] - v.shape[-1]], value=0
+             )
+
+         if is_vllm_fa:
+             kwargs["return_softmax_lse"] = return_softmax_lse
+         else:
+             # ROCm leverages the upstream flash_attn, which takes a parameter
+             # called "return_attn_probs" instead of "return_softmax_lse"
+             kwargs["return_attn_probs"] = return_softmax_lse
+         if vllm_is_batch_invariant():
+             kwargs["num_splits"] = 1
+
+         attn_out = self.flash_attn_varlen_func(
+             q=q,
+             k=k,
+             v=maybe_padded_v,
+             softmax_scale=softmax_scale,
+             **kwargs,
+         )
+
+         # Unpack the output if there are multiple results
+         lse = None
+         if isinstance(attn_out, tuple):
+             attn_out, lse = attn_out[0], attn_out[1]
+
+         # Remain consistent with the old `flash_attn_varlen_func`, where
+         # there is only one output tensor if `return_softmax_lse` is False.
+         if return_softmax_lse:
+             return attn_out, lse
+         return attn_out
+
+     def _run_prefill_new_tokens_fa(
+         self, prefill: MLACommonPrefillMetadata, q, k, v, return_softmax_lse
+     ):
+         return self._flash_attn_varlen_diff_headdims(
+             q=q,
+             k=k,
+             v=v,
+             cu_seqlens_q=prefill.query_start_loc,
+             cu_seqlens_k=prefill.query_start_loc,
+             max_seqlen_q=prefill.max_query_len,
+             max_seqlen_k=prefill.max_query_len,
+             softmax_scale=self.scale,
+             causal=True,
+             return_softmax_lse=return_softmax_lse,
+         )
+
+     def _run_prefill_new_tokens_fi(
+         self, prefill: MLACommonPrefillMetadata, q, k, v, return_softmax_lse
+     ):
+         assert isinstance(prefill, FlashInferPrefillMetadata)
+         assert prefill.prefill_main is not None
+
+         ret = prefill.prefill_main.run(
+             q=q,
+             k=k,
+             v=v,
+             return_lse=return_softmax_lse,
+         )
+
+         if isinstance(ret, tuple):
+             return ret[0], ret[1].transpose(0, 1).contiguous()
+         return ret
+
+     def _run_prefill_new_tokens_cudnn(
+         self, prefill: MLACommonPrefillMetadata, q, k, v, return_softmax_lse
+     ):
+         assert isinstance(prefill, CudnnPrefillMetadata)
+         assert prefill.query_seq_lens is not None
+         output, lse = cudnn_batch_prefill_with_kv_cache(
+             q=q,
+             k_cache=k,
+             v_cache=v,
+             scale=self.scale,
+             workspace_buffer=prefill.cudnn_workspace,
+             max_token_per_sequence=prefill.max_query_len,
+             max_sequence_kv=prefill.max_query_len,
+             actual_seq_lens_q=prefill.query_seq_lens.view(-1, 1, 1, 1),
+             actual_seq_lens_kv=prefill.query_seq_lens.view(-1, 1, 1, 1),
+             causal=True,
+             # Passing False is not supported for now
+             return_lse=True,
+             # Indicates whether actual_seq_lens are on GPU or CPU.
+             is_cuda_graph_compatible=True,
+         )
+         if return_softmax_lse:
+             return output, lse
+         return output
+
+     def _run_prefill_context_chunk_fa(
+         self, prefill: MLACommonPrefillMetadata, chunk_idx: int, q, k, v
+     ):
+         assert prefill.chunked_context is not None
+         return self._flash_attn_varlen_diff_headdims(
+             q=q,
+             k=k,
+             v=v,
+             cu_seqlens_q=prefill.query_start_loc,
+             cu_seqlens_k=prefill.chunked_context.cu_seq_lens[chunk_idx],
+             max_seqlen_q=prefill.max_query_len,
+             max_seqlen_k=prefill.chunked_context.max_seq_lens[chunk_idx],
+             softmax_scale=self.scale,
+             causal=False,  # Context is unmasked
+             return_softmax_lse=True,
+         )
+
+     def _run_prefill_context_chunk_fi(
+         self, prefill: MLACommonPrefillMetadata, chunk_idx: int, q, k, v
+     ):
+         assert isinstance(prefill, FlashInferPrefillMetadata)
+
+         attn_out, lse = prefill.prefill_chunks[chunk_idx].run(
+             q=q,
+             k=k,
+             v=v,
+             return_lse=True,
+         )
+
+         # Convert from (q_len, num_heads) to (num_heads, q_len)
+         return attn_out, lse.transpose(0, 1).contiguous()
+
+     def _run_prefill_context_chunk_cudnn(
+         self, prefill: MLACommonPrefillMetadata, chunk_idx: int, q, k, v
+     ):
+         assert isinstance(prefill, CudnnPrefillMetadata)
+         assert prefill.chunked_context is not None
+         assert prefill.chunked_context.seq_lens[chunk_idx] is not None
+         assert prefill.query_seq_lens is not None
+         return cudnn_batch_prefill_with_kv_cache(
+             q=q,
+             k_cache=k,
+             v_cache=v,
+             scale=self.scale,
+             workspace_buffer=prefill.cudnn_workspace,
+             max_token_per_sequence=prefill.max_query_len,
+             max_sequence_kv=prefill.chunked_context.max_seq_lens[chunk_idx],
+             actual_seq_lens_q=prefill.query_seq_lens.view(-1, 1, 1, 1),
+             actual_seq_lens_kv=prefill.chunked_context.seq_lens[chunk_idx].view(
+                 -1, 1, 1, 1
+             ),
+             causal=False,
+             return_lse=True,
+             # Indicates whether actual_seq_lens are on GPU or CPU.
+             is_cuda_graph_compatible=True,
+         )
+
+     def _run_prefill_new_tokens_trtllm_ragged(
+         self, prefill: MLACommonPrefillMetadata, q, k, v, return_softmax_lse
+     ):
+         """TRT-LLM ragged attention for new tokens (causal)."""
+         from flashinfer.prefill import trtllm_ragged_attention_deepseek
+
+         assert prefill.query_seq_lens is not None
+         assert prefill.workspace_buffer is not None
+
+         ret = trtllm_ragged_attention_deepseek(
+             query=q,
+             key=k,
+             value=v,
+             workspace_buffer=prefill.workspace_buffer,
+             seq_lens=prefill.query_seq_lens,
+             max_q_len=prefill.max_query_len,
+             max_kv_len=prefill.max_query_len,
+             bmm1_scale=self.scale,
+             bmm2_scale=1.0,
+             o_sf_scale=1.0,
+             batch_size=prefill.query_seq_lens.shape[0],
+             window_left=-1,
+             cum_seq_lens_q=prefill.query_start_loc,
+             cum_seq_lens_kv=prefill.query_start_loc,
+             enable_pdl=False,
+             is_causal=True,
+             return_lse=return_softmax_lse,
+         )
+
+         if isinstance(ret, tuple):
+             # Convert from (q_len, num_heads) to (num_heads, q_len)
+             return ret[0], ret[1].transpose(0, 1).contiguous()
+         return ret
+
+     def _run_prefill_context_chunk_trtllm_ragged(
+         self, prefill: MLACommonPrefillMetadata, chunk_idx: int, q, k, v
+     ):
+         """TRT-LLM ragged attention for context chunks (non-causal)."""
+         from flashinfer.prefill import trtllm_ragged_attention_deepseek
+
+         assert prefill.chunked_context is not None
+         assert prefill.chunked_context.seq_lens[chunk_idx] is not None
+         assert prefill.workspace_buffer is not None
+
+         out = torch.zeros(
+             q.shape[0],
+             q.shape[1],
+             v.shape[2],
+             device=q.device,
+             dtype=q.dtype,
+         )
+         prefill.workspace_buffer.fill_(0)
+
+         attn_out, lse = trtllm_ragged_attention_deepseek(
+             query=q,
+             key=k,
+             value=v,
+             workspace_buffer=prefill.workspace_buffer,
+             seq_lens=prefill.chunked_context.seq_lens[chunk_idx],
+             max_q_len=prefill.max_query_len,
+             max_kv_len=prefill.chunked_context.max_seq_lens[chunk_idx],
+             bmm1_scale=self.scale,
+             bmm2_scale=1.0,
+             o_sf_scale=1.0,
+             batch_size=prefill.chunked_context.seq_lens[chunk_idx].shape[0],
+             window_left=-1,
+             cum_seq_lens_q=prefill.query_start_loc,
+             cum_seq_lens_kv=prefill.chunked_context.cu_seq_lens[chunk_idx],
+             enable_pdl=False,
+             is_causal=False,
+             return_lse=True,
+             out=out,
+         )
+
+         # Convert from (q_len, num_heads) to (num_heads, q_len)
+         return attn_out, lse.transpose(0, 1).contiguous()
+
+     def process_weights_after_loading(self, act_dtype: torch.dtype):
+         def get_layer_weight(layer):
+             WEIGHT_NAMES = ("weight", "qweight", "weight_packed")
+             for attr in WEIGHT_NAMES:
+                 if hasattr(layer, attr):
+                     return getattr(layer, attr)
+             raise AttributeError(
+                 f"Layer '{layer}' has no recognized weight attribute: {WEIGHT_NAMES}."
+             )
+
+         def get_and_maybe_dequant_weights(layer: LinearBase):
+             if layer.quant_method is not None and not isinstance(
+                 layer.quant_method, UnquantizedLinearMethod
+             ):
+                 # NOTE: This should only be used offline, since it's O(N^3)
+                 eye = torch.eye(
+                     layer.input_size_per_partition,
+                     dtype=act_dtype,
+                     device=get_layer_weight(layer).device,
+                 )
+                 dequant_weights = layer.quant_method.apply(layer, eye, bias=None)
+                 del eye
+                 # standardize to (output, input)
+                 return dequant_weights.T
+             return layer.weight
+
+         # We currently do not have quantized BMMs, which would be needed for
+         # `W_UV` and `W_UK_T`; instead we store fp16/bf16 copies and perform
+         # the BMMs in 16-bit. The extra memory overhead of this is fairly low.
+         kv_b_proj_weight = get_and_maybe_dequant_weights(self.kv_b_proj).T
+         assert kv_b_proj_weight.shape == (
+             self.kv_lora_rank,
+             self.num_heads * (self.qk_nope_head_dim + self.v_head_dim),
+         ), (
+             f"{kv_b_proj_weight.shape=}, "
+             f"{self.kv_lora_rank=}, "
+             f"{self.num_heads=}, "
+             f"{self.qk_nope_head_dim=}, "
+             f"{self.v_head_dim=}"
+         )
+         kv_b_proj_weight = kv_b_proj_weight.view(
+             self.kv_lora_rank,
+             self.num_heads,
+             self.qk_nope_head_dim + self.v_head_dim,
+         )
+
+         W_UK, W_UV = kv_b_proj_weight.split(
+             [self.qk_nope_head_dim, self.v_head_dim], dim=-1
+         )
+
+         if self.is_aiter_triton_fp8_bmm_enabled:
+             W_K = W_UK.transpose(0, 1)  # (N, L, P), e.g. (16, 512, 128)
+             W_V = W_UV.permute(1, 2, 0)  # (N, V, L), e.g. (16, 128, 512)
+             self.W_K, self.W_K_scale = dynamic_per_batched_tensor_quant(
+                 W_K, dtype=current_platform.fp8_dtype()
+             )
+             self.W_V, self.W_V_scale = dynamic_per_batched_tensor_quant(
+                 W_V, dtype=current_platform.fp8_dtype()
+             )
+
+             # The kernel operates on non-padded inputs, so we pre-compile the
+             # Triton kernel to avoid runtime compilation for unseen batch
+             # sizes. Pre-compile for batch sizes 1 to 1024 to cover most
+             # use-cases. On DS-R1, this adds roughly 50s to model loading.
+             max_batch_size = 1024  # [ToDo] Find the optimal upper limit
+             pre_compilation_list = list(range(1, max_batch_size + 1))
+             if is_global_first_rank():
+                 pre_compilation_list = tqdm(
+                     pre_compilation_list,
+                     desc="[Aiter Triton] Pre-compiling fp8 BMM kernel",
+                     total=max_batch_size,
+                 )
+
+             for m in pre_compilation_list:
+                 x = torch.empty(
+                     (self.W_K.shape[0], m, self.W_K.shape[2]),
+                     dtype=torch.bfloat16,
+                     device=self.W_K.device,
+                 )
+                 rocm_aiter_ops.triton_fp8_bmm(
+                     x, self.W_K, self.W_K_scale, group_size=128, transpose_bm=True
+                 )
+
+                 x = torch.empty(
+                     (self.W_V.shape[0], m, self.W_V.shape[2]),
+                     dtype=torch.bfloat16,
+                     device=self.W_V.device,
+                 )
+                 rocm_aiter_ops.triton_fp8_bmm(
+                     x, self.W_V, self.W_V_scale, group_size=128, transpose_bm=True
+                 )
+         else:
+             # Convert from (L, N, V) to (N, L, V)
+             self.W_UV = W_UV.transpose(0, 1)
+             # Convert from (L, N, P) to (N, P, L)
+             self.W_UK_T = W_UK.permute(1, 2, 0)
+
+     def _concat_k_nope_k_pe(
+         self, k_nope: torch.Tensor, k_pe: torch.Tensor
+     ) -> torch.Tensor:
+         """
+         Efficiently concatenate k_nope and k_pe tensors along the last dimension.
+
+         This function avoids the performance penalty of torch.cat with expanded
+         non-contiguous tensors by pre-allocating the output and using direct copies.
+
+         Args:
+             k_nope: Tensor of shape [..., nope_dim]
+             k_pe: Tensor to broadcast and concatenate, typically of shape
+                 [..., 1, pe_dim] or [..., pe_dim]
+
+         Returns:
+             Tensor of shape [..., nope_dim + pe_dim]
+         """
+         k = torch.empty(
+             (*k_nope.shape[:-1], k_nope.shape[-1] + k_pe.shape[-1]),
+             dtype=k_nope.dtype,
+             device=k_nope.device,
+         )
+         # Direct copies with efficient broadcasting
+         k[..., : k_nope.shape[-1]] = k_nope
+         k[..., k_nope.shape[-1] :] = k_pe
+         return k
+
+     def _compute_prefill_context(
+         self,
+         q: torch.Tensor,
+         kv_c_and_k_pe_cache: torch.Tensor,
+         attn_metadata: MLACommonMetadata,
+         k_scale: torch.Tensor,
+     ):
+         assert attn_metadata.prefill is not None
+         prefill_metadata = attn_metadata.prefill
+         assert prefill_metadata.chunked_context is not None
+
+         output = None
+         iters = len(prefill_metadata.chunked_context.seq_tot)
+         workspace = prefill_metadata.chunked_context.workspace
+         for i in range(iters):
+             toks = prefill_metadata.chunked_context.seq_tot[i]
+             ops.gather_and_maybe_dequant_cache(
+                 src_cache=kv_c_and_k_pe_cache,
+                 dst=workspace,
+                 block_table=prefill_metadata.block_table,
+                 cu_seq_lens=prefill_metadata.chunked_context.cu_seq_lens[i],
+                 token_to_seq=prefill_metadata.chunked_context.token_to_seq[i],
+                 num_tokens=prefill_metadata.chunked_context.chunk_total_token[i],
+                 kv_cache_dtype=self.kv_cache_dtype,
+                 scale=k_scale,
+                 seq_starts=prefill_metadata.chunked_context.starts[i],
+             )
+
+             kv_c_normed = workspace[:toks][..., : self.kv_lora_rank]
+             k_pe = workspace[:toks][..., self.kv_lora_rank :].unsqueeze(1)
+
+             kv_nope = self.kv_b_proj(kv_c_normed)[0].view(
+                 -1, self.num_heads, self.qk_nope_head_dim + self.v_head_dim
+             )
+             k_nope, v = kv_nope.split([self.qk_nope_head_dim, self.v_head_dim], dim=-1)
+
+             k = self._concat_k_nope_k_pe(k_nope, k_pe)
+
+             attn_output, attn_softmax_lse = self._run_prefill_context_chunk(
+                 prefill=prefill_metadata,
+                 chunk_idx=i,
+                 q=q,
+                 k=k,
+                 v=v,
+             )
+
+             if output is None:
+                 output = attn_output
+                 output_lse = attn_softmax_lse
+             else:
+                 output_tmp = torch.empty_like(output)
+                 output_lse_tmp = torch.empty_like(output_lse)
+                 merge_attn_states(
+                     output=output_tmp,
+                     output_lse=output_lse_tmp,
+                     prefix_output=output,
+                     prefix_lse=output_lse,
+                     suffix_output=attn_output,
+                     suffix_lse=attn_softmax_lse,
+                 )
+                 output = output_tmp
+                 output_lse = output_lse_tmp
+
+         return output, output_lse
+
+     def _context_parallel_compute_prefill_context(
+         self,
+         q: torch.Tensor,
+         kv_c_and_k_pe_cache: torch.Tensor,
+         attn_metadata: MLACommonMetadata,
+         k_scale: torch.Tensor,
+         dcp_world_size: int,
+     ):
+         assert k_scale is None, "DCP does not support a scaled KV cache yet."
+         assert attn_metadata.prefill is not None
+         prefill_metadata = attn_metadata.prefill
+         assert prefill_metadata.chunked_context is not None
+         assert prefill_metadata.chunked_context.padded_local_chunk_seq_lens is not None
+         assert prefill_metadata.chunked_context.local_context_lens_allranks is not None
+         assert prefill_metadata.chunked_context.padded_local_cu_seq_lens is not None
+         assert prefill_metadata.chunked_context.cu_seq_lens_lst is not None
+         assert prefill_metadata.chunked_context.chunk_size is not None
+
+         output = None
+         iters = len(prefill_metadata.chunked_context.seq_tot)
+         workspace = prefill_metadata.chunked_context.workspace
+
+         for i in range(iters):
+             toks = prefill_metadata.chunked_context.seq_tot[i]
+             ops.cp_gather_cache(
+                 src_cache=kv_c_and_k_pe_cache,
+                 dst=workspace,
+                 block_table=prefill_metadata.block_table,
+                 cu_seq_lens=prefill_metadata.chunked_context.padded_local_cu_seq_lens[
+                     i
+                 ],
+                 batch_size=attn_metadata.num_prefills,
+                 seq_starts=prefill_metadata.chunked_context.starts[i],
+             )
+             # workspace layout:
+             # |------- N tokens -------|-------- N*dcp_size tokens --------|
+             # |<- used for local_gather ->|<----- used for allgather ----->|
+             allgather_offset = workspace.shape[0] // (dcp_world_size + 1)
+             assert allgather_offset * (dcp_world_size + 1) == workspace.shape[0]
+             assert toks <= allgather_offset
+             local_gathered_kvcache = workspace[:toks]
+             cur_allgather_workspace = workspace[
+                 allgather_offset : allgather_offset * (1 + dcp_world_size)
+             ]
+             assert toks * dcp_world_size <= cur_allgather_workspace.shape[0]
+             cur_allgather_kvcache = cur_allgather_workspace[: toks * dcp_world_size]
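+             # Illustration (assumed sizes): with dcp_world_size = 4, the
+             # workspace has 5 * W rows and allgather_offset = W; rows
+             # [0, toks) hold this rank's local gather, and rows [W, 5 * W)
+             # receive up to W gathered rows from each of the 4 ranks.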
+             cur_allgather_kvcache.copy_(
+                 get_dcp_group().all_gather(local_gathered_kvcache, dim=0)
+             )
+             assert (
+                 cur_allgather_kvcache.shape[-1]
+                 == self.kv_lora_rank + self.qk_rope_head_dim
+             )
+             allgatered_kv_c_normed, allgatered_k_pe = cur_allgather_kvcache.unsqueeze(
+                 1
+             ).split([self.kv_lora_rank, self.qk_rope_head_dim], dim=-1)
+
+             kv_c_normed, k_pe = reorg_kvcache(
+                 allgatered_kv_c_normed,
+                 allgatered_k_pe,
+                 padded_local_chunk_seq_lens_lst=prefill_metadata.chunked_context.padded_local_chunk_seq_lens[
+                     i
+                 ],
+                 local_context_lens_allranks=prefill_metadata.chunked_context.local_context_lens_allranks,
+                 sum_seq_len=prefill_metadata.chunked_context.cu_seq_lens_lst[i][-1],
+                 max_seq_len=prefill_metadata.chunked_context.max_seq_lens[i],
+                 chunk_size=prefill_metadata.chunked_context.chunk_size,
+                 chunk_idx=i,
+                 toks=toks,
+             )
+
+             kv_nope = self.kv_b_proj(kv_c_normed)[0].view(
+                 -1, self.num_heads, self.qk_nope_head_dim + self.v_head_dim
+             )
+             k_nope, v = kv_nope.split([self.qk_nope_head_dim, self.v_head_dim], dim=-1)
+             k = self._concat_k_nope_k_pe(k_nope, k_pe)
+
+             attn_output, attn_softmax_lse = self._run_prefill_context_chunk(
+                 prefill=prefill_metadata,
+                 chunk_idx=i,
+                 q=q,
+                 k=k,
+                 v=v,
+             )
+
+             if output is None:
+                 output = attn_output
+                 output_lse = attn_softmax_lse
+             else:
+                 output_tmp = torch.empty_like(output)
+                 output_lse_tmp = torch.empty_like(output_lse)
+                 merge_attn_states(
+                     output=output_tmp,
+                     output_lse=output_lse_tmp,
+                     prefix_output=output,
+                     prefix_lse=output_lse,
+                     suffix_output=attn_output,
+                     suffix_lse=attn_softmax_lse,
+                 )
+                 output = output_tmp
+                 output_lse = output_lse_tmp
+
+         return output, output_lse
+
+     def _forward_prefill(
+         self,
+         q: torch.Tensor,
+         kv_c_normed: torch.Tensor,
+         k_pe: torch.Tensor,
+         kv_c_and_k_pe_cache: torch.Tensor,
+         attn_metadata: MLACommonMetadata,
+         k_scale: torch.Tensor,
+         output: torch.Tensor,
+     ) -> None:
+         # TODO (zyongye): Prefill function here
+         assert attn_metadata.prefill is not None
+         assert self.dcp_world_size != -1
+
+         has_context = attn_metadata.prefill.chunked_context is not None
+         kv_nope = self.kv_b_proj(kv_c_normed)[0].view(
+             -1, self.num_heads, self.qk_nope_head_dim + self.v_head_dim
+         )
+         k_nope, v = kv_nope.split([self.qk_nope_head_dim, self.v_head_dim], dim=-1)
+
+         k = self._concat_k_nope_k_pe(k_nope, k_pe)
+
+         output_prefill = self._run_prefill_new_tokens(
+             prefill=attn_metadata.prefill,
+             q=q,
+             k=k,
+             v=v,
+             return_softmax_lse=has_context,
+         )
+
+         if has_context:
+             suffix_output, suffix_lse = output_prefill
+             if self.dcp_world_size > 1:
+                 context_output, context_lse = (
+                     self._context_parallel_compute_prefill_context(
+                         q,
+                         kv_c_and_k_pe_cache,
+                         attn_metadata,
+                         k_scale=None,
+                         dcp_world_size=self.dcp_world_size,
+                     )
+                 )
+             else:
+                 context_output, context_lse = self._compute_prefill_context(
+                     q, kv_c_and_k_pe_cache, attn_metadata, k_scale
+                 )
+
+             # unpad if necessary
+             if self._pad_v:
+                 context_output = context_output[..., : v.shape[-1]]
+                 suffix_output = suffix_output[..., : v.shape[-1]]
+
+             output = output.view(-1, self.num_heads, self.v_head_dim)
+             merge_attn_states(
+                 output=output,
+                 prefix_output=context_output,
+                 prefix_lse=context_lse,
+                 suffix_output=suffix_output,
+                 suffix_lse=suffix_lse,
+             )
+         else:
+             output_prefill = output_prefill[..., : v.shape[-1]].flatten(start_dim=-2)
+             output.copy_(output_prefill)
+
+     @abstractmethod
+     def _forward_decode(
+         self,
+         q: torch.Tensor | tuple[torch.Tensor, torch.Tensor],
+         kv_c_and_k_pe_cache: torch.Tensor,
+         attn_metadata: M,
+         layer: AttentionLayer,
+     ) -> tuple[torch.Tensor, torch.Tensor | None]:
+         raise NotImplementedError
+
+     def forward(
+         self,
+         layer: AttentionLayer,
+         q: torch.Tensor,
+         k_c_normed: torch.Tensor,  # key in unified attn
+         k_pe: torch.Tensor,  # value in unified attn
+         kv_cache: torch.Tensor,
+         attn_metadata: M,
+         output: torch.Tensor | None = None,
+         output_scale: torch.Tensor | None = None,
+         output_block_scale: torch.Tensor | None = None,
+     ) -> torch.Tensor:
+         assert output is not None, "Output tensor must be provided."
+
+         if output_scale is not None or output_block_scale is not None:
+             raise NotImplementedError(
+                 "fused output quantization is not yet supported for MLACommonImpl"
+             )
+
+         if attn_metadata is None:
+             # During the profile run, try to simulate the worst-case output
+             # size for `self.kv_b_proj(kv_c_normed)` in
+             # `_compute_prefill_context`, since this can be large.
+             _ = torch.empty(
+                 (
+                     self.chunked_prefill_workspace_size,
+                     self.num_heads,
+                     self.qk_nope_head_dim + self.v_head_dim,
+                 ),
+                 device=k_c_normed.device,
+                 dtype=k_c_normed.dtype,
+             )
+
+             # The zero fill is required when used with DP + EP
+             # to ensure all ranks within a DP group compute the
+             # same expert outputs.
+             return output.fill_(0)
+
+         if self.dcp_world_size == -1:
+             self.dcp_world_size = get_dcp_group().world_size
+
+         fp8_attention = self.kv_cache_dtype.startswith("fp8")
+
+         num_actual_toks = attn_metadata.num_actual_tokens
+
+         # Inputs and outputs may be padded for CUDA graphs
+         output_padded = output
+         output = output[:num_actual_toks, ...]
+         q = q[:num_actual_toks, ...]
+         k_c_normed = k_c_normed[:num_actual_toks, ...]
+         k_pe = k_pe[:num_actual_toks, ...]
+
+         assert (
+             attn_metadata.num_decodes is not None
+             and attn_metadata.num_prefills is not None
+             and attn_metadata.num_decode_tokens is not None
+         )
+
+         has_decode = attn_metadata.num_decodes > 0
+         has_prefill = attn_metadata.num_prefills > 0
+         num_decode_tokens = attn_metadata.num_decode_tokens
+
+         decode_q = q[:num_decode_tokens]
+
+         prefill_q = q[num_decode_tokens:]
+         prefill_k_pe = k_pe[num_decode_tokens:]
+         prefill_k_c_normed = k_c_normed[num_decode_tokens:]
+
+         # write the latent and rope to the kv cache
+         if kv_cache.numel() > 0:
+             ops.concat_and_cache_mla(
+                 k_c_normed,
+                 k_pe.squeeze(1),
+                 kv_cache,
+                 attn_metadata.slot_mapping.flatten(),
+                 kv_cache_dtype=self.kv_cache_dtype,
+                 scale=layer._k_scale,
+             )
+
+         if fp8_attention:
+             kv_cache = kv_cache.view(current_platform.fp8_dtype())
+
+         if has_prefill:
+             self._forward_prefill(
+                 prefill_q,
+                 prefill_k_c_normed,
+                 prefill_k_pe,
+                 kv_cache,
+                 attn_metadata,
+                 layer._k_scale,
+                 output=output[num_decode_tokens:],
+             )
+
+         if has_decode:
+             assert attn_metadata.decode is not None
+
+             decode_q_nope, decode_q_pe = decode_q.split(
+                 [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1
+             )
+
+             # Convert from (B, N, P) to (N, B, P)
+             decode_q_nope = decode_q_nope.transpose(0, 1)
+
+             if self.q_pad_num_heads is not None:
+                 B, N, L = decode_q_pe.shape
+                 decode_pe_padded = decode_q_pe.new_empty((B, self.q_pad_num_heads, L))
+                 decode_pe_padded.resize_((B, N, L))
+                 decode_pe_padded.copy_(decode_q_pe)
+                 decode_q_pe = decode_pe_padded
+
+             if self.is_aiter_triton_fp8_bmm_enabled:
+                 # Multiply + Transpose (N, B, P) x (N, P, L) -> (N, B, L) -> (B, N, L)
+                 decode_ql_nope = rocm_aiter_ops.triton_fp8_bmm(
+                     decode_q_nope,
+                     self.W_K,
+                     self.W_K_scale,
+                     group_size=128,
+                     transpose_bm=True,
+                 )
+             else:
+                 # Pad the head dim if necessary (for the underlying kernel)
+                 N, B, P = decode_q_nope.shape
+                 _, _, L = self.W_UK_T.shape
+
+                 if self.q_pad_num_heads is not None:
+                     decode_ql_nope = decode_q_nope.new_empty(
+                         (self.q_pad_num_heads, B, L)
+                     )
+                     decode_ql_nope.resize_((N, B, L))
+                 else:
+                     decode_ql_nope = decode_q_nope.new_empty((N, B, L))
+
+                 # Multiply (N, B, P) x (N, P, L) -> (N, B, L)
+                 torch.bmm(decode_q_nope, self.W_UK_T, out=decode_ql_nope)
+
+                 # Convert from (N, B, L) to (B, N, L)
+                 decode_ql_nope = decode_ql_nope.transpose(0, 1)
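+                 # Shape example (illustrative): B = 4 decode tokens, N = 16
+                 # heads, P = 128, L = 512: bmm (16, 4, 128) x (16, 128, 512)
+                 # -> (16, 4, 512), transposed to (4, 16, 512).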
+
+             if fp8_attention:
+                 ql_nope_shape = decode_ql_nope.shape
+                 q_pe_shape = decode_q_pe.shape
+                 assert decode_ql_nope.shape[0] == decode_q_pe.shape[0]
+                 assert decode_ql_nope.shape[1] == decode_q_pe.shape[1]
+                 decode_q_shape = (
+                     ql_nope_shape[0],
+                     ql_nope_shape[1],
+                     ql_nope_shape[2] + q_pe_shape[2],
+                 )
+                 # Use empty+copy, since torch.cat introduces significant overhead.
+                 decode_q0 = torch.empty(
+                     decode_q_shape,
+                     device=decode_ql_nope.device,
+                     dtype=decode_ql_nope.dtype,
+                 )
+                 decode_q0[..., : ql_nope_shape[2]].copy_(decode_ql_nope)
+                 decode_q0[..., ql_nope_shape[2] :].copy_(decode_q_pe)
+
+                 decode_q, _ = ops.scaled_fp8_quant(
+                     decode_q0.view(decode_q_shape[0], -1),
+                     layer._q_scale,
+                 )
+                 decode_q = decode_q.view(decode_q_shape)
+             else:
+                 decode_q = (decode_ql_nope, decode_q_pe)
+                 if self.dcp_world_size > 1:
+                     assert not fp8_attention, "DCP does not support fp8 KV cache yet."
+                     # concatenate decode_ql_nope and decode_q_pe -> (B, N, L + P)
+                     decode_q = torch.cat(decode_q, dim=-1)
+                     # all-gather decode_q along the head dim.
+                     decode_q = get_dcp_group().all_gather(decode_q, dim=1)
+
+             # call decode attention
+             attn_out, lse = self._forward_decode(
+                 decode_q, kv_cache, attn_metadata, layer
+             )
+
+             # correct the DCP attn_out using the lse.
+             if self.dcp_world_size > 1:
+                 attn_out = cp_lse_ag_out_rs(
+                     attn_out,
+                     lse,
+                     get_dcp_group(),
+                     is_lse_base_on_e=not getattr(self, "_use_fi_prefill", False),
+                 )
+
+             # v_up projection
+             self._v_up_proj(attn_out, out=output[:num_decode_tokens])
+         return output_padded