vllm-cpu-avx512vnni 0.10.2.post2__cp312-cp312-manylinux_2_17_x86_64.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of vllm-cpu-avx512vnni might be problematic.

Files changed (1395)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +220 -0
  3. vllm/_bc_linter.py +59 -0
  4. vllm/_custom_ops.py +2022 -0
  5. vllm/_ipex_ops.py +404 -0
  6. vllm/_version.py +34 -0
  7. vllm/adapter_commons/__init__.py +0 -0
  8. vllm/adapter_commons/layers.py +16 -0
  9. vllm/adapter_commons/models.py +106 -0
  10. vllm/adapter_commons/request.py +26 -0
  11. vllm/adapter_commons/utils.py +93 -0
  12. vllm/adapter_commons/worker_manager.py +39 -0
  13. vllm/assets/__init__.py +0 -0
  14. vllm/assets/audio.py +45 -0
  15. vllm/assets/base.py +41 -0
  16. vllm/assets/image.py +50 -0
  17. vllm/assets/video.py +138 -0
  18. vllm/attention/__init__.py +19 -0
  19. vllm/attention/backends/__init__.py +0 -0
  20. vllm/attention/backends/abstract.py +348 -0
  21. vllm/attention/backends/differential_flash_attn.py +935 -0
  22. vllm/attention/backends/dual_chunk_flash_attn.py +1499 -0
  23. vllm/attention/backends/flash_attn.py +933 -0
  24. vllm/attention/backends/flashmla.py +238 -0
  25. vllm/attention/backends/mla/__init__.py +0 -0
  26. vllm/attention/backends/mla/common.py +1310 -0
  27. vllm/attention/backends/placeholder_attn.py +340 -0
  28. vllm/attention/backends/rocm_aiter_mla.py +410 -0
  29. vllm/attention/backends/rocm_flash_attn.py +953 -0
  30. vllm/attention/backends/triton_mla.py +111 -0
  31. vllm/attention/backends/utils.py +610 -0
  32. vllm/attention/backends/xformers.py +805 -0
  33. vllm/attention/layer.py +552 -0
  34. vllm/attention/layers/__init__.py +0 -0
  35. vllm/attention/layers/chunked_local_attention.py +91 -0
  36. vllm/attention/layers/cross_attention.py +159 -0
  37. vllm/attention/layers/encoder_only_attention.py +86 -0
  38. vllm/attention/ops/__init__.py +0 -0
  39. vllm/attention/ops/chunked_prefill_paged_decode.py +405 -0
  40. vllm/attention/ops/common.py +139 -0
  41. vllm/attention/ops/flashmla.py +123 -0
  42. vllm/attention/ops/merge_attn_states.py +43 -0
  43. vllm/attention/ops/paged_attn.py +261 -0
  44. vllm/attention/ops/pallas_kv_cache_update.py +124 -0
  45. vllm/attention/ops/prefix_prefill.py +928 -0
  46. vllm/attention/ops/rocm_aiter_mla.py +104 -0
  47. vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
  48. vllm/attention/ops/triton_decode_attention.py +676 -0
  49. vllm/attention/ops/triton_flash_attention.py +984 -0
  50. vllm/attention/ops/triton_merge_attn_states.py +97 -0
  51. vllm/attention/ops/triton_unified_attention.py +854 -0
  52. vllm/attention/selector.py +243 -0
  53. vllm/attention/utils/__init__.py +0 -0
  54. vllm/attention/utils/fa_utils.py +85 -0
  55. vllm/attention/utils/kv_sharing_utils.py +33 -0
  56. vllm/beam_search.py +87 -0
  57. vllm/benchmarks/__init__.py +0 -0
  58. vllm/benchmarks/datasets.py +2651 -0
  59. vllm/benchmarks/latency.py +170 -0
  60. vllm/benchmarks/lib/__init__.py +3 -0
  61. vllm/benchmarks/lib/endpoint_request_func.py +510 -0
  62. vllm/benchmarks/lib/ready_checker.py +72 -0
  63. vllm/benchmarks/lib/utils.py +80 -0
  64. vllm/benchmarks/serve.py +1247 -0
  65. vllm/benchmarks/throughput.py +696 -0
  66. vllm/collect_env.py +823 -0
  67. vllm/compilation/__init__.py +0 -0
  68. vllm/compilation/activation_quant_fusion.py +193 -0
  69. vllm/compilation/backends.py +641 -0
  70. vllm/compilation/base_static_graph.py +51 -0
  71. vllm/compilation/collective_fusion.py +1190 -0
  72. vllm/compilation/compiler_interface.py +572 -0
  73. vllm/compilation/counter.py +47 -0
  74. vllm/compilation/cuda_graph.py +193 -0
  75. vllm/compilation/cuda_piecewise_backend.py +117 -0
  76. vllm/compilation/decorators.py +316 -0
  77. vllm/compilation/fix_functionalization.py +208 -0
  78. vllm/compilation/fusion.py +600 -0
  79. vllm/compilation/fusion_attn.py +303 -0
  80. vllm/compilation/fx_utils.py +84 -0
  81. vllm/compilation/inductor_pass.py +136 -0
  82. vllm/compilation/monitor.py +57 -0
  83. vllm/compilation/multi_output_match.py +109 -0
  84. vllm/compilation/noop_elimination.py +165 -0
  85. vllm/compilation/pass_manager.py +88 -0
  86. vllm/compilation/sequence_parallelism.py +484 -0
  87. vllm/compilation/torch25_custom_graph_pass.py +42 -0
  88. vllm/compilation/vllm_inductor_pass.py +50 -0
  89. vllm/compilation/wrapper.py +138 -0
  90. vllm/config/__init__.py +3921 -0
  91. vllm/config/cache.py +214 -0
  92. vllm/config/compilation.py +580 -0
  93. vllm/config/kv_events.py +50 -0
  94. vllm/config/kv_transfer.py +111 -0
  95. vllm/config/load.py +113 -0
  96. vllm/config/lora.py +132 -0
  97. vllm/config/parallel.py +446 -0
  98. vllm/config/scheduler.py +304 -0
  99. vllm/config/utils.py +29 -0
  100. vllm/connections.py +174 -0
  101. vllm/core/__init__.py +0 -0
  102. vllm/core/block/__init__.py +0 -0
  103. vllm/core/block/block_table.py +399 -0
  104. vllm/core/block/common.py +371 -0
  105. vllm/core/block/cpu_gpu_block_allocator.py +439 -0
  106. vllm/core/block/interfaces.py +319 -0
  107. vllm/core/block/naive_block.py +466 -0
  108. vllm/core/block/prefix_caching_block.py +1135 -0
  109. vllm/core/block/utils.py +28 -0
  110. vllm/core/block_manager.py +523 -0
  111. vllm/core/evictor.py +157 -0
  112. vllm/core/interfaces.py +139 -0
  113. vllm/core/placeholder_block_space_manager.py +103 -0
  114. vllm/core/scheduler.py +2028 -0
  115. vllm/device_allocator/__init__.py +0 -0
  116. vllm/device_allocator/cumem.py +286 -0
  117. vllm/distributed/__init__.py +6 -0
  118. vllm/distributed/communication_op.py +41 -0
  119. vllm/distributed/device_communicators/__init__.py +0 -0
  120. vllm/distributed/device_communicators/all2all.py +259 -0
  121. vllm/distributed/device_communicators/all_reduce_utils.py +292 -0
  122. vllm/distributed/device_communicators/base_device_communicator.py +277 -0
  123. vllm/distributed/device_communicators/cpu_communicator.py +201 -0
  124. vllm/distributed/device_communicators/cuda_communicator.py +294 -0
  125. vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
  126. vllm/distributed/device_communicators/custom_all_reduce.py +311 -0
  127. vllm/distributed/device_communicators/pynccl.py +290 -0
  128. vllm/distributed/device_communicators/pynccl_wrapper.py +382 -0
  129. vllm/distributed/device_communicators/quick_all_reduce.py +278 -0
  130. vllm/distributed/device_communicators/ray_communicator.py +258 -0
  131. vllm/distributed/device_communicators/shm_broadcast.py +585 -0
  132. vllm/distributed/device_communicators/symm_mem.py +136 -0
  133. vllm/distributed/device_communicators/tpu_communicator.py +102 -0
  134. vllm/distributed/device_communicators/xpu_communicator.py +69 -0
  135. vllm/distributed/eplb/__init__.py +8 -0
  136. vllm/distributed/eplb/eplb_state.py +619 -0
  137. vllm/distributed/eplb/rebalance_algo.py +234 -0
  138. vllm/distributed/eplb/rebalance_execute.py +424 -0
  139. vllm/distributed/kv_events.py +362 -0
  140. vllm/distributed/kv_transfer/README.md +29 -0
  141. vllm/distributed/kv_transfer/__init__.py +13 -0
  142. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  143. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  144. vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
  145. vllm/distributed/kv_transfer/kv_connector/factory.py +108 -0
  146. vllm/distributed/kv_transfer/kv_connector/utils.py +246 -0
  147. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
  148. vllm/distributed/kv_transfer/kv_connector/v1/base.py +356 -0
  149. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +167 -0
  150. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +266 -0
  151. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1319 -0
  152. vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
  153. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +484 -0
  154. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +542 -0
  155. vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +266 -0
  156. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +414 -0
  157. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  158. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
  159. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
  160. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
  161. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  162. vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
  163. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +290 -0
  164. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
  165. vllm/distributed/kv_transfer/kv_transfer_state.py +73 -0
  166. vllm/distributed/parallel_state.py +1489 -0
  167. vllm/distributed/tpu_distributed_utils.py +178 -0
  168. vllm/distributed/utils.py +536 -0
  169. vllm/engine/__init__.py +0 -0
  170. vllm/engine/arg_utils.py +1857 -0
  171. vllm/engine/async_llm_engine.py +1044 -0
  172. vllm/engine/async_timeout.py +173 -0
  173. vllm/engine/llm_engine.py +1849 -0
  174. vllm/engine/metrics.py +577 -0
  175. vllm/engine/metrics_types.py +84 -0
  176. vllm/engine/multiprocessing/__init__.py +145 -0
  177. vllm/engine/multiprocessing/client.py +643 -0
  178. vllm/engine/multiprocessing/engine.py +470 -0
  179. vllm/engine/output_processor/__init__.py +0 -0
  180. vllm/engine/output_processor/interfaces.py +61 -0
  181. vllm/engine/output_processor/single_step.py +145 -0
  182. vllm/engine/output_processor/stop_checker.py +131 -0
  183. vllm/engine/output_processor/util.py +28 -0
  184. vllm/engine/protocol.py +343 -0
  185. vllm/entrypoints/__init__.py +0 -0
  186. vllm/entrypoints/api_server.py +178 -0
  187. vllm/entrypoints/chat_utils.py +1535 -0
  188. vllm/entrypoints/cli/__init__.py +12 -0
  189. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  190. vllm/entrypoints/cli/benchmark/base.py +25 -0
  191. vllm/entrypoints/cli/benchmark/latency.py +21 -0
  192. vllm/entrypoints/cli/benchmark/main.py +58 -0
  193. vllm/entrypoints/cli/benchmark/serve.py +21 -0
  194. vllm/entrypoints/cli/benchmark/throughput.py +21 -0
  195. vllm/entrypoints/cli/collect_env.py +36 -0
  196. vllm/entrypoints/cli/main.py +60 -0
  197. vllm/entrypoints/cli/openai.py +214 -0
  198. vllm/entrypoints/cli/run_batch.py +69 -0
  199. vllm/entrypoints/cli/serve.py +232 -0
  200. vllm/entrypoints/cli/types.py +29 -0
  201. vllm/entrypoints/constants.py +10 -0
  202. vllm/entrypoints/context.py +444 -0
  203. vllm/entrypoints/harmony_utils.py +431 -0
  204. vllm/entrypoints/launcher.py +168 -0
  205. vllm/entrypoints/llm.py +1579 -0
  206. vllm/entrypoints/logger.py +79 -0
  207. vllm/entrypoints/openai/__init__.py +0 -0
  208. vllm/entrypoints/openai/api_server.py +2011 -0
  209. vllm/entrypoints/openai/cli_args.py +281 -0
  210. vllm/entrypoints/openai/logits_processors.py +90 -0
  211. vllm/entrypoints/openai/protocol.py +2590 -0
  212. vllm/entrypoints/openai/run_batch.py +497 -0
  213. vllm/entrypoints/openai/serving_chat.py +1591 -0
  214. vllm/entrypoints/openai/serving_classification.py +176 -0
  215. vllm/entrypoints/openai/serving_completion.py +688 -0
  216. vllm/entrypoints/openai/serving_embedding.py +632 -0
  217. vllm/entrypoints/openai/serving_engine.py +996 -0
  218. vllm/entrypoints/openai/serving_models.py +288 -0
  219. vllm/entrypoints/openai/serving_pooling.py +277 -0
  220. vllm/entrypoints/openai/serving_responses.py +1690 -0
  221. vllm/entrypoints/openai/serving_score.py +479 -0
  222. vllm/entrypoints/openai/serving_tokenization.py +196 -0
  223. vllm/entrypoints/openai/serving_transcription.py +136 -0
  224. vllm/entrypoints/openai/speech_to_text.py +388 -0
  225. vllm/entrypoints/openai/tool_parsers/__init__.py +51 -0
  226. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
  227. vllm/entrypoints/openai/tool_parsers/deepseekv31_tool_parser.py +367 -0
  228. vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
  229. vllm/entrypoints/openai/tool_parsers/glm4_moe_tool_parser.py +185 -0
  230. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
  231. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
  232. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +418 -0
  233. vllm/entrypoints/openai/tool_parsers/hunyuan_a13b_tool_parser.py +372 -0
  234. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
  235. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
  236. vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py +377 -0
  237. vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
  238. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +269 -0
  239. vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +816 -0
  240. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
  241. vllm/entrypoints/openai/tool_parsers/openai_tool_parser.py +73 -0
  242. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
  243. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
  244. vllm/entrypoints/openai/tool_parsers/qwen3coder_tool_parser.py +707 -0
  245. vllm/entrypoints/openai/tool_parsers/seed_oss_tool_parser.py +679 -0
  246. vllm/entrypoints/openai/tool_parsers/step3_tool_parser.py +296 -0
  247. vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
  248. vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +524 -0
  249. vllm/entrypoints/renderer.py +395 -0
  250. vllm/entrypoints/score_utils.py +232 -0
  251. vllm/entrypoints/ssl.py +75 -0
  252. vllm/entrypoints/tool.py +139 -0
  253. vllm/entrypoints/tool_server.py +195 -0
  254. vllm/entrypoints/utils.py +328 -0
  255. vllm/env_override.py +23 -0
  256. vllm/envs.py +1354 -0
  257. vllm/executor/__init__.py +0 -0
  258. vllm/executor/executor_base.py +378 -0
  259. vllm/executor/mp_distributed_executor.py +244 -0
  260. vllm/executor/msgspec_utils.py +35 -0
  261. vllm/executor/multiproc_worker_utils.py +279 -0
  262. vllm/executor/ray_distributed_executor.py +699 -0
  263. vllm/executor/ray_utils.py +410 -0
  264. vllm/executor/uniproc_executor.py +152 -0
  265. vllm/forward_context.py +273 -0
  266. vllm/inputs/__init__.py +44 -0
  267. vllm/inputs/data.py +356 -0
  268. vllm/inputs/parse.py +151 -0
  269. vllm/inputs/preprocess.py +973 -0
  270. vllm/inputs/registry.py +251 -0
  271. vllm/logger.py +229 -0
  272. vllm/logging_utils/__init__.py +8 -0
  273. vllm/logging_utils/dump_input.py +81 -0
  274. vllm/logging_utils/formatter.py +79 -0
  275. vllm/logits_process.py +119 -0
  276. vllm/logprobs.py +28 -0
  277. vllm/lora/__init__.py +0 -0
  278. vllm/lora/layers/__init__.py +34 -0
  279. vllm/lora/layers/base.py +69 -0
  280. vllm/lora/layers/base_linear.py +184 -0
  281. vllm/lora/layers/column_parallel_linear.py +622 -0
  282. vllm/lora/layers/logits_processor.py +247 -0
  283. vllm/lora/layers/qkv_x_parallel_linear.py +8 -0
  284. vllm/lora/layers/replicated_linear.py +61 -0
  285. vllm/lora/layers/row_parallel_linear.py +201 -0
  286. vllm/lora/layers/utils.py +60 -0
  287. vllm/lora/layers/vocal_parallel_embedding.py +172 -0
  288. vllm/lora/lora.py +199 -0
  289. vllm/lora/models.py +792 -0
  290. vllm/lora/ops/__init__.py +0 -0
  291. vllm/lora/ops/ipex_ops/__init__.py +7 -0
  292. vllm/lora/ops/ipex_ops/lora_ops.py +44 -0
  293. vllm/lora/ops/torch_ops/__init__.py +16 -0
  294. vllm/lora/ops/torch_ops/lora_ops.py +119 -0
  295. vllm/lora/ops/triton_ops/__init__.py +12 -0
  296. vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
  297. vllm/lora/ops/triton_ops/lora_expand_op.py +291 -0
  298. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
  299. vllm/lora/ops/triton_ops/lora_shrink_op.py +245 -0
  300. vllm/lora/ops/triton_ops/utils.py +126 -0
  301. vllm/lora/ops/xla_ops/__init__.py +7 -0
  302. vllm/lora/ops/xla_ops/lora_ops.py +145 -0
  303. vllm/lora/peft_helper.py +127 -0
  304. vllm/lora/punica_wrapper/__init__.py +10 -0
  305. vllm/lora/punica_wrapper/punica_base.py +458 -0
  306. vllm/lora/punica_wrapper/punica_cpu.py +349 -0
  307. vllm/lora/punica_wrapper/punica_gpu.py +279 -0
  308. vllm/lora/punica_wrapper/punica_selector.py +20 -0
  309. vllm/lora/punica_wrapper/punica_tpu.py +391 -0
  310. vllm/lora/punica_wrapper/punica_xpu.py +276 -0
  311. vllm/lora/punica_wrapper/utils.py +136 -0
  312. vllm/lora/request.py +99 -0
  313. vllm/lora/resolver.py +85 -0
  314. vllm/lora/utils.py +246 -0
  315. vllm/lora/worker_manager.py +256 -0
  316. vllm/model_executor/__init__.py +16 -0
  317. vllm/model_executor/custom_op.py +194 -0
  318. vllm/model_executor/layers/__init__.py +0 -0
  319. vllm/model_executor/layers/activation.py +575 -0
  320. vllm/model_executor/layers/attention_layer_base.py +23 -0
  321. vllm/model_executor/layers/fla/__init__.py +8 -0
  322. vllm/model_executor/layers/fla/ops/__init__.py +17 -0
  323. vllm/model_executor/layers/fla/ops/chunk.py +225 -0
  324. vllm/model_executor/layers/fla/ops/chunk_delta_h.py +290 -0
  325. vllm/model_executor/layers/fla/ops/chunk_o.py +177 -0
  326. vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +140 -0
  327. vllm/model_executor/layers/fla/ops/cumsum.py +226 -0
  328. vllm/model_executor/layers/fla/ops/fused_recurrent.py +366 -0
  329. vllm/model_executor/layers/fla/ops/index.py +39 -0
  330. vllm/model_executor/layers/fla/ops/l2norm.py +143 -0
  331. vllm/model_executor/layers/fla/ops/layernorm_guard.py +337 -0
  332. vllm/model_executor/layers/fla/ops/op.py +39 -0
  333. vllm/model_executor/layers/fla/ops/solve_tril.py +365 -0
  334. vllm/model_executor/layers/fla/ops/utils.py +180 -0
  335. vllm/model_executor/layers/fla/ops/wy_fast.py +114 -0
  336. vllm/model_executor/layers/fused_moe/__init__.py +80 -0
  337. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +304 -0
  338. vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +164 -0
  339. vllm/model_executor/layers/fused_moe/config.py +497 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
  454. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
  455. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
  456. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
  457. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  458. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  459. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  460. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  461. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  462. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  463. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  464. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  465. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  466. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  467. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  468. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  469. vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  470. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  471. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  472. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  473. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  474. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
  475. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  476. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  477. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  478. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  479. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  480. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
  481. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  482. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  483. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  484. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  485. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  486. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  487. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  488. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  489. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
  490. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  491. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  492. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  493. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  494. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  495. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  496. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  497. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  498. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  499. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  500. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  501. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  502. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  503. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  504. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  505. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  506. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  507. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  508. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  509. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  510. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  511. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  512. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  513. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  514. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  515. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  516. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  517. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  518. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  519. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  520. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
  521. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  522. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  523. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  524. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  525. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  526. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  527. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  528. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  529. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  530. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  531. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  532. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  533. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  534. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  535. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  536. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  537. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  538. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  539. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  540. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  541. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  542. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  543. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  544. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  545. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  546. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  547. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  548. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  549. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  550. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  551. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  552. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  553. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  554. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  555. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  556. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  557. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  558. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  559. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  560. vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +297 -0
  561. vllm/model_executor/layers/fused_moe/cutlass_moe.py +996 -0
  562. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +370 -0
  563. vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +413 -0
  564. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +280 -0
  565. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +229 -0
  566. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +243 -0
  567. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +97 -0
  568. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1042 -0
  569. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +240 -0
  570. vllm/model_executor/layers/fused_moe/fused_moe.py +2081 -0
  571. vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +247 -0
  572. vllm/model_executor/layers/fused_moe/layer.py +1951 -0
  573. vllm/model_executor/layers/fused_moe/modular_kernel.py +892 -0
  574. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +87 -0
  575. vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
  576. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +205 -0
  577. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  578. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +321 -0
  579. vllm/model_executor/layers/fused_moe/prepare_finalize.py +72 -0
  580. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +431 -0
  581. vllm/model_executor/layers/fused_moe/routing_simulator.py +291 -0
  582. vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +146 -0
  583. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +171 -0
  584. vllm/model_executor/layers/fused_moe/trtllm_moe.py +197 -0
  585. vllm/model_executor/layers/fused_moe/utils.py +270 -0
  586. vllm/model_executor/layers/layernorm.py +381 -0
  587. vllm/model_executor/layers/lightning_attn.py +661 -0
  588. vllm/model_executor/layers/linear.py +1567 -0
  589. vllm/model_executor/layers/logits_processor.py +199 -0
  590. vllm/model_executor/layers/mamba/__init__.py +0 -0
  591. vllm/model_executor/layers/mamba/abstract.py +45 -0
  592. vllm/model_executor/layers/mamba/linear_attn.py +432 -0
  593. vllm/model_executor/layers/mamba/mamba2_metadata.py +186 -0
  594. vllm/model_executor/layers/mamba/mamba_mixer.py +517 -0
  595. vllm/model_executor/layers/mamba/mamba_mixer2.py +803 -0
  596. vllm/model_executor/layers/mamba/mamba_utils.py +202 -0
  597. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  598. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +982 -0
  599. vllm/model_executor/layers/mamba/ops/layernorm_gated.py +168 -0
  600. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
  601. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +262 -0
  602. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +574 -0
  603. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +751 -0
  604. vllm/model_executor/layers/mamba/ops/ssd_combined.py +248 -0
  605. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +248 -0
  606. vllm/model_executor/layers/mamba/short_conv.py +270 -0
  607. vllm/model_executor/layers/mla.py +158 -0
  608. vllm/model_executor/layers/pooler.py +732 -0
  609. vllm/model_executor/layers/quantization/__init__.py +157 -0
  610. vllm/model_executor/layers/quantization/auto_round.py +388 -0
  611. vllm/model_executor/layers/quantization/awq.py +228 -0
  612. vllm/model_executor/layers/quantization/awq_marlin.py +548 -0
  613. vllm/model_executor/layers/quantization/awq_triton.py +320 -0
  614. vllm/model_executor/layers/quantization/base_config.py +164 -0
  615. vllm/model_executor/layers/quantization/bitblas.py +464 -0
  616. vllm/model_executor/layers/quantization/bitsandbytes.py +621 -0
  617. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  618. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +795 -0
  619. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1651 -0
  620. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +27 -0
  621. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +366 -0
  622. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  623. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
  624. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +105 -0
  625. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +161 -0
  626. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +169 -0
  627. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +135 -0
  628. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
  629. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +156 -0
  630. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
  631. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
  632. vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +227 -0
  633. vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +135 -0
  634. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +21 -0
  635. vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
  636. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
  637. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  638. vllm/model_executor/layers/quantization/deepgemm.py +81 -0
  639. vllm/model_executor/layers/quantization/deepspeedfp.py +196 -0
  640. vllm/model_executor/layers/quantization/experts_int8.py +215 -0
  641. vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
  642. vllm/model_executor/layers/quantization/fp8.py +1179 -0
  643. vllm/model_executor/layers/quantization/gguf.py +597 -0
  644. vllm/model_executor/layers/quantization/gptq.py +300 -0
  645. vllm/model_executor/layers/quantization/gptq_bitblas.py +448 -0
  646. vllm/model_executor/layers/quantization/gptq_marlin.py +700 -0
  647. vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
  648. vllm/model_executor/layers/quantization/hqq_marlin.py +333 -0
  649. vllm/model_executor/layers/quantization/inc.py +61 -0
  650. vllm/model_executor/layers/quantization/input_quant_fp8.py +103 -0
  651. vllm/model_executor/layers/quantization/ipex_quant.py +410 -0
  652. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  653. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +91 -0
  654. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +93 -0
  655. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
  656. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +302 -0
  657. vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +92 -0
  658. vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +117 -0
  659. vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +92 -0
  660. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
  661. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +144 -0
  662. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +139 -0
  663. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
  664. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +89 -0
  665. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +163 -0
  666. vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +206 -0
  667. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
  668. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
  669. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +104 -0
  670. vllm/model_executor/layers/quantization/kv_cache.py +139 -0
  671. vllm/model_executor/layers/quantization/modelopt.py +1548 -0
  672. vllm/model_executor/layers/quantization/moe_wna16.py +473 -0
  673. vllm/model_executor/layers/quantization/mxfp4.py +951 -0
  674. vllm/model_executor/layers/quantization/petit.py +306 -0
  675. vllm/model_executor/layers/quantization/ptpc_fp8.py +129 -0
  676. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  677. vllm/model_executor/layers/quantization/quark/quark.py +431 -0
  678. vllm/model_executor/layers/quantization/quark/quark_moe.py +434 -0
  679. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  680. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  681. vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +112 -0
  682. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +163 -0
  683. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
  684. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  685. vllm/model_executor/layers/quantization/rtn.py +456 -0
  686. vllm/model_executor/layers/quantization/schema.py +86 -0
  687. vllm/model_executor/layers/quantization/torchao.py +214 -0
  688. vllm/model_executor/layers/quantization/tpu_int8.py +125 -0
  689. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  690. vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
  691. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +210 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  722. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  723. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  724. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  725. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  726. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  727. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  728. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  729. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  730. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  731. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  732. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  733. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  734. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  735. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  736. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  737. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  738. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  739. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  740. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  741. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  742. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  743. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  744. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  745. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  746. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  747. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  748. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  749. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  750. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  751. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  752. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  753. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  754. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  755. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  756. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  757. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  758. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  759. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  760. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  761. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  762. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  763. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  764. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  765. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  766. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  767. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  768. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  769. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  770. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  771. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  772. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  773. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  774. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  775. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  776. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  777. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  778. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  779. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  780. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  781. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  782. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  783. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  784. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  785. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  786. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  787. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  788. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  789. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  790. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  791. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  792. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  793. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  794. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  795. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  796. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  797. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  798. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  799. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  800. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  801. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  802. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  803. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  804. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  805. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  806. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  807. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  808. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  809. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  810. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  811. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  812. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  813. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  814. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  815. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  816. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  817. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  818. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  819. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  820. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  821. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  822. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  823. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  824. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  825. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  826. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  827. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  828. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  829. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  830. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  831. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  832. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  833. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  834. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  835. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  836. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  837. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  838. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  839. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  840. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  841. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  842. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  843. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  844. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  845. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  846. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  847. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  848. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  849. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  850. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  851. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  852. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  853. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  854. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  855. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  856. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  857. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  858. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  859. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  860. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  861. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  862. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  863. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  864. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  865. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  866. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  867. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  868. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  869. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  870. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  871. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  872. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  873. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  874. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  875. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  876. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  877. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  878. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  879. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  880. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  881. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  882. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  883. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  884. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  885. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  886. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  887. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  888. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  889. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  890. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  891. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  892. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  893. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  894. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  895. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  896. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  897. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  898. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  899. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  900. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  901. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  902. vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
  903. vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +85 -0
  904. vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +258 -0
  905. vllm/model_executor/layers/quantization/utils/fp8_utils.py +795 -0
  906. vllm/model_executor/layers/quantization/utils/gptq_utils.py +96 -0
  907. vllm/model_executor/layers/quantization/utils/int8_utils.py +492 -0
  908. vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
  909. vllm/model_executor/layers/quantization/utils/machete_utils.py +50 -0
  910. vllm/model_executor/layers/quantization/utils/marlin_utils.py +479 -0
  911. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +396 -0
  912. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +345 -0
  913. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
  914. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
  915. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +132 -0
  916. vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +20 -0
  917. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +137 -0
  918. vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +59 -0
  919. vllm/model_executor/layers/quantization/utils/petit_utils.py +122 -0
  920. vllm/model_executor/layers/quantization/utils/quant_utils.py +627 -0
  921. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +458 -0
  922. vllm/model_executor/layers/resampler.py +270 -0
  923. vllm/model_executor/layers/rotary_embedding/__init__.py +190 -0
  924. vllm/model_executor/layers/rotary_embedding/base.py +156 -0
  925. vllm/model_executor/layers/rotary_embedding/common.py +105 -0
  926. vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +140 -0
  927. vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +197 -0
  928. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +41 -0
  929. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +67 -0
  930. vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +80 -0
  931. vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
  932. vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
  933. vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +81 -0
  934. vllm/model_executor/layers/rotary_embedding/mrope.py +1140 -0
  935. vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +42 -0
  936. vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +129 -0
  937. vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +68 -0
  938. vllm/model_executor/layers/sampler.py +1198 -0
  939. vllm/model_executor/layers/shared_fused_moe/__init__.py +6 -0
  940. vllm/model_executor/layers/shared_fused_moe/shared_fused_moe.py +56 -0
  941. vllm/model_executor/layers/utils.py +196 -0
  942. vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
  943. vllm/model_executor/model_loader/__init__.py +138 -0
  944. vllm/model_executor/model_loader/base_loader.py +52 -0
  945. vllm/model_executor/model_loader/bitsandbytes_loader.py +787 -0
  946. vllm/model_executor/model_loader/default_loader.py +278 -0
  947. vllm/model_executor/model_loader/dummy_loader.py +28 -0
  948. vllm/model_executor/model_loader/gguf_loader.py +155 -0
  949. vllm/model_executor/model_loader/runai_streamer_loader.py +104 -0
  950. vllm/model_executor/model_loader/sharded_state_loader.py +199 -0
  951. vllm/model_executor/model_loader/tensorizer.py +743 -0
  952. vllm/model_executor/model_loader/tensorizer_loader.py +143 -0
  953. vllm/model_executor/model_loader/tpu.py +114 -0
  954. vllm/model_executor/model_loader/utils.py +271 -0
  955. vllm/model_executor/model_loader/weight_utils.py +946 -0
  956. vllm/model_executor/models/__init__.py +30 -0
  957. vllm/model_executor/models/adapters.py +542 -0
  958. vllm/model_executor/models/aimv2.py +246 -0
  959. vllm/model_executor/models/apertus.py +582 -0
  960. vllm/model_executor/models/arcee.py +423 -0
  961. vllm/model_executor/models/arctic.py +560 -0
  962. vllm/model_executor/models/aria.py +662 -0
  963. vllm/model_executor/models/aya_vision.py +470 -0
  964. vllm/model_executor/models/baichuan.py +475 -0
  965. vllm/model_executor/models/bailing_moe.py +529 -0
  966. vllm/model_executor/models/bamba.py +582 -0
  967. vllm/model_executor/models/bart.py +1343 -0
  968. vllm/model_executor/models/bert.py +613 -0
  969. vllm/model_executor/models/bert_with_rope.py +687 -0
  970. vllm/model_executor/models/blip.py +339 -0
  971. vllm/model_executor/models/blip2.py +716 -0
  972. vllm/model_executor/models/bloom.py +374 -0
  973. vllm/model_executor/models/chameleon.py +1141 -0
  974. vllm/model_executor/models/chatglm.py +479 -0
  975. vllm/model_executor/models/clip.py +407 -0
  976. vllm/model_executor/models/cohere2_vision.py +484 -0
  977. vllm/model_executor/models/commandr.py +467 -0
  978. vllm/model_executor/models/config.py +434 -0
  979. vllm/model_executor/models/constant_size_cache.py +137 -0
  980. vllm/model_executor/models/dbrx.py +473 -0
  981. vllm/model_executor/models/deepseek.py +491 -0
  982. vllm/model_executor/models/deepseek_eagle.py +241 -0
  983. vllm/model_executor/models/deepseek_mtp.py +282 -0
  984. vllm/model_executor/models/deepseek_v2.py +1058 -0
  985. vllm/model_executor/models/deepseek_vl2.py +661 -0
  986. vllm/model_executor/models/donut.py +387 -0
  987. vllm/model_executor/models/dots1.py +547 -0
  988. vllm/model_executor/models/ernie45.py +43 -0
  989. vllm/model_executor/models/ernie45_moe.py +608 -0
  990. vllm/model_executor/models/ernie45_vl.py +1510 -0
  991. vllm/model_executor/models/ernie45_vl_moe.py +728 -0
  992. vllm/model_executor/models/ernie_mtp.py +287 -0
  993. vllm/model_executor/models/exaone.py +552 -0
  994. vllm/model_executor/models/exaone4.py +535 -0
  995. vllm/model_executor/models/fairseq2_llama.py +154 -0
  996. vllm/model_executor/models/falcon.py +511 -0
  997. vllm/model_executor/models/falcon_h1.py +739 -0
  998. vllm/model_executor/models/florence2.py +1107 -0
  999. vllm/model_executor/models/fuyu.py +401 -0
  1000. vllm/model_executor/models/gemma.py +428 -0
  1001. vllm/model_executor/models/gemma2.py +425 -0
  1002. vllm/model_executor/models/gemma3.py +542 -0
  1003. vllm/model_executor/models/gemma3_mm.py +723 -0
  1004. vllm/model_executor/models/gemma3n.py +830 -0
  1005. vllm/model_executor/models/gemma3n_mm.py +767 -0
  1006. vllm/model_executor/models/glm.py +23 -0
  1007. vllm/model_executor/models/glm4.py +305 -0
  1008. vllm/model_executor/models/glm4_1v.py +1669 -0
  1009. vllm/model_executor/models/glm4_moe.py +703 -0
  1010. vllm/model_executor/models/glm4_moe_mtp.py +306 -0
  1011. vllm/model_executor/models/glm4v.py +654 -0
  1012. vllm/model_executor/models/gpt2.py +383 -0
  1013. vllm/model_executor/models/gpt_bigcode.py +346 -0
  1014. vllm/model_executor/models/gpt_j.py +340 -0
  1015. vllm/model_executor/models/gpt_neox.py +333 -0
  1016. vllm/model_executor/models/gpt_oss.py +687 -0
  1017. vllm/model_executor/models/granite.py +498 -0
  1018. vllm/model_executor/models/granite_speech.py +799 -0
  1019. vllm/model_executor/models/granitemoe.py +541 -0
  1020. vllm/model_executor/models/granitemoehybrid.py +684 -0
  1021. vllm/model_executor/models/granitemoeshared.py +342 -0
  1022. vllm/model_executor/models/gritlm.py +262 -0
  1023. vllm/model_executor/models/grok1.py +550 -0
  1024. vllm/model_executor/models/h2ovl.py +536 -0
  1025. vllm/model_executor/models/hunyuan_v1.py +937 -0
  1026. vllm/model_executor/models/hyperclovax_vision.py +1206 -0
  1027. vllm/model_executor/models/idefics2_vision_model.py +416 -0
  1028. vllm/model_executor/models/idefics3.py +758 -0
  1029. vllm/model_executor/models/interfaces.py +854 -0
  1030. vllm/model_executor/models/interfaces_base.py +195 -0
  1031. vllm/model_executor/models/intern_vit.py +481 -0
  1032. vllm/model_executor/models/internlm2.py +453 -0
  1033. vllm/model_executor/models/internlm2_ve.py +148 -0
  1034. vllm/model_executor/models/interns1.py +832 -0
  1035. vllm/model_executor/models/interns1_vit.py +418 -0
  1036. vllm/model_executor/models/internvl.py +1423 -0
  1037. vllm/model_executor/models/jais.py +374 -0
  1038. vllm/model_executor/models/jamba.py +630 -0
  1039. vllm/model_executor/models/jina_vl.py +144 -0
  1040. vllm/model_executor/models/keye.py +1684 -0
  1041. vllm/model_executor/models/keye_vl1_5.py +601 -0
  1042. vllm/model_executor/models/kimi_vl.py +620 -0
  1043. vllm/model_executor/models/lfm2.py +558 -0
  1044. vllm/model_executor/models/llama.py +671 -0
  1045. vllm/model_executor/models/llama4.py +732 -0
  1046. vllm/model_executor/models/llama4_eagle.py +241 -0
  1047. vllm/model_executor/models/llama_eagle.py +171 -0
  1048. vllm/model_executor/models/llama_eagle3.py +292 -0
  1049. vllm/model_executor/models/llava.py +872 -0
  1050. vllm/model_executor/models/llava_next.py +572 -0
  1051. vllm/model_executor/models/llava_next_video.py +479 -0
  1052. vllm/model_executor/models/llava_onevision.py +945 -0
  1053. vllm/model_executor/models/mamba.py +310 -0
  1054. vllm/model_executor/models/mamba2.py +346 -0
  1055. vllm/model_executor/models/mamba_cache.py +83 -0
  1056. vllm/model_executor/models/medusa.py +219 -0
  1057. vllm/model_executor/models/midashenglm.py +788 -0
  1058. vllm/model_executor/models/mimo.py +191 -0
  1059. vllm/model_executor/models/mimo_mtp.py +273 -0
  1060. vllm/model_executor/models/minicpm.py +593 -0
  1061. vllm/model_executor/models/minicpm3.py +230 -0
  1062. vllm/model_executor/models/minicpm_eagle.py +391 -0
  1063. vllm/model_executor/models/minicpmo.py +804 -0
  1064. vllm/model_executor/models/minicpmv.py +1786 -0
  1065. vllm/model_executor/models/minimax_cache.py +36 -0
  1066. vllm/model_executor/models/minimax_text_01.py +1027 -0
  1067. vllm/model_executor/models/minimax_vl_01.py +431 -0
  1068. vllm/model_executor/models/mistral3.py +628 -0
  1069. vllm/model_executor/models/mixtral.py +494 -0
  1070. vllm/model_executor/models/mllama.py +1697 -0
  1071. vllm/model_executor/models/mllama4.py +1079 -0
  1072. vllm/model_executor/models/mlp_speculator.py +206 -0
  1073. vllm/model_executor/models/modernbert.py +374 -0
  1074. vllm/model_executor/models/module_mapping.py +72 -0
  1075. vllm/model_executor/models/molmo.py +1569 -0
  1076. vllm/model_executor/models/moonvit.py +663 -0
  1077. vllm/model_executor/models/motif.py +345 -0
  1078. vllm/model_executor/models/mpt.py +332 -0
  1079. vllm/model_executor/models/nano_nemotron_vl.py +1395 -0
  1080. vllm/model_executor/models/nemotron.py +509 -0
  1081. vllm/model_executor/models/nemotron_h.py +633 -0
  1082. vllm/model_executor/models/nemotron_nas.py +484 -0
  1083. vllm/model_executor/models/nemotron_vl.py +655 -0
  1084. vllm/model_executor/models/nvlm_d.py +203 -0
  1085. vllm/model_executor/models/olmo.py +406 -0
  1086. vllm/model_executor/models/olmo2.py +428 -0
  1087. vllm/model_executor/models/olmoe.py +485 -0
  1088. vllm/model_executor/models/opt.py +413 -0
  1089. vllm/model_executor/models/orion.py +350 -0
  1090. vllm/model_executor/models/ovis.py +572 -0
  1091. vllm/model_executor/models/ovis2_5.py +644 -0
  1092. vllm/model_executor/models/paligemma.py +414 -0
  1093. vllm/model_executor/models/persimmon.py +345 -0
  1094. vllm/model_executor/models/phi.py +357 -0
  1095. vllm/model_executor/models/phi3.py +19 -0
  1096. vllm/model_executor/models/phi3v.py +701 -0
  1097. vllm/model_executor/models/phi4_multimodal.py +1478 -0
  1098. vllm/model_executor/models/phi4flash.py +737 -0
  1099. vllm/model_executor/models/phi4mm.py +1281 -0
  1100. vllm/model_executor/models/phi4mm_audio.py +1254 -0
  1101. vllm/model_executor/models/phi4mm_utils.py +1875 -0
  1102. vllm/model_executor/models/phimoe.py +681 -0
  1103. vllm/model_executor/models/pixtral.py +1348 -0
  1104. vllm/model_executor/models/plamo2.py +1126 -0
  1105. vllm/model_executor/models/qwen.py +363 -0
  1106. vllm/model_executor/models/qwen2.py +526 -0
  1107. vllm/model_executor/models/qwen2_5_omni_thinker.py +985 -0
  1108. vllm/model_executor/models/qwen2_5_vl.py +1256 -0
  1109. vllm/model_executor/models/qwen2_audio.py +492 -0
  1110. vllm/model_executor/models/qwen2_moe.py +558 -0
  1111. vllm/model_executor/models/qwen2_rm.py +122 -0
  1112. vllm/model_executor/models/qwen2_vl.py +1512 -0
  1113. vllm/model_executor/models/qwen3.py +344 -0
  1114. vllm/model_executor/models/qwen3_moe.py +704 -0
  1115. vllm/model_executor/models/qwen3_next.py +1298 -0
  1116. vllm/model_executor/models/qwen3_next_mtp.py +285 -0
  1117. vllm/model_executor/models/qwen_vl.py +795 -0
  1118. vllm/model_executor/models/registry.py +891 -0
  1119. vllm/model_executor/models/roberta.py +252 -0
  1120. vllm/model_executor/models/rvl.py +103 -0
  1121. vllm/model_executor/models/seed_oss.py +488 -0
  1122. vllm/model_executor/models/siglip.py +524 -0
  1123. vllm/model_executor/models/siglip2navit.py +688 -0
  1124. vllm/model_executor/models/skyworkr1v.py +914 -0
  1125. vllm/model_executor/models/smolvlm.py +44 -0
  1126. vllm/model_executor/models/solar.py +506 -0
  1127. vllm/model_executor/models/stablelm.py +344 -0
  1128. vllm/model_executor/models/starcoder2.py +357 -0
  1129. vllm/model_executor/models/step3_text.py +521 -0
  1130. vllm/model_executor/models/step3_vl.py +1091 -0
  1131. vllm/model_executor/models/swin.py +475 -0
  1132. vllm/model_executor/models/tarsier.py +649 -0
  1133. vllm/model_executor/models/telechat2.py +151 -0
  1134. vllm/model_executor/models/teleflm.py +79 -0
  1135. vllm/model_executor/models/terratorch.py +294 -0
  1136. vllm/model_executor/models/transformers.py +883 -0
  1137. vllm/model_executor/models/ultravox.py +667 -0
  1138. vllm/model_executor/models/utils.py +770 -0
  1139. vllm/model_executor/models/vision.py +125 -0
  1140. vllm/model_executor/models/voxtral.py +789 -0
  1141. vllm/model_executor/models/whisper.py +966 -0
  1142. vllm/model_executor/models/zamba2.py +1056 -0
  1143. vllm/model_executor/parameter.py +599 -0
  1144. vllm/model_executor/sampling_metadata.py +597 -0
  1145. vllm/model_executor/utils.py +97 -0
  1146. vllm/model_executor/warmup/__init__.py +0 -0
  1147. vllm/model_executor/warmup/deep_gemm_warmup.py +223 -0
  1148. vllm/model_executor/warmup/kernel_warmup.py +83 -0
  1149. vllm/multimodal/__init__.py +35 -0
  1150. vllm/multimodal/audio.py +116 -0
  1151. vllm/multimodal/base.py +219 -0
  1152. vllm/multimodal/cache.py +507 -0
  1153. vllm/multimodal/hasher.py +110 -0
  1154. vllm/multimodal/image.py +130 -0
  1155. vllm/multimodal/inputs.py +979 -0
  1156. vllm/multimodal/parse.py +496 -0
  1157. vllm/multimodal/processing.py +1921 -0
  1158. vllm/multimodal/profiling.py +313 -0
  1159. vllm/multimodal/registry.py +375 -0
  1160. vllm/multimodal/utils.py +754 -0
  1161. vllm/multimodal/video.py +312 -0
  1162. vllm/outputs.py +517 -0
  1163. vllm/platforms/__init__.py +263 -0
  1164. vllm/platforms/cpu.py +353 -0
  1165. vllm/platforms/cuda.py +731 -0
  1166. vllm/platforms/interface.py +599 -0
  1167. vllm/platforms/rocm.py +504 -0
  1168. vllm/platforms/tpu.py +236 -0
  1169. vllm/platforms/xpu.py +243 -0
  1170. vllm/plugins/__init__.py +72 -0
  1171. vllm/plugins/io_processors/__init__.py +68 -0
  1172. vllm/plugins/io_processors/interface.py +67 -0
  1173. vllm/plugins/lora_resolvers/README.md +16 -0
  1174. vllm/plugins/lora_resolvers/__init__.py +0 -0
  1175. vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
  1176. vllm/pooling_params.py +183 -0
  1177. vllm/profiler/__init__.py +0 -0
  1178. vllm/profiler/layerwise_profile.py +375 -0
  1179. vllm/profiler/utils.py +148 -0
  1180. vllm/py.typed +2 -0
  1181. vllm/ray/__init__.py +0 -0
  1182. vllm/ray/lazy_utils.py +22 -0
  1183. vllm/ray/ray_env.py +72 -0
  1184. vllm/reasoning/__init__.py +25 -0
  1185. vllm/reasoning/abs_reasoning_parsers.py +202 -0
  1186. vllm/reasoning/deepseek_r1_reasoning_parser.py +173 -0
  1187. vllm/reasoning/glm4_moe_reasoning_parser.py +151 -0
  1188. vllm/reasoning/gptoss_reasoning_parser.py +87 -0
  1189. vllm/reasoning/granite_reasoning_parser.py +363 -0
  1190. vllm/reasoning/hunyuan_a13b_reasoning_parser.py +245 -0
  1191. vllm/reasoning/mistral_reasoning_parser.py +47 -0
  1192. vllm/reasoning/qwen3_reasoning_parser.py +151 -0
  1193. vllm/reasoning/step3_reasoning_parser.py +109 -0
  1194. vllm/sampling_params.py +577 -0
  1195. vllm/scalar_type.py +349 -0
  1196. vllm/scripts.py +15 -0
  1197. vllm/sequence.py +1465 -0
  1198. vllm/tasks.py +11 -0
  1199. vllm/test_utils.py +130 -0
  1200. vllm/third_party/__init__.py +0 -0
  1201. vllm/third_party/pynvml.py +6140 -0
  1202. vllm/tracing.py +136 -0
  1203. vllm/transformers_utils/__init__.py +24 -0
  1204. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1205. vllm/transformers_utils/chat_templates/registry.py +71 -0
  1206. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1207. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1208. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1209. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1210. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1211. vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
  1212. vllm/transformers_utils/config.py +1043 -0
  1213. vllm/transformers_utils/config_parser_base.py +20 -0
  1214. vllm/transformers_utils/configs/__init__.py +55 -0
  1215. vllm/transformers_utils/configs/arctic.py +207 -0
  1216. vllm/transformers_utils/configs/chatglm.py +72 -0
  1217. vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
  1218. vllm/transformers_utils/configs/eagle.py +84 -0
  1219. vllm/transformers_utils/configs/falcon.py +90 -0
  1220. vllm/transformers_utils/configs/jais.py +238 -0
  1221. vllm/transformers_utils/configs/kimi_vl.py +37 -0
  1222. vllm/transformers_utils/configs/medusa.py +63 -0
  1223. vllm/transformers_utils/configs/midashenglm.py +101 -0
  1224. vllm/transformers_utils/configs/mistral.py +165 -0
  1225. vllm/transformers_utils/configs/mlp_speculator.py +68 -0
  1226. vllm/transformers_utils/configs/moonvit.py +33 -0
  1227. vllm/transformers_utils/configs/nemotron.py +205 -0
  1228. vllm/transformers_utils/configs/nemotron_h.py +259 -0
  1229. vllm/transformers_utils/configs/nemotron_vl.py +56 -0
  1230. vllm/transformers_utils/configs/ovis.py +176 -0
  1231. vllm/transformers_utils/configs/qwen3_next.py +275 -0
  1232. vllm/transformers_utils/configs/speculators/__init__.py +2 -0
  1233. vllm/transformers_utils/configs/speculators/algos.py +32 -0
  1234. vllm/transformers_utils/configs/speculators/base.py +91 -0
  1235. vllm/transformers_utils/configs/step3_vl.py +123 -0
  1236. vllm/transformers_utils/configs/ultravox.py +120 -0
  1237. vllm/transformers_utils/detokenizer.py +169 -0
  1238. vllm/transformers_utils/detokenizer_utils.py +199 -0
  1239. vllm/transformers_utils/dynamic_module.py +60 -0
  1240. vllm/transformers_utils/processor.py +245 -0
  1241. vllm/transformers_utils/processors/__init__.py +16 -0
  1242. vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
  1243. vllm/transformers_utils/processors/ovis.py +420 -0
  1244. vllm/transformers_utils/processors/ovis2_5.py +458 -0
  1245. vllm/transformers_utils/runai_utils.py +99 -0
  1246. vllm/transformers_utils/s3_utils.py +90 -0
  1247. vllm/transformers_utils/tokenizer.py +293 -0
  1248. vllm/transformers_utils/tokenizer_base.py +149 -0
  1249. vllm/transformers_utils/tokenizer_group.py +132 -0
  1250. vllm/transformers_utils/tokenizers/__init__.py +10 -0
  1251. vllm/transformers_utils/tokenizers/mistral.py +520 -0
  1252. vllm/transformers_utils/utils.py +99 -0
  1253. vllm/triton_utils/__init__.py +16 -0
  1254. vllm/triton_utils/importing.py +95 -0
  1255. vllm/usage/__init__.py +0 -0
  1256. vllm/usage/usage_lib.py +259 -0
  1257. vllm/utils/__init__.py +3438 -0
  1258. vllm/utils/deep_gemm.py +212 -0
  1259. vllm/utils/flashinfer.py +372 -0
  1260. vllm/utils/jsontree.py +90 -0
  1261. vllm/utils/tensor_schema.py +236 -0
  1262. vllm/v1/__init__.py +0 -0
  1263. vllm/v1/attention/__init__.py +0 -0
  1264. vllm/v1/attention/backends/__init__.py +0 -0
  1265. vllm/v1/attention/backends/cpu_attn.py +922 -0
  1266. vllm/v1/attention/backends/flash_attn.py +800 -0
  1267. vllm/v1/attention/backends/flashinfer.py +1128 -0
  1268. vllm/v1/attention/backends/flex_attention.py +796 -0
  1269. vllm/v1/attention/backends/gdn_attn.py +320 -0
  1270. vllm/v1/attention/backends/linear_attn.py +68 -0
  1271. vllm/v1/attention/backends/mamba1_attn.py +81 -0
  1272. vllm/v1/attention/backends/mamba2_attn.py +224 -0
  1273. vllm/v1/attention/backends/mamba_attn.py +52 -0
  1274. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1275. vllm/v1/attention/backends/mla/common.py +1608 -0
  1276. vllm/v1/attention/backends/mla/cutlass_mla.py +301 -0
  1277. vllm/v1/attention/backends/mla/flashattn_mla.py +273 -0
  1278. vllm/v1/attention/backends/mla/flashinfer_mla.py +110 -0
  1279. vllm/v1/attention/backends/mla/flashmla.py +213 -0
  1280. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +255 -0
  1281. vllm/v1/attention/backends/mla/triton_mla.py +175 -0
  1282. vllm/v1/attention/backends/pallas.py +413 -0
  1283. vllm/v1/attention/backends/rocm_aiter_fa.py +548 -0
  1284. vllm/v1/attention/backends/short_conv_attn.py +82 -0
  1285. vllm/v1/attention/backends/tree_attn.py +450 -0
  1286. vllm/v1/attention/backends/triton_attn.py +430 -0
  1287. vllm/v1/attention/backends/utils.py +834 -0
  1288. vllm/v1/attention/backends/xformers.py +437 -0
  1289. vllm/v1/core/__init__.py +0 -0
  1290. vllm/v1/core/block_pool.py +330 -0
  1291. vllm/v1/core/encoder_cache_manager.py +333 -0
  1292. vllm/v1/core/kv_cache_coordinator.py +440 -0
  1293. vllm/v1/core/kv_cache_manager.py +398 -0
  1294. vllm/v1/core/kv_cache_utils.py +1169 -0
  1295. vllm/v1/core/sched/__init__.py +0 -0
  1296. vllm/v1/core/sched/async_scheduler.py +47 -0
  1297. vllm/v1/core/sched/interface.py +158 -0
  1298. vllm/v1/core/sched/output.py +162 -0
  1299. vllm/v1/core/sched/request_queue.py +224 -0
  1300. vllm/v1/core/sched/scheduler.py +1287 -0
  1301. vllm/v1/core/sched/utils.py +69 -0
  1302. vllm/v1/core/single_type_kv_cache_manager.py +670 -0
  1303. vllm/v1/cudagraph_dispatcher.py +121 -0
  1304. vllm/v1/engine/__init__.py +202 -0
  1305. vllm/v1/engine/async_llm.py +757 -0
  1306. vllm/v1/engine/coordinator.py +357 -0
  1307. vllm/v1/engine/core.py +1245 -0
  1308. vllm/v1/engine/core_client.py +1333 -0
  1309. vllm/v1/engine/detokenizer.py +300 -0
  1310. vllm/v1/engine/exceptions.py +17 -0
  1311. vllm/v1/engine/llm_engine.py +332 -0
  1312. vllm/v1/engine/logprobs.py +201 -0
  1313. vllm/v1/engine/output_processor.py +558 -0
  1314. vllm/v1/engine/parallel_sampling.py +133 -0
  1315. vllm/v1/engine/processor.py +524 -0
  1316. vllm/v1/engine/utils.py +857 -0
  1317. vllm/v1/executor/__init__.py +0 -0
  1318. vllm/v1/executor/abstract.py +126 -0
  1319. vllm/v1/executor/multiproc_executor.py +683 -0
  1320. vllm/v1/executor/ray_distributed_executor.py +109 -0
  1321. vllm/v1/kv_cache_interface.py +275 -0
  1322. vllm/v1/metrics/__init__.py +0 -0
  1323. vllm/v1/metrics/loggers.py +717 -0
  1324. vllm/v1/metrics/prometheus.py +82 -0
  1325. vllm/v1/metrics/ray_wrappers.py +133 -0
  1326. vllm/v1/metrics/reader.py +246 -0
  1327. vllm/v1/metrics/stats.py +248 -0
  1328. vllm/v1/outputs.py +147 -0
  1329. vllm/v1/pool/__init__.py +0 -0
  1330. vllm/v1/pool/metadata.py +77 -0
  1331. vllm/v1/request.py +237 -0
  1332. vllm/v1/sample/__init__.py +0 -0
  1333. vllm/v1/sample/logits_processor/__init__.py +294 -0
  1334. vllm/v1/sample/logits_processor/builtin.py +273 -0
  1335. vllm/v1/sample/logits_processor/interface.py +97 -0
  1336. vllm/v1/sample/logits_processor/state.py +161 -0
  1337. vllm/v1/sample/metadata.py +43 -0
  1338. vllm/v1/sample/ops/__init__.py +0 -0
  1339. vllm/v1/sample/ops/bad_words.py +39 -0
  1340. vllm/v1/sample/ops/logprobs.py +26 -0
  1341. vllm/v1/sample/ops/penalties.py +43 -0
  1342. vllm/v1/sample/ops/topk_topp_sampler.py +254 -0
  1343. vllm/v1/sample/rejection_sampler.py +623 -0
  1344. vllm/v1/sample/sampler.py +281 -0
  1345. vllm/v1/sample/tpu/__init__.py +0 -0
  1346. vllm/v1/sample/tpu/metadata.py +124 -0
  1347. vllm/v1/sample/tpu/sampler.py +213 -0
  1348. vllm/v1/serial_utils.py +395 -0
  1349. vllm/v1/spec_decode/__init__.py +0 -0
  1350. vllm/v1/spec_decode/eagle.py +740 -0
  1351. vllm/v1/spec_decode/medusa.py +66 -0
  1352. vllm/v1/spec_decode/metadata.py +62 -0
  1353. vllm/v1/spec_decode/metrics.py +191 -0
  1354. vllm/v1/spec_decode/ngram_proposer.py +157 -0
  1355. vllm/v1/spec_decode/utils.py +14 -0
  1356. vllm/v1/structured_output/__init__.py +297 -0
  1357. vllm/v1/structured_output/backend_guidance.py +245 -0
  1358. vllm/v1/structured_output/backend_lm_format_enforcer.py +167 -0
  1359. vllm/v1/structured_output/backend_outlines.py +320 -0
  1360. vllm/v1/structured_output/backend_types.py +134 -0
  1361. vllm/v1/structured_output/backend_xgrammar.py +323 -0
  1362. vllm/v1/structured_output/request.py +86 -0
  1363. vllm/v1/structured_output/utils.py +373 -0
  1364. vllm/v1/utils.py +382 -0
  1365. vllm/v1/worker/__init__.py +0 -0
  1366. vllm/v1/worker/block_table.py +221 -0
  1367. vllm/v1/worker/cpu_model_runner.py +163 -0
  1368. vllm/v1/worker/cpu_worker.py +183 -0
  1369. vllm/v1/worker/gpu_input_batch.py +821 -0
  1370. vllm/v1/worker/gpu_model_runner.py +3743 -0
  1371. vllm/v1/worker/gpu_worker.py +697 -0
  1372. vllm/v1/worker/kv_connector_model_runner_mixin.py +122 -0
  1373. vllm/v1/worker/lora_model_runner_mixin.py +192 -0
  1374. vllm/v1/worker/tpu_input_batch.py +585 -0
  1375. vllm/v1/worker/tpu_model_runner.py +1947 -0
  1376. vllm/v1/worker/tpu_worker.py +340 -0
  1377. vllm/v1/worker/utils.py +290 -0
  1378. vllm/v1/worker/worker_base.py +65 -0
  1379. vllm/v1/worker/xpu_model_runner.py +53 -0
  1380. vllm/v1/worker/xpu_worker.py +179 -0
  1381. vllm/version.py +41 -0
  1382. vllm/vllm_flash_attn/.gitkeep +0 -0
  1383. vllm/worker/__init__.py +0 -0
  1384. vllm/worker/cache_engine.py +145 -0
  1385. vllm/worker/enc_dec_model_runner.py +553 -0
  1386. vllm/worker/model_runner.py +2016 -0
  1387. vllm/worker/model_runner_base.py +307 -0
  1388. vllm/worker/utils.py +49 -0
  1389. vllm/worker/worker.py +670 -0
  1390. vllm/worker/worker_base.py +651 -0
  1391. vllm_cpu_avx512vnni-0.10.2.post2.dist-info/METADATA +326 -0
  1392. vllm_cpu_avx512vnni-0.10.2.post2.dist-info/RECORD +1395 -0
  1393. vllm_cpu_avx512vnni-0.10.2.post2.dist-info/WHEEL +5 -0
  1394. vllm_cpu_avx512vnni-0.10.2.post2.dist-info/entry_points.txt +5 -0
  1395. vllm_cpu_avx512vnni-0.10.2.post2.dist-info/top_level.txt +1 -0
vllm/worker/model_runner.py
@@ -0,0 +1,2016 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+
4
+ import dataclasses
5
+ import gc
6
+ import inspect
7
+ import itertools
8
+ import time
9
+ import weakref
10
+ from contextlib import contextmanager
11
+ from dataclasses import dataclass
12
+ from typing import (TYPE_CHECKING, Any, Callable, Dict, List, Optional, Set,
13
+ Tuple, Type, TypeVar, Union)
14
+
15
+ import numpy as np
16
+ import torch
17
+ import torch.distributed
18
+ import torch.nn as nn
19
+ from tqdm.auto import tqdm
20
+
21
+ import vllm.envs as envs
22
+ from vllm.attention import AttentionMetadata, get_attn_backend
23
+ from vllm.attention.backends.abstract import AttentionState
24
+ from vllm.attention.backends.utils import CommonAttentionState
25
+ from vllm.compilation.counter import compilation_counter
26
+ from vllm.config import CompilationLevel, VllmConfig
27
+ from vllm.core.scheduler import SchedulerOutputs
28
+ from vllm.distributed import broadcast_tensor_dict, get_pp_group
29
+ from vllm.distributed.kv_transfer import get_kv_transfer_group
30
+ from vllm.distributed.parallel_state import (get_tensor_model_parallel_rank,
31
+ graph_capture)
32
+ from vllm.forward_context import get_forward_context, set_forward_context
33
+ from vllm.inputs import INPUT_REGISTRY, InputRegistry
34
+ from vllm.logger import init_logger
35
+ from vllm.lora.layers import LoRAMapping
36
+ from vllm.lora.request import LoRARequest
37
+ from vllm.lora.worker_manager import LRUCacheWorkerLoRAManager
38
+ from vllm.model_executor import SamplingMetadata, SamplingMetadataCache
39
+ from vllm.model_executor.layers.rotary_embedding import MRotaryEmbedding
40
+ from vllm.model_executor.layers.sampler import (Sampler, SamplerOutput,
41
+ get_sampler)
42
+ from vllm.model_executor.model_loader import get_model
43
+ from vllm.model_executor.model_loader.tensorizer import TensorizerConfig
44
+ from vllm.model_executor.models import supports_lora, supports_multimodal
45
+ from vllm.model_executor.models.utils import set_cpu_offload_max_bytes
46
+ from vllm.multimodal import (MULTIMODAL_REGISTRY, BatchedTensorInputs,
47
+ MultiModalKwargs, MultiModalPlaceholderMap,
48
+ MultiModalRegistry)
49
+ from vllm.sampling_params import SamplingParams
50
+ from vllm.sequence import IntermediateTensors, SequenceGroupMetadata
51
+ from vllm.utils import (DeviceMemoryProfiler, GiB_bytes, PyObjectCache,
52
+ async_tensor_h2d, flatten_2d_lists,
53
+ is_pin_memory_available, supports_dynamo,
54
+ weak_ref_tensor)
55
+ from vllm.worker.model_runner_base import (
56
+ InputProcessingError, ModelRunnerBase, ModelRunnerInputBase,
57
+ ModelRunnerInputBuilderBase, _add_attn_metadata_broadcastable_dict,
58
+ _add_sampling_metadata_broadcastable_dict,
59
+ _init_attn_metadata_from_tensor_dict,
60
+ _init_sampling_metadata_from_tensor_dict)
61
+
62
+ if TYPE_CHECKING:
63
+ from vllm.attention.backends.abstract import AttentionBackend
64
+
65
+ logger = init_logger(__name__)
66
+
67
+ LORA_WARMUP_RANK = 8
68
+
69
+ _NUM_WARMUP_ITERS = 2
70
+
71
+ TModelInputForGPU = TypeVar('TModelInputForGPU', bound="ModelInputForGPU")
72
+
73
+ # For now, bump up cache limits for recompilations during CUDA graph warmups.
74
+ torch._dynamo.config.cache_size_limit = 128
75
+ torch._dynamo.config.accumulated_cache_size_limit = 128
76
+
77
+
78
+ @dataclass(frozen=True)
79
+ class ModelInputForGPU(ModelRunnerInputBase):
80
+ """
81
+ This base class contains metadata needed for the base model forward pass
82
+ but not metadata for possible additional steps, e.g., sampling. Model
83
+ runners that run additional steps should subclass this class to add
84
+ additional fields.
85
+ """
86
+ input_tokens: Optional[torch.Tensor] = None
87
+ inputs_embeds: Optional[torch.Tensor] = None
88
+ input_positions: Optional[torch.Tensor] = None
89
+ seq_lens: Optional[List[int]] = None
90
+ query_lens: Optional[List[int]] = None
91
+ lora_mapping: Optional["LoRAMapping"] = None
92
+ lora_requests: Optional[Set[LoRARequest]] = None
93
+ attn_metadata: Optional["AttentionMetadata"] = None
94
+ multi_modal_kwargs: Optional[BatchedTensorInputs] = None
95
+ request_ids_to_seq_ids: Optional[Dict[str, List[int]]] = None
96
+ finished_requests_ids: Optional[List[str]] = None
97
+ virtual_engine: int = 0
98
+ async_callback: Optional[Callable] = None
99
+ scheduler_outputs: Optional[SchedulerOutputs] = None
100
+ previous_hidden_states: Optional[torch.Tensor] = None
101
+
102
+ def as_broadcastable_tensor_dict(self) -> Dict[str, Any]:
103
+ tensor_dict = {
104
+ "input_tokens": self.input_tokens,
105
+ "inputs_embeds": self.inputs_embeds,
106
+ "input_positions": self.input_positions,
107
+ "lora_requests": self.lora_requests,
108
+ "lora_mapping": self.lora_mapping,
109
+ "multi_modal_kwargs": self.multi_modal_kwargs,
110
+ "virtual_engine": self.virtual_engine,
111
+ "request_ids_to_seq_ids": self.request_ids_to_seq_ids,
112
+ "finished_requests_ids": self.finished_requests_ids,
113
+ }
114
+ _add_attn_metadata_broadcastable_dict(tensor_dict, self.attn_metadata)
115
+ return tensor_dict
116
+
117
+ @classmethod
118
+ def from_broadcasted_tensor_dict(
119
+ cls: Type[TModelInputForGPU],
120
+ tensor_dict: Dict[str, Any],
121
+ attn_backend: Optional["AttentionBackend"] = None,
122
+ ) -> TModelInputForGPU:
123
+ if attn_backend is not None:
124
+ tensor_dict = _init_attn_metadata_from_tensor_dict(
125
+ attn_backend, tensor_dict)
126
+ return cls(**tensor_dict)
127
+
128
+ # Exclude `async_callback` to be able to pickle this object
129
+ def __getstate__(self):
130
+ state = self.__dict__.copy()
131
+ del state["async_callback"]
132
+ return state
133
+
134
+ # TODO: What happens when we unpickle this object?
135
+ # How can we update this callback to properly pass it to the engine?
136
+ def __setstate__(self, state):
137
+ self.__dict__.update(state)
138
+ self.__dict__.update({'async_callback': None})
139
+
140
+
141
+ @dataclass(frozen=True)
142
+ class ModelInputForGPUWithSamplingMetadata(ModelInputForGPU):
143
+ """
144
+ Used by the ModelRunner.
145
+ """
146
+ sampling_metadata: Optional["SamplingMetadata"] = None
147
+ # Used for speculative decoding. We do not broadcast it because it is only
148
+ # used by the driver worker.
149
+ is_prompt: Optional[bool] = None
150
+
151
+ def as_broadcastable_tensor_dict(self) -> Dict[str, Any]:
152
+ tensor_dict = {
153
+ "input_tokens": self.input_tokens,
154
+ "inputs_embeds": self.inputs_embeds,
155
+ "input_positions": self.input_positions,
156
+ "lora_requests": self.lora_requests,
157
+ "lora_mapping": self.lora_mapping,
158
+ "multi_modal_kwargs": self.multi_modal_kwargs,
159
+ "virtual_engine": self.virtual_engine,
160
+ "request_ids_to_seq_ids": self.request_ids_to_seq_ids,
161
+ "finished_requests_ids": self.finished_requests_ids,
162
+ }
163
+ _add_attn_metadata_broadcastable_dict(tensor_dict, self.attn_metadata)
164
+ _add_sampling_metadata_broadcastable_dict(tensor_dict,
165
+ self.sampling_metadata)
166
+ return tensor_dict
167
+
168
+ @classmethod
169
+ def from_broadcasted_tensor_dict(
170
+ cls,
171
+ tensor_dict: Dict[str, Any],
172
+ attn_backend: Optional["AttentionBackend"] = None,
173
+ ) -> "ModelInputForGPUWithSamplingMetadata":
174
+ tensor_dict = _init_sampling_metadata_from_tensor_dict(tensor_dict)
175
+ if attn_backend is not None:
176
+ tensor_dict = _init_attn_metadata_from_tensor_dict(
177
+ attn_backend, tensor_dict)
178
+ return cls(**tensor_dict)
179
+
180
+
181
+ class ModelInputForGPUBuilder(ModelRunnerInputBuilderBase[ModelInputForGPU]):
182
+ """Build ModelInputForGPU from SequenceGroupMetadata."""
183
+
184
+ # Note: ideally we would be using a dataclass(kw_only=True)
185
+ # here, so that this can be subclassed easily,
186
+ # but kw_only is not supported in python<3.10.
187
+ class InterDataForSeqGroup:
188
+ """Intermediate data for the current sequence group."""
189
+
190
+ def simple_reinit(self):
191
+ self.input_tokens[0].clear() # type: ignore
192
+ self.inputs_embeds = None # type: ignore
193
+ self.input_positions[0].clear() # type: ignore
194
+ self.mrope_input_positions = None # type: ignore
195
+ self.seq_lens[0] = 0 # type: ignore
196
+ self.orig_seq_lens[0] = 0 # type: ignore
197
+ self.prompt_lens[0] = 0 # type: ignore
198
+ self.query_lens[0] = 0 # type: ignore
199
+ self.context_lens[0] = 0 # type: ignore
200
+ self.curr_sliding_window_blocks[0] = 0 # type: ignore
201
+ self.lora_index_mapping.clear() # type: ignore
202
+ self.lora_prompt_mapping.clear() # type: ignore
203
+ self.lora_requests.clear() # type: ignore
204
+
205
+ def __init__(
206
+ self,
207
+ *,
208
+ # From sequence group metadata.
209
+ request_id: str,
210
+ seq_ids: List[int],
211
+ is_prompt: bool,
212
+ block_tables: Optional[Dict[int, List[int]]],
213
+ computed_block_nums: List[int],
214
+ n_seqs: int = 0,
215
+
216
+ # Input tokens and positions.
217
+ input_tokens: Optional[List[List[int]]] = None,
218
+ inputs_embeds: Optional[torch.Tensor] = None,
219
+ input_positions: Optional[List[List[int]]] = None,
220
+ mrope_input_positions: Optional[List[List[List[int]]]] = None,
221
+
222
+ # The sequence length (may be capped to the sliding window).
223
+ seq_lens: Optional[List[int]] = None,
224
+ # The original sequence length (before applying sliding window).
225
+ # This is used to compute slot mapping.
226
+ orig_seq_lens: Optional[List[int]] = None,
227
+ # This is used in the dual-chunk flash attention backend.
228
+ prompt_lens: Optional[List[int]] = None,
229
+ # The query length.
230
+ query_lens: Optional[List[int]] = None,
231
+ # The number of tokens that are already computed.
232
+ context_lens: Optional[List[int]] = None,
233
+ # The current sliding window block.
234
+ curr_sliding_window_blocks: Optional[List[int]] = None,
235
+
236
+ # LoRA inputs.
237
+ lora_index_mapping: Optional[List[List[int]]] = None,
238
+ lora_prompt_mapping: Optional[List[List[int]]] = None,
239
+ lora_requests: Optional[Set[LoRARequest]] = None,
240
+
241
+ # Multi-modal inputs.
242
+ multi_modal_kwargs: Optional[MultiModalKwargs] = None,
243
+ multi_modal_placeholder_maps: Optional[Dict[
244
+ str, MultiModalPlaceholderMap]] = None,
245
+
246
+ # Whether the prefix cache is hit (prefill only).
247
+ prefix_cache_hit: bool = False,
248
+ reinit: bool = False,
249
+ reinit_use_defaults: bool = False,
250
+ encoder_seq_len: int = 0,
251
+ ):
252
+ if reinit:
253
+ assert len(self.seq_ids) == len(seq_ids) # type: ignore
254
+ for i, seq_id in enumerate(seq_ids):
255
+ self.seq_ids[i] = seq_id # type: ignore
256
+ else:
257
+ self.seq_ids = seq_ids
258
+
259
+ self.request_id = request_id
260
+ self.is_prompt = is_prompt
261
+ self.block_tables = block_tables
262
+ self.computed_block_nums = computed_block_nums
263
+ self.n_seqs = n_seqs
264
+ self.encoder_seq_len = encoder_seq_len
265
+
266
+ if reinit:
267
+ if len(self.seq_ids) == 1 and reinit_use_defaults:
268
+ self.simple_reinit()
269
+ else:
270
+ if input_tokens:
271
+ self.input_tokens = input_tokens
272
+ else:
273
+ for seq_id in range(len(self.seq_ids)):
274
+ self.input_tokens[seq_id].clear()
275
+
276
+ self.inputs_embeds = inputs_embeds
277
+
278
+ if input_positions:
279
+ self.input_positions = input_positions
280
+ else:
281
+ for seq_id in range(len(self.seq_ids)):
282
+ self.input_positions[seq_id].clear()
283
+
284
+ self.mrope_input_positions = None
285
+
286
+ if seq_lens:
287
+ self.seq_lens = seq_lens
288
+ else:
289
+ for seq_id in range(len(self.seq_ids)):
290
+ self.seq_lens[seq_id] = 0
291
+
292
+ if orig_seq_lens:
293
+ self.orig_seq_lens = orig_seq_lens
294
+ else:
295
+ for seq_id in range(len(self.seq_ids)):
296
+ self.orig_seq_lens[seq_id] = 0
297
+
298
+ if prompt_lens:
299
+ self.prompt_lens = prompt_lens
300
+ else:
301
+ for seq_id in range(len(self.seq_ids)):
302
+ self.prompt_lens[seq_id] = 0
303
+
304
+ if query_lens:
305
+ self.query_lens = query_lens
306
+ else:
307
+ for seq_id in range(len(self.seq_ids)):
308
+ self.query_lens[seq_id] = 0
309
+
310
+ if context_lens:
311
+ self.context_lens = context_lens
312
+ else:
313
+ for seq_id in range(len(self.seq_ids)):
314
+ self.context_lens[seq_id] = 0
315
+
316
+ if curr_sliding_window_blocks:
317
+ self.curr_sliding_window_blocks = \
318
+ curr_sliding_window_blocks
319
+ else:
320
+ for seq_id in range(len(self.seq_ids)):
321
+ self.curr_sliding_window_blocks[seq_id] = 0
322
+
323
+ if lora_index_mapping:
324
+ self.lora_index_mapping = lora_index_mapping
325
+ else:
326
+ self.lora_index_mapping.clear()
327
+
328
+ if lora_prompt_mapping:
329
+ self.lora_prompt_mapping = lora_prompt_mapping
330
+ else:
331
+ self.lora_prompt_mapping.clear()
332
+
333
+ if lora_requests:
334
+ self.lora_requests = lora_requests
335
+ else:
336
+ self.lora_requests.clear()
337
+
338
+ else:
339
+ self.input_tokens = input_tokens or []
340
+ self.inputs_embeds = inputs_embeds
341
+ self.input_positions = input_positions or []
342
+ self.mrope_input_positions = mrope_input_positions or None
343
+ self.seq_lens = seq_lens or []
344
+ self.orig_seq_lens = orig_seq_lens or []
345
+ self.prompt_lens = prompt_lens or []
346
+ self.query_lens = query_lens or []
347
+ self.context_lens = context_lens or []
348
+ self.curr_sliding_window_blocks = \
349
+ curr_sliding_window_blocks or []
350
+
351
+ self.lora_index_mapping = lora_index_mapping or []
352
+ self.lora_prompt_mapping = lora_prompt_mapping or []
353
+ self.lora_requests = lora_requests or set()
354
+
355
+ self.multi_modal_kwargs = multi_modal_kwargs
356
+ self.multi_modal_placeholder_maps = multi_modal_placeholder_maps
357
+ self.prefix_cache_hit = prefix_cache_hit
358
+
359
+ self.n_seqs = len(self.seq_ids)
360
+
361
+ if not reinit:
362
+ self.__post_init__()
363
+
364
+ def __post_init__(self):
365
+ self.n_seqs = len(self.seq_ids)
366
+
367
+ self.input_tokens = [[] for _ in range(self.n_seqs)]
368
+ self.input_positions = [[] for _ in range(self.n_seqs)]
369
+ self.mrope_input_positions = None
370
+ self.seq_lens = [0] * self.n_seqs
371
+ self.orig_seq_lens = [0] * self.n_seqs
372
+ self.prompt_lens = [0] * self.n_seqs
373
+ self.query_lens = [0] * self.n_seqs
374
+ self.context_lens = [0] * self.n_seqs
375
+ self.curr_sliding_window_blocks = [0] * self.n_seqs
376
+
377
+ self.lora_index_mapping = []
378
+ self.lora_prompt_mapping = []
379
+
380
+ def __repr__(self) -> str:
381
+ return (f"InterDataForSeqGroup("
382
+ f"request_id={self.request_id}, "
383
+ f"seq_ids={self.seq_ids}, "
384
+ f"is_prompt={self.is_prompt}, "
385
+ f"block_tables={self.block_tables}, "
386
+ f"computed_block_nums={self.computed_block_nums}, "
387
+ f"n_seqs={self.n_seqs}, "
388
+ f"input_tokens={self.input_tokens}, "
389
+ f"inputs_embeds.shape="
390
+ f"{getattr(self.inputs_embeds, 'shape', None)}, "
391
+ f"input_positions={self.input_positions}, "
392
+ f"mrope_input_positions={self.mrope_input_positions}, "
393
+ f"seq_lens={self.seq_lens}, "
394
+ f"orig_seq_lens={self.orig_seq_lens}, "
395
+ f"query_lens={self.query_lens}, "
396
+ f"context_lens={self.context_lens}, "
397
+ f"multi_modal_kwargs={self.multi_modal_kwargs}")
398
+
399
+ def gen_inter_data_builder(self, num_seqs: int):
400
+ return lambda: ModelInputForGPUBuilder.InterDataForSeqGroup(
401
+ request_id="",
402
+ seq_ids=[0] * num_seqs,
403
+ is_prompt=True,
404
+ block_tables=None,
405
+ computed_block_nums=[])
406
+
407
+ def init_cached_inter_data(self, *args, **kwargs):
408
+ assert len(args) == 0
409
+ assert "seq_ids" in kwargs
410
+ seq_ids = kwargs["seq_ids"]
411
+ num_seqs = len(seq_ids)
412
+
413
+ # The inter-data cache is per model_runner
414
+ inter_data_cache = self.runner.inter_data_cache
415
+ if num_seqs not in inter_data_cache:
416
+ inter_data_cache[num_seqs] = PyObjectCache(
417
+ self.gen_inter_data_builder(num_seqs))
418
+
419
+ obj = inter_data_cache[num_seqs].get_object()
420
+ obj.__init__(*args, **kwargs)
421
+ return obj
422
+
423
+ def reset_cached_inter_data(self):
424
+ for cache in self.runner.inter_data_cache.values():
425
+ cache.reset()
426
+
427
+ def __init__(self,
428
+ runner: "GPUModelRunnerBase",
429
+ finished_requests_ids: Optional[List[str]] = None):
430
+ super().__init__()
431
+ # Compute functions for each sequence in a sequence group.
432
+ # WARNING: The order of the functions matters!
433
+ self.per_seq_compute_fns = [
434
+ self._compute_lens,
435
+ self._compute_for_prefix_cache_hit,
436
+ self._compute_for_sliding_window,
437
+ self._compute_lora_input,
438
+ ]
439
+ # Compute functions for each sequence group.
440
+ # WARNING: The order of the functions matters!
441
+ self.per_seq_group_compute_fns = [
442
+ self._compute_multi_modal_input,
443
+ ]
444
+
445
+ self.runner = runner
446
+ self.model_input_cls = self.runner._model_input_cls
447
+ self.attn_backend = self.runner.attn_backend
448
+ self.scheduler_config = self.runner.scheduler_config
449
+ self.sliding_window = self.runner.sliding_window
450
+ self.block_size = self.runner.block_size
451
+ self.enable_lora = self.runner.lora_config is not None
452
+
453
+ # Attention metadata inputs.
454
+ if self.attn_backend is not None:
455
+ # spec decode (e.g. Medusa) does not have an attention backend
456
+ self.attn_metadata_builder = self.attn_backend.get_builder_cls()(
457
+ weakref.proxy(self))
458
+
459
+ # Engine/Model configurations.
460
+ self.chunked_prefill_enabled = (
461
+ self.scheduler_config is not None
462
+ and self.scheduler_config.chunked_prefill_enabled)
463
+ if self.sliding_window is not None:
464
+ self.sliding_window_blocks = (
465
+ self.sliding_window + self.block_size - 1) // self.block_size
466
+ self.block_aligned_sliding_window = \
467
+ self.sliding_window_blocks * self.block_size
468
+
469
+ def prepare(self,
470
+ finished_requests_ids: Optional[List[str]] = None) -> None:
471
+ self.finished_requests_ids = finished_requests_ids
472
+
473
+ # if the current batch is decode-only.
474
+ # will be set to False if there is any non-decode request.
475
+ self.decode_only = True
476
+
477
+ # Intermediate data (data in CPU before going to GPU) for
478
+ # the current sequence group.
479
+ self.inter_data_list: List[
480
+ ModelInputForGPUBuilder.InterDataForSeqGroup] = []
481
+
482
+ self.attn_metadata_builder.prepare()
483
+
484
+ def _compute_lens(self, inter_data: InterDataForSeqGroup, seq_idx: int,
485
+ seq_group_metadata: SequenceGroupMetadata):
486
+ """Compute context length, sequence length and tokens
487
+ for the given sequence data.
488
+ """
489
+ seq_data = seq_group_metadata.seq_data[inter_data.seq_ids[seq_idx]]
490
+ token_chunk_size = seq_group_metadata.token_chunk_size
491
+
492
+ # Compute context length (the number of tokens that are
493
+ # already computed) and sequence length (total number of tokens).
494
+
495
+ seq_len = seq_data.get_len()
496
+ if inter_data.is_prompt:
497
+ context_len = seq_data.get_num_computed_tokens()
498
+ seq_len = min(seq_len, context_len + token_chunk_size)
499
+ elif self.runner.model_config.is_encoder_decoder:
500
+ context_len = seq_len - 1
501
+ else:
502
+ context_len = seq_data.get_num_computed_tokens()
503
+
504
+ # Compute tokens.
505
+ if seq_data.prompt_embeds is None:
506
+ tokens = seq_data.get_token_ids()[context_len:seq_len]
507
+ prompt_embeds = None
508
+ else:
509
+ tokens = [0] * (seq_len - context_len)
510
+ prompt_embeds = seq_data.get_token_embeddings(
511
+ )[context_len:seq_len]
512
+
513
+ inter_data.seq_lens[seq_idx] = seq_len
514
+ inter_data.orig_seq_lens[seq_idx] = seq_len
515
+ inter_data.prompt_lens[seq_idx] = seq_data.get_prompt_len()
516
+ inter_data.context_lens[seq_idx] = context_len
517
+ inter_data.input_tokens[seq_idx].extend(tokens)
518
+ inter_data.inputs_embeds = prompt_embeds
519
+ inter_data.input_positions[seq_idx].extend(range(context_len, seq_len))
520
+ inter_data.query_lens[seq_idx] = seq_len - context_len
521
+
522
+ if seq_data.mrope_position_delta is not None:
523
+ if inter_data.mrope_input_positions is None:
524
+ inter_data.mrope_input_positions = [None] * inter_data.n_seqs
525
+
526
+ inter_data.mrope_input_positions[
527
+ seq_idx] = MRotaryEmbedding.get_next_input_positions(
528
+ seq_data.mrope_position_delta,
529
+ context_len,
530
+ seq_len,
531
+ )
532
+
533
+ def _compute_for_prefix_cache_hit(
534
+ self, inter_data: InterDataForSeqGroup, seq_idx: int,
535
+ seq_group_metadata: SequenceGroupMetadata):
536
+ """Check if hit prefix cache (i.e., some blocks are already computed).
537
+ If hit, update input tokens and positions to only compute the
538
+ remaining blocks.
539
+ """
540
+ computed_block_nums = inter_data.computed_block_nums
541
+
542
+ # Note that prefix caching does not support sliding window.
543
+ prefix_cache_hit = (computed_block_nums is not None
544
+ and len(computed_block_nums) > 0
545
+ and self.sliding_window is None
546
+ and inter_data.is_prompt)
547
+ inter_data.prefix_cache_hit = prefix_cache_hit
548
+
549
+ if not prefix_cache_hit:
550
+ return
551
+
552
+ assert computed_block_nums is not None
553
+ # The number of prefix-cached prompt tokens in this sequence. Note that
554
+ # this may be larger than the sequence length if chunked
555
+ # prefill is enabled.
556
+ prefix_cache_len = len(computed_block_nums) * self.block_size
557
+ seq_group_metadata.seq_data[inter_data.seq_ids[
558
+ seq_idx]].update_num_cached_tokens(prefix_cache_len)
559
+
560
+ # The number of so far computed prompt tokens in this sequence.
561
+ context_len = inter_data.context_lens[seq_idx]
562
+ # The total number of prompt tokens in this sequence.
563
+ # When chunked prefill is enabled, this is the token number of
564
+ # computed chunks + current chunk.
565
+ seq_len = inter_data.seq_lens[seq_idx]
566
+ if prefix_cache_len <= context_len:
567
+ # We already passed the cache hit region,
568
+ # so do normal computation.
569
+ pass
570
+ elif context_len < prefix_cache_len < seq_len:
571
+ # Partial hit. Compute the missing part.
572
+ uncomputed_start = prefix_cache_len - context_len
573
+ inter_data.input_tokens[seq_idx] = inter_data.input_tokens[
574
+ seq_idx][uncomputed_start:]
575
+ inter_data.input_positions[seq_idx] = inter_data.input_positions[
576
+ seq_idx][uncomputed_start:]
577
+ context_len = prefix_cache_len
578
+
579
+ inter_data.context_lens[seq_idx] = context_len
580
+ inter_data.query_lens[
581
+ seq_idx] = inter_data.seq_lens[seq_idx] - context_len
582
+ elif seq_len <= prefix_cache_len:
583
+ # Full hit. Only compute the last token to avoid
584
+ # erroneous behavior. FIXME: Ideally we should directly
585
+ # mark all tokens as computed in the scheduler and do not
586
+ # schedule this sequence, so this case should not happen.
587
+ inter_data.input_tokens[seq_idx] = inter_data.input_tokens[
588
+ seq_idx][-1:]
589
+ inter_data.input_positions[seq_idx] = inter_data.input_positions[
590
+ seq_idx][-1:]
591
+ inter_data.query_lens[seq_idx] = 1
592
+ inter_data.context_lens[seq_idx] = inter_data.seq_lens[seq_idx] - 1
593
+
594
+ def _compute_for_sliding_window(self, inter_data: InterDataForSeqGroup,
595
+ seq_idx: int,
596
+ seq_group_metadata: SequenceGroupMetadata):
597
+ """Update seq_len and curr_sliding_window_block for the given
598
+ sequence data (only required by decoding) if sliding window is enabled.
599
+ """
600
+ curr_sliding_window_block = 0
601
+ sliding_seq_len = inter_data.seq_lens[seq_idx]
602
+ if not inter_data.is_prompt and self.sliding_window is not None:
603
+ # TODO(sang): This is a hack to make sliding window work with
604
+ # paged attn. We can remove it if we make the paged attn kernel
605
+ # properly handle sliding window attn.
606
+ curr_sliding_window_block = self.sliding_window_blocks
607
+ # number of elements in last block
608
+ suff_len = inter_data.seq_lens[seq_idx] % self.block_size
609
+ sliding_seq_len = min(inter_data.seq_lens[seq_idx],
610
+ self.block_aligned_sliding_window + suff_len)
611
+ if suff_len > 0:
612
+ curr_sliding_window_block += 1
613
+
614
+ inter_data.curr_sliding_window_blocks[
615
+ seq_idx] = curr_sliding_window_block
616
+ inter_data.seq_lens[seq_idx] = sliding_seq_len
617
+
618
+ def _compute_lora_input(self, inter_data: InterDataForSeqGroup,
619
+ seq_idx: int,
620
+ seq_group_metadata: SequenceGroupMetadata):
621
+ """If LoRA is enabled, compute LoRA index and prompt mapping."""
622
+ if not self.enable_lora:
623
+ return
624
+
625
+ lora_id = seq_group_metadata.lora_int_id
626
+ if lora_id > 0:
627
+ inter_data.lora_requests.add(seq_group_metadata.lora_request)
628
+ query_len = inter_data.query_lens[seq_idx]
629
+ inter_data.lora_index_mapping.append([lora_id] * query_len)
630
+ sampling_params = seq_group_metadata.sampling_params
631
+ if sampling_params and sampling_params.prompt_logprobs is not None:
632
+ inter_data.lora_prompt_mapping.append([lora_id] * query_len)
633
+ elif not self.chunked_prefill_enabled or seq_group_metadata.do_sample:
634
+ inter_data.lora_prompt_mapping.append([lora_id])
635
+ else:
636
+ inter_data.lora_prompt_mapping.append([])
637
+
638
+ def _compute_multi_modal_input(self, inter_data: InterDataForSeqGroup,
639
+ seq_group_metadata: SequenceGroupMetadata):
640
+ """If multi-modal data is given, add it to the input."""
641
+ # NOTE: mm_kwargs only includes the subset of multi-modal items that
642
+ # intersect with the current prefill positions.
643
+ positions = inter_data.input_positions[0]
644
+ mm_kwargs, placeholder_maps = MultiModalPlaceholderMap.from_seq_group(
645
+ seq_group_metadata,
646
+ range(positions[0], positions[0] + len(positions)))
647
+
648
+ # M-RoPE requires mrope_positions even for plain text; return early
649
+ # when mm_kwargs is empty only if inter_data.is_prompt is False.
650
+ if not mm_kwargs and not inter_data.is_prompt:
651
+ return
652
+
653
+ inter_data.multi_modal_kwargs = mm_kwargs
654
+ inter_data.multi_modal_placeholder_maps = placeholder_maps
655
+
656
+ # special processing for mrope position deltas.
657
+ if self.runner.model_config.uses_mrope:
658
+ image_grid_thw = mm_kwargs.get("image_grid_thw", None)
659
+ video_grid_thw = mm_kwargs.get("video_grid_thw", None)
660
+ audio_feature_lengths = mm_kwargs.get("audio_feature_lengths",
661
+ None)
662
+
663
+ second_per_grid_ts = mm_kwargs.get("second_per_grid_ts", None)
664
+ use_audio_in_video = mm_kwargs.get("use_audio_in_video", False)
665
+ hf_config = self.runner.model_config.hf_config
666
+
667
+ inter_data.mrope_input_positions = [None] * inter_data.n_seqs
668
+ for seq_idx in range(inter_data.n_seqs):
669
+ seq_data = seq_group_metadata.seq_data[
670
+ inter_data.seq_ids[seq_idx]]
671
+ token_ids = seq_data.get_token_ids()
672
+
673
+ mrope_input_positions, mrope_position_delta = \
674
+ MRotaryEmbedding.get_input_positions(
675
+ token_ids,
676
+ hf_config=hf_config,
677
+ image_grid_thw=image_grid_thw,
678
+ video_grid_thw=video_grid_thw,
679
+ second_per_grid_ts=second_per_grid_ts,
680
+ context_len=inter_data.context_lens[seq_idx],
681
+ seq_len=inter_data.seq_lens[seq_idx],
682
+ audio_feature_lengths=audio_feature_lengths,
683
+ use_audio_in_video=use_audio_in_video,
684
+ )
685
+
686
+ seq_data.mrope_position_delta = mrope_position_delta
687
+ inter_data.mrope_input_positions[
688
+ seq_idx] = mrope_input_positions
689
+
690
+ def add_seq_group(self, seq_group_metadata: SequenceGroupMetadata):
691
+ """Add a sequence group to the builder."""
692
+ seq_ids = seq_group_metadata.seq_data.keys()
693
+ n_seqs = len(seq_ids)
694
+ is_prompt = seq_group_metadata.is_prompt
695
+
696
+ if is_prompt:
697
+ assert n_seqs == 1
698
+ self.decode_only = False
699
+
700
+ encoder_seq_len = 0
701
+
702
+ if self.runner.model_config.is_encoder_decoder:
703
+ encoder_seq_len = seq_group_metadata.encoder_seq_data.get_len()
704
+
705
+ inter_data = self.init_cached_inter_data(
706
+ request_id=seq_group_metadata.request_id,
707
+ seq_ids=seq_ids,
708
+ is_prompt=is_prompt,
709
+ block_tables=seq_group_metadata.block_tables,
710
+ computed_block_nums=seq_group_metadata.computed_block_nums,
711
+ reinit=True,
712
+ reinit_use_defaults=True,
713
+ encoder_seq_len=encoder_seq_len)
714
+
715
+ self.inter_data_list.append(inter_data)
716
+
717
+ for seq_idx in range(n_seqs):
718
+ for per_seq_fn in self.per_seq_compute_fns:
719
+ per_seq_fn(inter_data, seq_idx, seq_group_metadata)
720
+ for per_seq_group_fn in self.per_seq_group_compute_fns:
721
+ per_seq_group_fn(inter_data, seq_group_metadata)
722
+
723
+ def _use_captured_graph(self,
724
+ batch_size: int,
725
+ decode_only: bool,
726
+ max_decode_seq_len: int,
727
+ max_encoder_seq_len: int = 0) -> bool:
728
+ return (decode_only and not self.runner.model_config.enforce_eager
729
+ and max_decode_seq_len <= self.runner.max_seq_len_to_capture
730
+ and max_encoder_seq_len <= self.runner.max_seq_len_to_capture
731
+ and batch_size <= self.runner.max_batchsize_to_capture)
732
+
733
+ def _get_cuda_graph_pad_size(self,
734
+ num_seqs: int,
735
+ max_decode_seq_len: int,
736
+ max_encoder_seq_len: int = 0) -> int:
737
+ """
738
+ Determine the number of padding sequences required for running in
739
+ CUDA graph mode. Returns -1 if CUDA graphs cannot be used.
740
+
741
+ In the multi-step + chunked-prefill case, only the first step
742
+ has Prefills (if any). The rest of the steps are guaranteed to be all
743
+ decodes. In this case, we set up the padding as if all the sequences
744
+ are decodes so we may run all steps except the first step in CUDA graph
745
+ mode.
746
+
747
+ Args:
748
+ num_seqs (int): Number of sequences scheduled to run.
749
+ max_decode_seq_len (int): Greatest of all the decode sequence
750
+ lengths. Used only in checking the viability of using
751
+ CUDA graphs.
752
+ max_encoder_seq_len (int, optional): Greatest of all the encode
753
+ sequence lengths. Defaults to 0. Used only in checking the
754
+ viability of using CUDA graphs.
755
+ Returns:
756
+ int: Returns the determined number of padding sequences. If
757
+ using CUDA graphs is not viable, returns -1.
758
+ """
759
+ decode_only = self.decode_only
760
+ if not decode_only:
761
+ # Early exit so we can treat num_seqs as the batch_size below.
762
+ return -1
763
+
764
+ # batch_size out of this function refers to the number of input
765
+ # tokens being scheduled. This conflation of num_seqs as batch_size
766
+ # is valid as this is a decode-only case.
767
+ batch_size = num_seqs
768
+ if not self._use_captured_graph(batch_size, decode_only,
769
+ max_decode_seq_len,
770
+ max_encoder_seq_len):
771
+ return -1
772
+
773
+ graph_batch_size = self.runner.vllm_config.pad_for_cudagraph(
774
+ batch_size)
775
+ assert graph_batch_size >= batch_size
776
+ return graph_batch_size - batch_size
777
+
778
+ def build(self) -> ModelInputForGPU:
779
+ """Finalize the builder intermediate data and
780
+ create on-device tensors.
781
+ """
782
+ # Combine and flatten intermediate data.
783
+ input_tokens = list[int]()
784
+ inputs_embeds_list = list[torch.Tensor]()
785
+ for inter_data in self.inter_data_list:
786
+ for cur_input_tokens in inter_data.input_tokens:
787
+ input_tokens.extend(cur_input_tokens)
788
+ if inter_data.inputs_embeds is not None:
789
+ inputs_embeds_list.append(
790
+ inter_data.inputs_embeds.to(
791
+ dtype=self.runner.model_config.dtype,
792
+ device=self.runner.device))
793
+ inputs_embeds: Optional[torch.Tensor]
794
+ if len(inputs_embeds_list) == 0:
795
+ inputs_embeds = None
796
+ else:
797
+ inputs_embeds = torch.cat(inputs_embeds_list, dim=0).to(
798
+ dtype=self.runner.model_config.dtype,
799
+ device=self.runner.device)
800
+ assert len(inputs_embeds) == len(input_tokens)
801
+
802
+ if not input_tokens and inputs_embeds is None:
803
+ # This may happen when all prefill requests hit
804
+ # prefix caching and there is no decode request.
805
+ return self.model_input_cls()
806
+
807
+ mrope_input_positions: Optional[List[List[int]]] = None
808
+ if any(inter_data.mrope_input_positions is not None
809
+ for inter_data in self.inter_data_list):
810
+ mrope_input_positions = [[] for _ in range(3)]
811
+ for idx in range(3):
812
+ for inter_data in self.inter_data_list:
813
+ msections = inter_data.mrope_input_positions
814
+ if msections is None:
815
+ for _seq_input_positions in inter_data.input_positions:
816
+ mrope_input_positions[idx].extend(
817
+ _seq_input_positions)
818
+ else:
819
+ for _seq_mrope_input_positions in msections:
820
+ mrope_input_positions[idx].extend(
821
+ _seq_mrope_input_positions[idx])
822
+ input_positions = None
823
+ else:
824
+ input_positions = []
825
+ for inter_data in self.inter_data_list:
826
+ for cur_input_positions in inter_data.input_positions:
827
+ input_positions.extend(cur_input_positions)
828
+
829
+ seq_lens = []
830
+ query_lens = []
831
+ max_decode_seq_len = 0
832
+ max_encoder_seq_len = 0
833
+ for inter_data in self.inter_data_list:
834
+ seq_lens.extend(inter_data.seq_lens)
835
+ query_lens.extend(inter_data.query_lens)
836
+ if not inter_data.is_prompt:
837
+ max_decode_seq_len = max(max_decode_seq_len,
838
+ max(inter_data.seq_lens))
839
+ if self.runner.model_config.is_encoder_decoder:
840
+ max_encoder_seq_len = max(max_encoder_seq_len,
841
+ inter_data.encoder_seq_len)
842
+
843
+ # Mapping from request IDs to sequence IDs. Used for Jamba models
844
+ # that manage the cache by themselves.
845
+ request_ids_to_seq_ids = {
846
+ data.request_id: data.seq_ids
847
+ for data in self.inter_data_list
848
+ }
849
+
850
+ cuda_graph_pad_size = self._get_cuda_graph_pad_size(
851
+ num_seqs=len(seq_lens),
852
+ max_decode_seq_len=max_decode_seq_len,
853
+ max_encoder_seq_len=max_encoder_seq_len)
854
+
855
+ batch_size = len(input_tokens)
856
+ if cuda_graph_pad_size != -1:
857
+ # If cuda graph can be used, pad tensors accordingly.
858
+ # See `capture_model` API for more details.
859
+ # vLLM uses cuda graph only for decoding requests.
860
+ batch_size += cuda_graph_pad_size
861
+
862
+ # Tokens and positions.
863
+ if cuda_graph_pad_size:
864
+ input_tokens.extend(itertools.repeat(0, cuda_graph_pad_size))
865
+ assert self.runner.device is not None
866
+ input_tokens_tensor = async_tensor_h2d(input_tokens, torch.long,
867
+ self.runner.device,
868
+ self.runner.pin_memory)
869
+
870
+ if mrope_input_positions is not None:
871
+ for idx in range(3):
872
+ mrope_input_positions[idx].extend(
873
+ itertools.repeat(0, cuda_graph_pad_size))
874
+ input_positions_tensor = async_tensor_h2d(mrope_input_positions,
875
+ torch.long,
876
+ self.runner.device,
877
+ self.runner.pin_memory)
878
+ else:
879
+ input_positions.extend(itertools.repeat(0, cuda_graph_pad_size))
880
+ input_positions_tensor = async_tensor_h2d(input_positions,
881
+ torch.long,
882
+ self.runner.device,
883
+ self.runner.pin_memory)
884
+ # Sequence and query lengths.
885
+ if cuda_graph_pad_size:
886
+ seq_lens.extend(itertools.repeat(1, cuda_graph_pad_size))
887
+
888
+ # Attention metadata.
889
+ attn_metadata = self.attn_metadata_builder.build(
890
+ seq_lens, query_lens, cuda_graph_pad_size, batch_size)
891
+
892
+ # LoRA data.
893
+ lora_requests = set()
894
+ lora_mapping = None
895
+ if self.enable_lora:
896
+ lora_requests = set(r for data in self.inter_data_list
897
+ for r in data.lora_requests)
898
+ lora_index_mapping = flatten_2d_lists([
899
+ flatten_2d_lists(inter_data.lora_index_mapping)
900
+ for inter_data in self.inter_data_list
901
+ ])
902
+ if cuda_graph_pad_size:
903
+ lora_index_mapping.extend(
904
+ itertools.repeat(0, cuda_graph_pad_size))
905
+ lora_prompt_mapping = flatten_2d_lists([
906
+ flatten_2d_lists(inter_data.lora_prompt_mapping)
907
+ for inter_data in self.inter_data_list
908
+ ])
909
+
910
+ lora_mapping = LoRAMapping(
911
+ **dict(index_mapping=lora_index_mapping,
912
+ prompt_mapping=lora_prompt_mapping,
913
+ is_prefill=not self.decode_only))
914
+
915
+ # Multi-modal data.
916
+ multi_modal_kwargs_list = [
917
+ data.multi_modal_kwargs for data in self.inter_data_list
918
+ if data.multi_modal_kwargs is not None
919
+ ]
920
+ multi_modal_kwargs = MultiModalKwargs.batch(multi_modal_kwargs_list)
921
+
922
+ return self.model_input_cls(
923
+ input_tokens=input_tokens_tensor,
924
+ inputs_embeds=inputs_embeds,
925
+ input_positions=input_positions_tensor,
926
+ attn_metadata=attn_metadata,
927
+ seq_lens=seq_lens,
928
+ query_lens=query_lens,
929
+ lora_mapping=lora_mapping,
930
+ lora_requests=lora_requests,
931
+ multi_modal_kwargs=multi_modal_kwargs,
932
+ request_ids_to_seq_ids=request_ids_to_seq_ids,
933
+ finished_requests_ids=self.finished_requests_ids)
934
+
935
+
936
+ class GPUModelRunnerBase(ModelRunnerBase[TModelInputForGPU]):
937
+ """
938
+ Helper class for shared methods between GPU model runners.
939
+ """
940
+ _model_input_cls: Type[TModelInputForGPU]
941
+ _builder_cls: Type[ModelInputForGPUBuilder]
942
+ builder: ModelInputForGPUBuilder
943
+
944
+ def __init__(
945
+ self,
946
+ vllm_config: VllmConfig,
947
+ kv_cache_dtype: Optional[str] = "auto",
948
+ is_driver_worker: bool = False,
949
+ return_hidden_states: bool = False,
950
+ input_registry: InputRegistry = INPUT_REGISTRY,
951
+ mm_registry: MultiModalRegistry = MULTIMODAL_REGISTRY,
952
+ ):
953
+
954
+ ModelRunnerBase.__init__(self, vllm_config)
955
+ model_config = self.model_config
956
+ cache_config = self.cache_config
957
+
958
+ self.is_driver_worker = is_driver_worker
959
+ self.return_hidden_states = return_hidden_states
960
+
961
+ self.device = self.device_config.device
962
+ self.pin_memory = is_pin_memory_available()
963
+
964
+ self.kv_cache_dtype = kv_cache_dtype
965
+ self.sliding_window = model_config.get_sliding_window()
966
+ self.block_size = cache_config.block_size
967
+ self.max_seq_len_to_capture = self.model_config.max_seq_len_to_capture
968
+ self.max_batchsize_to_capture = \
969
+ self.vllm_config.compilation_config.max_capture_size
970
+
971
+ #
972
+ self.graph_runners: List[Dict[Tuple[int, bool], CUDAGraphRunner]] = [
973
+ {} for _ in range(self.parallel_config.pipeline_parallel_size)
974
+ ]
975
+ self.graph_memory_pool: Optional[Tuple[
976
+ int, int]] = None # Set during graph capture.
977
+
978
+ self.has_inner_state = model_config.has_inner_state
979
+
980
+ self.in_profile_run = False
981
+
982
+ # When using CUDA graph, the input block tables must be padded to
983
+ # max_seq_len_to_capture. However, creating the block table in
984
+ # Python can be expensive. To optimize this, we cache the block table
985
+ # in numpy and only copy the actual input content at every iteration.
986
+ # The shape of the cached block table will be
987
+ # (max batch size to capture, max seq len to capture / block size).
988
+ self.graph_block_tables = np.zeros(
989
+ (self.max_batchsize_to_capture, self.get_max_block_per_batch()),
990
+ dtype=np.int32)
991
+
992
+ self.cross_layer_shared_graph_block_tables = np.zeros(
993
+ (self.max_batchsize_to_capture, self.get_max_block_per_batch()),
994
+ dtype=np.int32)
995
+
996
+ # Attention-free but stateful models like Mamba need a placeholder attn
997
+ # backend, as the attention metadata is needed to manage internal state.
998
+ # However we must bypass attention selection altogether for some models
999
+ # used for speculative decoding to avoid a divide-by-zero in
1000
+ # model_config.get_head_size()
1001
+ num_attn_heads = self.model_config.get_num_attention_heads(
1002
+ self.parallel_config)
1003
+ needs_attn_backend = (num_attn_heads != 0
1004
+ or self.model_config.is_attention_free)
1005
+
1006
+ self.attn_backend = get_attn_backend(
1007
+ self.model_config.get_head_size(),
1008
+ self.model_config.dtype,
1009
+ self.kv_cache_dtype,
1010
+ self.block_size,
1011
+ self.model_config.is_attention_free,
1012
+ use_mla=self.model_config.use_mla,
1013
+ ) if needs_attn_backend else None
1014
+ if self.attn_backend:
1015
+ self.attn_state = self.attn_backend.get_state_cls()(
1016
+ weakref.proxy(self))
1017
+ else:
1018
+ self.attn_state = CommonAttentionState(weakref.proxy(self))
1019
+
1020
+ # Multi-modal data support
1021
+ self.input_registry = input_registry
1022
+ self.mm_registry = mm_registry
1023
+
1024
+ # Lazy initialization
1025
+ self.model: nn.Module # Set after load_model
1026
+ # Set after load_model.
1027
+ self.lora_manager: Optional[LRUCacheWorkerLoRAManager] = None
1028
+ self.sampler = get_sampler()
1029
+
1030
+ set_cpu_offload_max_bytes(
1031
+ int(self.cache_config.cpu_offload_gb * 1024**3))
1032
+
1033
+ # Used to cache python objects
1034
+ self.inter_data_cache: Dict[int, PyObjectCache] = {}
1035
+
1036
+ # Using the PythonizationCache in Pipeline-Parallel clobbers the
1037
+ # SequenceGroupToSample object. In Pipeline-Parallel, we have
1038
+ # more than 1 Scheduler, resulting in a potential back-to-back
1039
+ # prepare_model_inputs() call. This clobbers the cached
1040
+ # SequenceGroupToSample objects, as we reset the cache during
1041
+ # every prepare_model_inputs() call.
1042
+ self.sampling_metadata_cache: SamplingMetadataCache = \
1043
+ SamplingMetadataCache() \
1044
+ if self.parallel_config.pipeline_parallel_size == 1 else None
1045
+
1046
+ if hasattr(self, "_builder_cls"):
1047
+ # multi-step model runner does not have `_builder_cls`
1048
+ self.builder = self._builder_cls(weakref.proxy(self))
1049
+
1050
+ def load_model(self) -> None:
1051
+ logger.info("Starting to load model %s...", self.model_config.model)
1052
+ with DeviceMemoryProfiler(self.device) as m:
1053
+ time_before_load = time.perf_counter()
1054
+ self.model = get_model(vllm_config=self.vllm_config)
1055
+ if self.lora_config:
1056
+ assert supports_lora(
1057
+ self.model
1058
+ ), f"{self.model.__class__.__name__} does not support LoRA yet."
1059
+
1060
+ if supports_multimodal(self.model):
1061
+ logger.warning(
1062
+ "Regarding multimodal models, vLLM currently "
1063
+ "only supports adding LoRA to language model.")
1064
+
1065
+ # Use get_text_config() in case of multimodal models
1066
+ text_config = self.model_config.hf_config.get_text_config()
1067
+
1068
+ self.lora_manager = LRUCacheWorkerLoRAManager(
1069
+ self.scheduler_config.max_num_seqs,
1070
+ self.scheduler_config.max_num_batched_tokens,
1071
+ self.vocab_size,
1072
+ self.lora_config,
1073
+ self.device,
1074
+ self.model.embedding_modules,
1075
+ self.model.embedding_padding_modules,
1076
+ max_position_embeddings=text_config.
1077
+ max_position_embeddings,
1078
+ )
1079
+ self.model = self.lora_manager.create_lora_manager(self.model)
1080
+ time_after_load = time.perf_counter()
1081
+
1082
+ self.model_memory_usage = m.consumed_memory
1083
+ logger.info("Model loading took %.4f GiB and %.6f seconds",
1084
+ self.model_memory_usage / GiB_bytes,
1085
+ time_after_load - time_before_load)
1086
+
1087
+
1088
+ if self.vllm_config.compilation_config.level ==\
1089
+ CompilationLevel.DYNAMO_AS_IS and supports_dynamo():
1090
+ backend = self.vllm_config.compilation_config.init_backend(
1091
+ self.vllm_config)
1092
+ compilation_counter.dynamo_as_is_count += 1
1093
+ self.model = torch.compile(
1094
+ self.model,
1095
+ fullgraph=envs.VLLM_TEST_DYNAMO_FULLGRAPH_CAPTURE,
1096
+ backend=backend)
1097
+
1098
+ def get_model(self) -> nn.Module:
1099
+ return self.model
1100
+
1101
+ def save_sharded_state(
1102
+ self,
1103
+ path: str,
1104
+ pattern: Optional[str] = None,
1105
+ max_size: Optional[int] = None,
1106
+ ) -> None:
1107
+ from vllm.model_executor.model_loader import ShardedStateLoader
1108
+ ShardedStateLoader.save_model(
1109
+ self.model,
1110
+ path,
1111
+ pattern=pattern,
1112
+ max_size=max_size,
1113
+ )
1114
+
1115
+ def save_tensorized_model(
1116
+ self,
1117
+ tensorizer_config: TensorizerConfig,
1118
+ ) -> None:
1119
+ from vllm.model_executor.model_loader import TensorizerLoader
1120
+ TensorizerLoader.save_model(
1121
+ self.model,
1122
+ tensorizer_config=tensorizer_config,
1123
+ model_config=self.model_config,
1124
+ )
1125
+
1126
+ def get_max_block_per_batch(self) -> int:
1127
+ block_size = self.block_size
1128
+ return (self.max_seq_len_to_capture + block_size - 1) // block_size
1129
+
1130
+ def _prepare_model_input_tensors(
1131
+ self,
1132
+ seq_group_metadata_list: List[SequenceGroupMetadata],
1133
+ finished_requests_ids: Optional[List[str]] = None
1134
+ ) -> TModelInputForGPU:
1135
+ """Helper method to prepare the model input based on a given sequence
1136
+ group. Prepares metadata needed for the base model forward pass but not
1137
+ metadata for possible additional steps, e.g., sampling.
1138
+
1139
+ The API assumes seq_group_metadata_list is sorted by prefill -> decode.
1140
+
1141
+ The result tensors and data structure also batches input in prefill
1142
+ -> decode order. For example,
1143
+
1144
+ - input_tokens[:num_prefill_tokens] contains prefill tokens.
1145
+ - input_tokens[num_prefill_tokens:] contains decode tokens.
1146
+
1147
+ If cuda graph is required, this API automatically pads inputs.
1148
+ """
1149
+ self.builder.prepare(finished_requests_ids)
1150
+ for seq_group_metadata in seq_group_metadata_list:
1151
+ try:
1152
+ self.builder.add_seq_group(seq_group_metadata)
1153
+ except Exception as e:
1154
+ # Raise an exception that tracks the ID of the bad request
1155
+ raise InputProcessingError(seq_group_metadata.request_id,
1156
+ str(e)) from e
1157
+
1158
+ self.builder.reset_cached_inter_data()
1159
+
1160
+ return self.builder.build() # type: ignore
1161
+
1162
+ @contextmanager
1163
+ def set_in_profile_run(self):
1164
+ self.in_profile_run = True
1165
+ try:
1166
+ yield
1167
+ finally:
1168
+ self.in_profile_run = False
1169
+
1170
+ @torch.inference_mode()
1171
+ def profile_run(self) -> None:
1172
+ max_num_batched_tokens = \
1173
+ self.scheduler_config.max_num_batched_tokens
1174
+ max_num_seqs = self.scheduler_config.max_num_seqs
1175
+ self._dummy_run(max_num_batched_tokens, max_num_seqs)
1176
+
1177
+ def _add_dummy_loras(self, num_loras: int) -> list[LoRARequest]:
1178
+ assert num_loras > 0
1179
+ assert self.lora_manager is not None
1180
+
1181
+ dummy_lora_requests: list[LoRARequest] = []
1182
+ with self.lora_manager.dummy_lora_cache():
1183
+ for idx in range(num_loras):
1184
+ lora_id = idx + 1
1185
+ dummy_lora_request = LoRARequest(
1186
+ lora_name=f"warmup_{lora_id}",
1187
+ lora_int_id=lora_id,
1188
+ lora_path="/not/a/real/path",
1189
+ )
1190
+ self.lora_manager.add_dummy_lora(dummy_lora_request,
1191
+ rank=LORA_WARMUP_RANK)
1192
+ dummy_lora_requests.append(dummy_lora_request)
1193
+ return dummy_lora_requests
1194
+
1195
+ def _remove_dummy_loras(self):
1196
+ # Remove dummy loras.
1197
+ assert self.lora_manager is not None
1198
+ self.remove_all_loras()
1199
+
1200
+ def _dummy_run(self,
1201
+ max_num_batched_tokens: int,
1202
+ max_num_seqs: int = 1) -> None:
1203
+ with self.set_in_profile_run():
1204
+ # Enable top-k sampling to reflect the accurate memory usage.
1205
+ sampling_params = \
1206
+ SamplingParams(top_p=0.99, top_k=self.vocab_size - 1)
1207
+
1208
+ # This represents the maximum number of different requests
1209
+ # that will have unique loras, and therefore the max amount of
1210
+ # memory consumption. Create dummy lora request copies from the
1211
+ # lora request passed in, which contains a lora from the lora
1212
+ # warmup path.
1213
+ dummy_lora_requests: List[LoRARequest] = []
1214
+ dummy_lora_requests_per_seq: List[LoRARequest] = []
1215
+ if self.lora_config:
1216
+ dummy_lora_requests = self._add_dummy_loras(
1217
+ self.lora_config.max_loras)
1218
+ assert len(dummy_lora_requests) == self.lora_config.max_loras
1219
+ dummy_lora_requests_per_seq = [
1220
+ dummy_lora_requests[idx % len(dummy_lora_requests)]
1221
+ for idx in range(max_num_seqs)
1222
+ ]
1223
+
1224
+ # Profile memory usage with max_num_sequences sequences and the
1225
+ # total number of tokens equal to max_num_batched_tokens.
1226
+ seqs: List[SequenceGroupMetadata] = []
1227
+ # Additional GPU memory may be needed for multi-modal encoding,
1228
+ # which needs to be accounted for when calculating the GPU blocks
1229
+ # for the vLLM block manager.
1230
+ # To exercise the worst scenario for GPU memory consumption,
1231
+ # the number of seqs (batch_size) is chosen to maximize the number
1232
+ # of images processed.
1233
+
1234
+ max_mm_tokens = self.mm_registry.get_max_multimodal_tokens(
1235
+ self.model_config)
1236
+ if max_mm_tokens > 0:
1237
+ max_num_seqs_orig = max_num_seqs
1238
+ max_num_seqs = min(max_num_seqs,
1239
+ max_num_batched_tokens // max_mm_tokens)
1240
+ if max_num_seqs < 1:
1241
+ expr = (f"min({max_num_seqs_orig}, "
1242
+ f"{max_num_batched_tokens} // {max_mm_tokens})")
1243
+ logger.warning(
1244
+ "Computed max_num_seqs (%s) to be less than 1. "
1245
+ "Setting it to the minimum value of 1.", expr)
1246
+ max_num_seqs = 1
1247
+
1248
+ batch_size = 0
1249
+ for group_id in range(max_num_seqs):
1250
+ seq_len = (max_num_batched_tokens // max_num_seqs +
1251
+ (group_id < max_num_batched_tokens % max_num_seqs))
1252
+ batch_size += seq_len
1253
+
1254
+ dummy_data = self.input_registry \
1255
+ .dummy_data_for_profiling(self.model_config,
1256
+ seq_len,
1257
+ self.mm_registry)
1258
+
1259
+ seq = SequenceGroupMetadata(
1260
+ request_id=str(group_id),
1261
+ is_prompt=True,
1262
+ seq_data={group_id: dummy_data.seq_data},
1263
+ sampling_params=sampling_params,
1264
+ block_tables=None,
1265
+ lora_request=dummy_lora_requests_per_seq[group_id]
1266
+ if dummy_lora_requests_per_seq else None,
1267
+ multi_modal_data=dummy_data.multi_modal_data,
1268
+ multi_modal_placeholders=dummy_data.
1269
+ multi_modal_placeholders,
1270
+ )
1271
+ seqs.append(seq)
1272
+
1273
+ # Run the model with the dummy inputs.
1274
+ num_layers = self.model_config.get_num_layers(self.parallel_config)
1275
+ # use an empty tensor instead of `None` to force Dynamo to pass
1276
+ # it by reference, rather than by specializing on the value `None`.
1277
+ # the `dtype` argument does not matter, and we use `float32` as
1278
+ # a placeholder (it has wide hardware support).
1279
+ # it is important to create tensors inside the loop, rather than
1280
+ # multiplying the list, to keep Dynamo from treating them as
1281
+ # aliases of the same tensor.
1282
+ kv_caches = [
1283
+ torch.tensor([], dtype=torch.float32, device=self.device)
1284
+ for _ in range(num_layers)
1285
+ ]
1286
+ finished_requests_ids = [seq.request_id for seq in seqs]
1287
+ model_input = self.prepare_model_input(
1288
+ seqs, finished_requests_ids=finished_requests_ids)
1289
+ intermediate_tensors = None
1290
+ if not get_pp_group().is_first_rank:
1291
+ intermediate_tensors = \
1292
+ self.model.make_empty_intermediate_tensors(
1293
+ batch_size=batch_size,
1294
+ dtype=self.model_config.dtype,
1295
+ device=self.device)
1296
+
1297
+ # Disable KV Scale Calculation for dummy data during profile run
1298
+ if model_input.attn_metadata is not None:
1299
+ model_input.attn_metadata.enable_kv_scales_calculation = False
1300
+
1301
+ self.execute_model(model_input, kv_caches, intermediate_tensors)
1302
+ torch.cuda.synchronize()
1303
+ if self.lora_config:
1304
+ self._remove_dummy_loras()
1305
+
1306
+ return
1307
+
1308
+ def remove_all_loras(self):
1309
+ if not self.lora_manager:
1310
+ raise RuntimeError("LoRA is not enabled.")
1311
+ self.lora_manager.remove_all_adapters()
1312
+
1313
+ def set_active_loras(self, lora_requests: Set[LoRARequest],
1314
+ lora_mapping: LoRAMapping) -> None:
1315
+ if not self.lora_manager:
1316
+ raise RuntimeError("LoRA is not enabled.")
1317
+ self.lora_manager.set_active_adapters(lora_requests, lora_mapping)
1318
+
1319
+ def add_lora(self, lora_request: LoRARequest) -> bool:
1320
+ if not self.lora_manager:
1321
+ raise RuntimeError("LoRA is not enabled.")
1322
+ return self.lora_manager.add_adapter(lora_request)
1323
+
1324
+ def remove_lora(self, lora_id: int) -> bool:
1325
+ if not self.lora_manager:
1326
+ raise RuntimeError("LoRA is not enabled.")
1327
+ return self.lora_manager.remove_adapter(lora_id)
1328
+
1329
+ def pin_lora(self, lora_id: int) -> bool:
1330
+ if not self.lora_manager:
1331
+ raise RuntimeError("LoRA is not enabled.")
1332
+ return self.lora_manager.pin_adapter(lora_id)
1333
+
1334
+ def list_loras(self) -> Set[int]:
1335
+ if not self.lora_manager:
1336
+ raise RuntimeError("LoRA is not enabled.")
1337
+ return self.lora_manager.list_adapters()
1338
+
1339
+ @torch.inference_mode()
1340
+ def capture_model(self, kv_caches: List[List[torch.Tensor]]) -> int:
1341
+ """Cuda graph capture a model and return cudagraph memory
1342
+ consumption in bytes.
1343
+
1344
+ Note that CUDA graph's performance gain is negligible if the number
1345
+ of batched tokens is larger than 200. And since CUDA graph
1346
+ requires fixed sized tensors, supporting large/variable batch
1347
+ size requires high GPU memory overhead. Thus, vLLM only captures
1348
+ decoding requests. Mixed batch (chunked prefill + decoding) or
1349
+ prefill requests are not captured.
1350
+
1351
+ Since it is used for decoding-only, it assumes there's only 1 token
1352
+ per sequence in the batch.
1353
+ """
1354
+ assert not self.model_config.enforce_eager
1355
+ logger.info("Capturing cudagraphs for decoding. This may lead to "
1356
+ "unexpected consequences if the model is not static. To "
1357
+ "run the model in eager mode, set 'enforce_eager=True' or "
1358
+ "use '--enforce-eager' in the CLI. "
1359
+ "If out-of-memory error occurs during cudagraph capture,"
1360
+ " consider decreasing `gpu_memory_utilization` or "
1361
+ "switching to eager mode. You can also reduce the "
1362
+ "`max_num_seqs` as needed to decrease memory usage.")
1363
+ start_time = time.perf_counter()
1364
+ start_free_gpu_memory = torch.cuda.mem_get_info()[0]
1365
+
1366
+ # Prepare dummy inputs. These will be reused for all batch sizes.
1367
+ max_batch_size = self.max_batchsize_to_capture
1368
+ input_tokens = torch.zeros(max_batch_size,
1369
+ dtype=torch.long,
1370
+ device=self.device)
1371
+ input_positions = torch.zeros(max_batch_size,
1372
+ dtype=torch.long,
1373
+ device=self.device)
1374
+ inputs_embeds = torch.zeros(
1375
+ (max_batch_size, self.model_config.get_hidden_size()),
1376
+ dtype=self.model_config.dtype,
1377
+ device=self.device)
1378
+ if self.model_config.uses_mrope:
1379
+ input_positions = torch.tile(input_positions,
1380
+ (3, 1)).cuda(device=self.device)
1381
+ # Prepare dummy previous_hidden_states only if needed by the model.
1382
+ # This is used by draft models such as EAGLE.
1383
+ previous_hidden_states = None
1384
+ if "previous_hidden_states" in inspect.signature(
1385
+ self.model.forward).parameters:
1386
+ previous_hidden_states = torch.empty(
1387
+ [max_batch_size,
1388
+ self.model_config.get_hidden_size()],
1389
+ dtype=self.model_config.dtype,
1390
+ device=self.device)
1391
+
1392
+ intermediate_inputs = None
1393
+ if not get_pp_group().is_first_rank:
1394
+ intermediate_inputs = self.model.make_empty_intermediate_tensors(
1395
+ batch_size=max_batch_size,
1396
+ dtype=self.model_config.dtype,
1397
+ device=self.device)
1398
+
1399
+ dummy_lora_id: Optional[int] = None
1400
+ dummy_lora_request: Optional[LoRARequest] = None
1401
+ if self.lora_config:
1402
+ # The goal is to capture the LoRA kernels in cuda graphs.
1403
+ # For this purpose, a single dummy LoRA is sufficient.
1404
+ dummy_lora_requests = self._add_dummy_loras(num_loras=1)
1405
+ assert len(dummy_lora_requests) == 1
1406
+ dummy_lora_request = dummy_lora_requests[0]
1407
+ dummy_lora_id = dummy_lora_request.lora_int_id
1408
+
1409
+ with self.attn_state.graph_capture(max_batch_size), graph_capture(
1410
+ self.device) as graph_capture_context:
1411
+ # NOTE: Capturing the largest batch size first may help reduce the
1412
+ # memory usage of CUDA graph.
1413
+ for virtual_engine in range(
1414
+ self.parallel_config.pipeline_parallel_size):
1415
+ # We need to not only iterate over batch sizes, but also whether
1416
+ # to use inputs_embeds or not, hence we use the cartesian
1417
+ # product.
1418
+ cudagraph_capture_sizes = self.vllm_config.compilation_config\
1419
+ .cudagraph_capture_sizes
1420
+ cudagraph_inputs_embeds = ((
1421
+ True, False) if self.model_config.enable_prompt_embeds else
1422
+ (False, ))
1423
+ compilation_cases = itertools.product(
1424
+ cudagraph_capture_sizes,
1425
+ cudagraph_inputs_embeds,
1426
+ )
1427
+ # Only rank 0 should print progress bar during capture
1428
+ if get_tensor_model_parallel_rank() == 0:
1429
+ compilation_cases = tqdm(
1430
+ list(compilation_cases),
1431
+ disable=not self.load_config.use_tqdm_on_load,
1432
+ desc="Capturing CUDA graph shapes")
1433
+ for batch_size, use_inputs_embeds in compilation_cases:
1434
+ attn_metadata = (
1435
+ self.attn_state.graph_capture_get_metadata_for_batch(
1436
+ batch_size,
1437
+ is_encoder_decoder_model=self.model_config.
1438
+ is_encoder_decoder))
1439
+ # Disable KV Scale Calculation for graph capture
1440
+ attn_metadata.enable_kv_scales_calculation = False
1441
+ if self.lora_config:
1442
+ lora_mapping = LoRAMapping(
1443
+ **dict(index_mapping=[dummy_lora_id] * batch_size,
1444
+ prompt_mapping=[dummy_lora_id] * batch_size,
1445
+ is_prefill=False))
1446
+ self.set_active_loras(set([dummy_lora_request]),
1447
+ lora_mapping)
1448
+
1449
+ graph_runner = CUDAGraphRunner(
1450
+ self.model, self.attn_backend.get_name(),
1451
+ self.attn_state.graph_clone(batch_size),
1452
+ self.model_config.is_encoder_decoder)
1453
+
1454
+ capture_inputs = {
1455
+ "input_ids":
1456
+ input_tokens[:batch_size],
1457
+ "inputs_embeds":
1458
+ inputs_embeds[:batch_size]
1459
+ if use_inputs_embeds else None,
1460
+ "positions":
1461
+ input_positions[..., :batch_size],
1462
+ "intermediate_inputs":
1463
+ intermediate_inputs[:batch_size]
1464
+ if intermediate_inputs is not None else None,
1465
+ "kv_caches":
1466
+ kv_caches[virtual_engine],
1467
+ "attn_metadata":
1468
+ attn_metadata,
1469
+ "memory_pool":
1470
+ self.graph_memory_pool,
1471
+ "stream":
1472
+ graph_capture_context.stream
1473
+ }
1474
+ if previous_hidden_states is not None:
1475
+ capture_inputs[
1476
+ "previous_hidden_states"] = previous_hidden_states[:
1477
+ batch_size]
1478
+
1479
+ if self.has_inner_state:
1480
+ # Only used by Mamba-based models CUDA graph atm (Jamba)
1481
+ capture_inputs.update({
1482
+ "seqlen_agnostic_capture_inputs":
1483
+ self.model.get_seqlen_agnostic_capture_inputs(
1484
+ batch_size)
1485
+ })
1486
+ if self.model_config.is_encoder_decoder:
1487
+ # add the additional inputs to capture for
1488
+ # encoder-decoder models.
1489
+ self._update_inputs_to_capture_for_enc_dec_model(
1490
+ capture_inputs)
1491
+
1492
+ with set_forward_context(attn_metadata, self.vllm_config,
1493
+ virtual_engine):
1494
+ graph_runner.capture(**capture_inputs)
1495
+ self.graph_memory_pool = graph_runner.graph.pool()
1496
+ self.graph_runners[virtual_engine][(
1497
+ batch_size, use_inputs_embeds)] = graph_runner
1498
+
1499
+ if self.lora_config:
1500
+ self._remove_dummy_loras()
1501
+
1502
+ end_time = time.perf_counter()
1503
+ end_free_gpu_memory = torch.cuda.mem_get_info()[0]
1504
+ elapsed_time = end_time - start_time
1505
+ cuda_graph_size = start_free_gpu_memory - end_free_gpu_memory
1506
+ # This usually takes < 10 seconds.
1507
+ logger.info("Graph capturing finished in %.0f secs, took %.2f GiB",
1508
+ elapsed_time, cuda_graph_size / GiB_bytes)
1509
+ return cuda_graph_size
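The log message above already names the practical levers for avoiding an out-of-memory failure during capture. As a minimal usage sketch (not part of this diff; the model name is only a placeholder), the same knobs can be set from the offline API or the CLI:

    # Sketch: skip CUDA graph capture by forcing eager execution, or keep
    # capture but leave more headroom for the graphs.
    from vllm import LLM

    llm = LLM(
        model="facebook/opt-125m",     # placeholder model
        enforce_eager=True,            # bypasses the capture path above
        gpu_memory_utilization=0.80,   # lower values leave more free memory
        max_num_seqs=64,               # smaller max batch -> smaller captured shapes
    )

    # Rough server-side equivalent:
    #   vllm serve facebook/opt-125m --enforce-eager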
+
+    def _update_inputs_to_capture_for_enc_dec_model(
+            self, capture_inputs: Dict[str, Any]):
+        """
+        Updates the set of input tensors needed for CUDA graph capture in an
+        encoder-decoder model.
+
+        This method modifies the provided `capture_inputs` dictionary by
+        adding the tensors specific to encoder-decoder models that need to be
+        captured for CUDA graph replay.
+        """
+        # During the decode phase encoder_input_ids and encoder_positions are
+        # unset. Do the same thing for graph capture.
+        capture_inputs["encoder_input_ids"] = torch.tensor(
+            [], dtype=torch.long, device=self.device)
+        capture_inputs["encoder_positions"] = torch.tensor(
+            [], dtype=torch.long, device=self.device)
+
+    @property
+    def vocab_size(self) -> int:
+        return self.model_config.get_vocab_size()
+
+
+class ModelRunner(GPUModelRunnerBase[ModelInputForGPUWithSamplingMetadata]):
+    """
+    GPU model runner with sampling step.
+    """
+    _model_input_cls: Type[ModelInputForGPUWithSamplingMetadata] = (
+        ModelInputForGPUWithSamplingMetadata)
+    _builder_cls: Type[ModelInputForGPUBuilder] = ModelInputForGPUBuilder
+
+    def make_model_input_from_broadcasted_tensor_dict(
+        self,
+        tensor_dict: Dict[str, Any],
+    ) -> ModelInputForGPUWithSamplingMetadata:
+        model_input = \
+            ModelInputForGPUWithSamplingMetadata.from_broadcasted_tensor_dict(
+                tensor_dict,
+                attn_backend=self.attn_backend,
+            )
+        return model_input
+
+    def prepare_model_input(
+        self,
+        seq_group_metadata_list: List[SequenceGroupMetadata],
+        virtual_engine: int = 0,
+        finished_requests_ids: Optional[List[str]] = None,
+    ) -> ModelInputForGPUWithSamplingMetadata:
+        """Prepare the model input based on a given sequence group, including
+        metadata for the sampling step.
+
+        The API assumes seq_group_metadata_list is sorted by prefill -> decode.
+
+        The result tensors and data structures also batch the input in prefill
+        -> decode order. For example,
+
+        - input_tokens[:num_prefill_tokens] contains prefill tokens.
+        - input_tokens[num_prefill_tokens:] contains decode tokens.
+
+        If cuda graph is required, this API automatically pads inputs.
+        """
+        model_input = self._prepare_model_input_tensors(
+            seq_group_metadata_list, finished_requests_ids)
+        if get_pp_group().is_last_rank:
+            # Sampling metadata is only required for the final pp group.
+            generators = self.get_generators(finished_requests_ids)
+            sampling_metadata = SamplingMetadata.prepare(
+                seq_group_metadata_list, model_input.seq_lens,
+                model_input.query_lens, self.device, self.pin_memory,
+                generators, self.sampling_metadata_cache)
+        else:
+            sampling_metadata = None
+        is_prompt = (seq_group_metadata_list[0].is_prompt
+                     if seq_group_metadata_list else None)
+        return dataclasses.replace(model_input,
+                                   sampling_metadata=sampling_metadata,
+                                   is_prompt=is_prompt,
+                                   virtual_engine=virtual_engine)
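The prefill-then-decode layout that this docstring describes can be pictured with a toy flattened batch; the values below are purely illustrative and not taken from the diff:

    import torch

    # One prefill request contributing 4 prompt tokens, followed by two
    # decode requests contributing 1 token each.
    input_tokens = torch.tensor([11, 12, 13, 14, 98, 99])
    num_prefill_tokens = 4

    prefill_tokens = input_tokens[:num_prefill_tokens]  # tensor([11, 12, 13, 14])
    decode_tokens = input_tokens[num_prefill_tokens:]   # tensor([98, 99])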
+
+    @torch.inference_mode()
+    def execute_model(
+        self,
+        model_input: ModelInputForGPUWithSamplingMetadata,
+        kv_caches: List[torch.Tensor],
+        intermediate_tensors: Optional[IntermediateTensors] = None,
+        num_steps: int = 1,
+        **kwargs,
+    ) -> Optional[Union[List[SamplerOutput], IntermediateTensors]]:
+        if num_steps > 1:
+            raise ValueError("num_steps > 1 is not supported in ModelRunner")
+
+        if self.lora_config:
+            assert model_input.lora_requests is not None
+            assert model_input.lora_mapping is not None
+            self.set_active_loras(model_input.lora_requests,
+                                  model_input.lora_mapping)
+
+        self.attn_state.begin_forward(model_input)
+
+        # Currently cuda graph is only supported by the decode phase.
+        assert model_input.attn_metadata is not None
+        prefill_meta = model_input.attn_metadata.prefill_metadata
+        decode_meta = model_input.attn_metadata.decode_metadata
+        # TODO(andoorve): We can remove this once all
+        # virtual engines share the same kv cache.
+        virtual_engine = model_input.virtual_engine
+        previous_hidden_states = kwargs.get("previous_hidden_states")
+        if prefill_meta is None and decode_meta.use_cuda_graph:
+            assert model_input.input_tokens is not None
+            graph_batch_size = model_input.input_tokens.shape[0]
+            use_inputs_embeds = model_input.inputs_embeds is not None
+            model_executable = self.graph_runners[virtual_engine][(
+                graph_batch_size, use_inputs_embeds)]
+            if previous_hidden_states is not None:
+                previous_hidden_states = torch.cat([
+                    previous_hidden_states,
+                    torch.empty([
+                        graph_batch_size - previous_hidden_states.shape[0],
+                        *previous_hidden_states.shape[1:]
+                    ],
+                                dtype=previous_hidden_states.dtype,
+                                device=previous_hidden_states.device)
+                ])
+        else:
+            model_executable = self.model
+
+        # Receive KV cache in distributed KV cache transfer setting
+        # In disagg prefill setting, it will also recv hidden states and bypass
+        # model forwarding
+        # In KV cache database setting, it will change the model input so that
+        # we can skip prefilling on tokens that successfully received KV caches
+        # NOTE: The receive operation is blocking
+        bypass_model_exec = False
+        if self.need_recv_kv(model_input, kv_caches):
+            hidden_or_intermediate_states, bypass_model_exec, model_input = \
+                get_kv_transfer_group().recv_kv_caches_and_hidden_states(
+                    # model is used to know which layer the current worker
+                    # is working on, so that we can receive KV for only those
+                    # layers.
+                    model_executable,
+                    model_input,
+                    kv_caches=kv_caches
+                )
+
+        multi_modal_kwargs = model_input.multi_modal_kwargs or {}
+        seqlen_agnostic_kwargs = {
+            "finished_requests_ids": model_input.finished_requests_ids,
+            "request_ids_to_seq_ids": model_input.request_ids_to_seq_ids,
+        } if self.has_inner_state else {}
+        model_kwargs = {}
+        if previous_hidden_states is not None:
+            model_kwargs["previous_hidden_states"] = previous_hidden_states
+        if (self.observability_config is not None
+                and self.observability_config.collect_model_forward_time):
+            model_forward_start = torch.cuda.Event(enable_timing=True)
+            model_forward_end = torch.cuda.Event(enable_timing=True)
+            model_forward_start.record()
+
+        if not bypass_model_exec:
+            with set_forward_context(model_input.attn_metadata,
+                                     self.vllm_config, virtual_engine):
+                hidden_or_intermediate_states = model_executable(
+                    input_ids=model_input.input_tokens,
+                    inputs_embeds=model_input.inputs_embeds,
+                    positions=model_input.input_positions,
+                    intermediate_tensors=intermediate_tensors,
+                    **MultiModalKwargs.as_kwargs(
+                        multi_modal_kwargs,
+                        device=self.device,
+                    ),
+                    **seqlen_agnostic_kwargs,
+                    **model_kwargs,
+                )
+
+        if (self.observability_config is not None
+                and self.observability_config.collect_model_forward_time):
+            model_forward_end.record()
+
+        # Sending KV cache in distributed KV cache transfer setting
+        # NOTE: the send operation is non-blocking
+        if self.need_send_kv(model_input, kv_caches):
+            get_kv_transfer_group().send_kv_caches_and_hidden_states(
+                # model_executable is used to know which layer the current
+                # worker is working on, so that we can send KV for only those
+                # layers.
+                model_executable,
+                model_input,
+                kv_caches,
+                hidden_or_intermediate_states,
+            )
+
+        # Compute the logits in the last pipeline stage.
+        if not get_pp_group().is_last_rank:
+            if (self.is_driver_worker
+                    and hidden_or_intermediate_states is not None
+                    and isinstance(hidden_or_intermediate_states,
+                                   IntermediateTensors)
+                    and self.observability_config is not None
+                    and self.observability_config.collect_model_forward_time):
+                model_forward_end.synchronize()
+                model_forward_time = model_forward_start.elapsed_time(
+                    model_forward_end)
+                orig_model_forward_time = 0.0
+                if intermediate_tensors is not None:
+                    orig_model_forward_time = intermediate_tensors.tensors.get(
+                        "model_forward_time", torch.tensor(0.0)).item()
+                hidden_or_intermediate_states.tensors["model_forward_time"] = (
+                    torch.tensor(model_forward_time + orig_model_forward_time))
+            return hidden_or_intermediate_states
+
+        logits = self.model.compute_logits(hidden_or_intermediate_states,
+                                           model_input.sampling_metadata)
+
+        if self.is_driver_worker:
+            if model_input.async_callback is not None:
+                model_input.async_callback()
+
+            # Sample the next token.
+            assert isinstance(self.sampler, Sampler)
+            orig_include_gpu_probs = self.sampler.include_gpu_probs_tensor
+            if model_input.inputs_embeds is not None:
+                self.sampler.include_gpu_probs_tensor = True
+
+            output: SamplerOutput = self.sampler(
+                logits=logits,
+                sampling_metadata=model_input.sampling_metadata,
+            )
+            if (self.observability_config is not None
+                    and self.observability_config.collect_model_forward_time
+                    and output is not None):
+                model_forward_end.synchronize()
+                model_forward_time = model_forward_start.elapsed_time(
+                    model_forward_end)
+                orig_model_forward_time = 0.0
+                if intermediate_tensors is not None:
+                    orig_model_forward_time = intermediate_tensors.tensors.get(
+                        "model_forward_time", torch.tensor(0.0)).item()
+                # If there are multiple workers, we are still tracking the
+                # latency from the start time of the driver worker to the end
+                # time of the driver worker. The model forward time will then
+                # end up covering the communication time as well.
+                output.model_forward_time = (orig_model_forward_time +
+                                             model_forward_time)
+
+        if model_input.inputs_embeds is not None:
+            if self.is_driver_worker:
+                sampled_token_ids = []
+                valid_outputs = []
+                for sequence_group_output in output.outputs:
+                    if len(sequence_group_output.samples) == 0:
+                        continue
+                    assert len(sequence_group_output.samples) == 1
+                    valid_outputs.append(sequence_group_output)
+                    sampled_token_ids.append(
+                        sequence_group_output.samples[0].output_token)
+                sampled_token_ids = torch.tensor(sampled_token_ids).to(
+                    self.device)
+                sampled_token_ids = broadcast_tensor_dict({
+                    "sampled_token_ids": sampled_token_ids
+                })["sampled_token_ids"]
+            else:
+                sampled_token_ids = broadcast_tensor_dict(
+                )["sampled_token_ids"]
+            if len(sampled_token_ids) > 0:
+                sampled_token_embeds = \
+                    self.model.get_input_embeddings(sampled_token_ids)
+                if self.is_driver_worker:
+                    self.sampler.include_gpu_probs_tensor = \
+                        orig_include_gpu_probs
+
+                    for i, sequence_group_output in enumerate(valid_outputs):
+                        sequence_group_output.samples[0].output_embed = \
+                            sampled_token_embeds[i]
+
+        if not self.is_driver_worker:
+            return []
+
+        if self.return_hidden_states:
+            # we only need to pass hidden states of most recent token
+            assert model_input.sampling_metadata is not None
+            indices = model_input.sampling_metadata.selected_token_indices
+            if model_input.is_prompt:
+                hidden_states = hidden_or_intermediate_states.index_select(
+                    0, indices)
+                output.prefill_hidden_states = hidden_or_intermediate_states
+            elif decode_meta.use_cuda_graph:
+                hidden_states = hidden_or_intermediate_states[:len(indices)]
+            else:
+                hidden_states = hidden_or_intermediate_states
+
+            output.hidden_states = hidden_states
+
+        return [output]
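The model_forward_time accounting above uses CUDA events rather than host-side timers because kernel launches are asynchronous. A self-contained sketch of the same timing pattern, independent of vLLM (assumes a CUDA device is available):

    import torch

    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)

    x = torch.randn(4096, 4096, device="cuda")
    start.record()
    y = x @ x                  # stand-in for the model forward pass
    end.record()

    end.synchronize()          # wait for the recorded work to finish
    elapsed_ms = start.elapsed_time(end)  # milliseconds between the two events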
+
+    def need_recv_kv(self, model_input, kv_caches) -> bool:
+        """Check if we need to receive kv-cache from the other worker.
+        We need to receive KV when
+        1. current vLLM instance is KV cache consumer/decode vLLM instance
+        2. this batch is not a profiling run
+        3. this batch is a prefill run
+
+        Args:
+            model_input: input to the model executable
+            kv_caches: vLLM's paged memory
+        """
+
+        if self.vllm_config.kv_transfer_config is None:
+            return False
+
+        prefill_meta = model_input.attn_metadata.prefill_metadata
+
+        # check if the current run is profiling
+        is_profile_run = (kv_caches[0].numel() == 0)
+        # check if the current run is prefill
+        is_prefill_run = prefill_meta is not None
+
+        return self.vllm_config.kv_transfer_config.is_kv_consumer and (
+            not is_profile_run) and is_prefill_run
+
+    def need_send_kv(self, model_input, kv_caches) -> bool:
+        """Check if we need to send kv-cache to the other worker.
+        We need to send KV when
+        1. current vLLM instance is KV cache producer/prefill vLLM instance
+        2. this batch is not a profiling run
+        3. this batch is a prefill run
+
+        Args:
+            model_input: input to the model executable
+            kv_caches: vLLM's paged memory
+        """
+
+        if self.vllm_config.kv_transfer_config is None:
+            return False
+
+        prefill_meta = model_input.attn_metadata.prefill_metadata
+
+        # check if the current run is profiling
+        is_profile_run = (kv_caches[0].numel() == 0)
+        # check if the current run is prefill
+        is_prefill_run = prefill_meta is not None
+
+        return self.vllm_config.kv_transfer_config.is_kv_producer and (
+            not is_profile_run) and is_prefill_run
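Both helpers reduce to the same predicate shape: transfer KV only on a genuine prefill run, and only when this instance plays the matching role. A standalone restatement (hypothetical helper, not part of the diff):

    def should_transfer_kv(has_matching_role: bool, is_profile_run: bool,
                           is_prefill_run: bool) -> bool:
        # Mirrors need_send_kv / need_recv_kv: only the role flag
        # (producer vs. consumer) differs between the two.
        return has_matching_role and not is_profile_run and is_prefill_run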
+
+
+# NOTE: this is nn.Module so the profiler can properly capture/group
+# kernel calls made within the graph
+class CUDAGraphRunner(nn.Module):
+
+    def __init__(self, model: nn.Module, backend_name: str,
+                 attn_state: AttentionState, is_encoder_decoder_model: bool):
+        super().__init__()
+        self.model = model
+        self.backend_name = backend_name
+        self.attn_state = attn_state
+
+        self.input_buffers: Dict[str, torch.Tensor] = {}
+        self.output_buffers: Dict[str, torch.Tensor] = {}
+
+        self._graph: Optional[torch.cuda.CUDAGraph] = None
+        self._is_encoder_decoder_model = is_encoder_decoder_model
+
+    @property
+    def graph(self):
+        assert self._graph is not None
+        return self._graph
+
+    def capture(
+        self,
+        input_ids: torch.Tensor,
+        inputs_embeds: Optional[torch.Tensor],
+        positions: torch.Tensor,
+        intermediate_inputs: Optional[IntermediateTensors],
+        kv_caches: List[torch.Tensor],
+        attn_metadata: AttentionMetadata,
+        memory_pool: Optional[Tuple[int, int]],
+        stream: torch.cuda.Stream,
+        **kwargs,
+    ):
+        assert self._graph is None
+        # Run the model a few times without capturing the graph.
+        # This is to make sure that the captured graph does not include the
+        # kernel launches for initial benchmarking (e.g., Triton autotune).
+        # Note that one iteration is not enough for torch.compile.
+        for _ in range(_NUM_WARMUP_ITERS):
+            self.model(
+                input_ids=input_ids,
+                inputs_embeds=inputs_embeds,
+                positions=positions,
+                intermediate_tensors=intermediate_inputs,
+                **kwargs,
+            )
+        # Wait for the warm up operations to finish before proceeding with
+        # Graph Capture.
+        torch.cuda.synchronize()
+        # Capture the graph.
+        self._graph = torch.cuda.CUDAGraph()
+        with torch.cuda.graph(self._graph, pool=memory_pool, stream=stream):
+            output_hidden_or_intermediate_states = self.model(
+                input_ids=input_ids,
+                **({
+                    "inputs_embeds": inputs_embeds,
+                } if inputs_embeds is not None else {}),
+                positions=positions,
+                intermediate_tensors=intermediate_inputs,
+                **kwargs,
+            )
+
+            if isinstance(output_hidden_or_intermediate_states, torch.Tensor):
+                hidden_or_intermediate_states = weak_ref_tensor(
+                    output_hidden_or_intermediate_states)
+            elif isinstance(output_hidden_or_intermediate_states,
+                            IntermediateTensors):
+                hidden_or_intermediate_states = IntermediateTensors(
+                    tensors={
+                        key: weak_ref_tensor(value)
+                        for key, value in
+                        output_hidden_or_intermediate_states.tensors.items()
+                    })
+
+            del output_hidden_or_intermediate_states
+            # make sure `output_hidden_or_intermediate_states` is deleted
+            # in the graph's memory pool
+            gc.collect()
+        torch.cuda.synchronize()
+
+        # Save the input and output buffers.
+        self.input_buffers = {
+            "input_ids":
+            input_ids,
+            **({
+                "inputs_embeds": inputs_embeds,
+            } if inputs_embeds is not None else {}),
+            "positions":
+            positions,
+            "kv_caches":
+            kv_caches,
+            **self.attn_state.get_graph_input_buffers(
+                attn_metadata, self._is_encoder_decoder_model),
+            **kwargs,
+        }
+        if intermediate_inputs is not None:
+            self.input_buffers.update(intermediate_inputs.tensors)
+        if get_pp_group().is_last_rank:
+            self.output_buffers = {
+                "hidden_states": hidden_or_intermediate_states
+            }
+        else:
+            self.output_buffers = hidden_or_intermediate_states
+
+    def forward(
+        self,
+        input_ids: torch.Tensor,
+        inputs_embeds: Optional[torch.Tensor],
+        positions: torch.Tensor,
+        intermediate_tensors: Optional[IntermediateTensors],
+        **kwargs,
+    ) -> torch.Tensor:
+        attn_metadata: AttentionMetadata = get_forward_context().attn_metadata
+
+        # Copy the input tensors to the input buffers.
+        self.input_buffers["input_ids"].copy_(input_ids, non_blocking=True)
+        if positions is not None:
+            # In some cases (e.g. MLA), the positions stored in the metadata
+            # are reused but truncated to the original (unpadded) size, so
+            # copy only the valid prefix into the padded buffer.
+            self.input_buffers["positions"][:positions.shape[0]].copy_(
+                positions, non_blocking=True)
+        if inputs_embeds is not None:
+            self.input_buffers["inputs_embeds"][:inputs_embeds.shape[0]].copy_(
+                inputs_embeds, non_blocking=True)
+
+        if self.backend_name != "NO_ATTENTION":
+            self.input_buffers["slot_mapping"].copy_(
+                attn_metadata.slot_mapping, non_blocking=True)
+
+        self.attn_state.prepare_graph_input_buffers(
+            self.input_buffers, attn_metadata, self._is_encoder_decoder_model)
+
+        if "seqlen_agnostic_capture_inputs" in self.input_buffers:
+            self.model.copy_inputs_before_cuda_graphs(self.input_buffers,
+                                                      **kwargs)
+
+        if "previous_hidden_states" in self.input_buffers:
+            self.input_buffers["previous_hidden_states"].copy_(
+                kwargs["previous_hidden_states"], non_blocking=True)
+
+        if intermediate_tensors is not None:
+            for key in intermediate_tensors.tensors:
+                if key != "model_execute_time" and key != "model_forward_time":
+                    self.input_buffers[key].copy_(intermediate_tensors[key],
+                                                  non_blocking=True)
+        if self._is_encoder_decoder_model:
+            self.input_buffers["encoder_input_ids"].copy_(
+                kwargs['encoder_input_ids'], non_blocking=True)
+            self.input_buffers["encoder_positions"].copy_(
+                kwargs['encoder_positions'], non_blocking=True)
+
+        # Run the graph.
+        self.graph.replay()
+        # Return the output tensor.
+        if get_pp_group().is_last_rank:
+            return self.output_buffers["hidden_states"]
+
+        return self.output_buffers
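CUDAGraphRunner follows the standard PyTorch capture/replay recipe: warm up outside the graph, capture into static buffers, then copy fresh inputs into those buffers and replay. A self-contained sketch of that recipe with a toy module standing in for the vLLM model (assumes a CUDA device):

    import torch

    model = torch.nn.Linear(8, 8).cuda()
    static_input = torch.zeros(8, device="cuda")

    # Warm-up iterations so lazy initialization/autotuning is not captured.
    for _ in range(3):
        model(static_input)
    torch.cuda.synchronize()

    graph = torch.cuda.CUDAGraph()
    with torch.cuda.graph(graph):
        static_output = model(static_input)

    # Replay with new data: overwrite the captured input buffer in place.
    static_input.copy_(torch.randn(8, device="cuda"))
    graph.replay()
    result = static_output.clone()  # static_output now holds the new result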