vllm-cpu 0.8.5.post2 (cp310-cp310-manylinux_2_17_x86_64.whl)
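For context on what the filename above encodes, here is a minimal sketch of splitting a wheel name of this kind into its distribution name, version, and compatibility tags. It assumes the third-party packaging library is installed and that the on-disk filename follows the usual PEP 427 form (vllm_cpu-0.8.5.post2-cp310-cp310-manylinux_2_17_x86_64.whl); neither assumption is part of this release itself.

    # Illustrative sketch only: decompose the wheel filename into its fields.
    # Assumes the third-party "packaging" library is available
    # (pip install packaging) and a PEP 427-style filename.
    from packaging.utils import parse_wheel_filename

    wheel = "vllm_cpu-0.8.5.post2-cp310-cp310-manylinux_2_17_x86_64.whl"
    name, version, build, tags = parse_wheel_filename(wheel)

    print(name)     # vllm-cpu (normalized distribution name)
    print(version)  # 0.8.5.post2
    for tag in tags:
        # cp310 interpreter, cp310 ABI, manylinux_2_17_x86_64 platform
        print(tag.interpreter, tag.abi, tag.platform)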

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of vllm-cpu might be problematic.

Files changed (1103)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +170 -0
  3. vllm/_custom_ops.py +1536 -0
  4. vllm/_ipex_ops.py +241 -0
  5. vllm/_version.py +34 -0
  6. vllm/adapter_commons/__init__.py +0 -0
  7. vllm/adapter_commons/layers.py +16 -0
  8. vllm/adapter_commons/models.py +105 -0
  9. vllm/adapter_commons/request.py +25 -0
  10. vllm/adapter_commons/utils.py +92 -0
  11. vllm/adapter_commons/worker_manager.py +38 -0
  12. vllm/assets/__init__.py +0 -0
  13. vllm/assets/audio.py +38 -0
  14. vllm/assets/base.py +40 -0
  15. vllm/assets/image.py +31 -0
  16. vllm/assets/video.py +103 -0
  17. vllm/attention/__init__.py +19 -0
  18. vllm/attention/backends/__init__.py +0 -0
  19. vllm/attention/backends/abstract.py +306 -0
  20. vllm/attention/backends/blocksparse_attn.py +457 -0
  21. vllm/attention/backends/cpu_mla.py +303 -0
  22. vllm/attention/backends/flash_attn.py +999 -0
  23. vllm/attention/backends/flashinfer.py +1092 -0
  24. vllm/attention/backends/flashmla.py +242 -0
  25. vllm/attention/backends/hpu_attn.py +301 -0
  26. vllm/attention/backends/ipex_attn.py +396 -0
  27. vllm/attention/backends/mla/__init__.py +0 -0
  28. vllm/attention/backends/mla/common.py +1444 -0
  29. vllm/attention/backends/pallas.py +346 -0
  30. vllm/attention/backends/placeholder_attn.py +399 -0
  31. vllm/attention/backends/rocm_aiter_mla.py +412 -0
  32. vllm/attention/backends/rocm_flash_attn.py +969 -0
  33. vllm/attention/backends/torch_sdpa.py +691 -0
  34. vllm/attention/backends/triton_mla.py +113 -0
  35. vllm/attention/backends/utils.py +609 -0
  36. vllm/attention/backends/xformers.py +798 -0
  37. vllm/attention/layer.py +443 -0
  38. vllm/attention/ops/__init__.py +0 -0
  39. vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
  40. vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +432 -0
  41. vllm/attention/ops/blocksparse_attention/interface.py +238 -0
  42. vllm/attention/ops/blocksparse_attention/utils.py +244 -0
  43. vllm/attention/ops/chunked_prefill_paged_decode.py +366 -0
  44. vllm/attention/ops/flashmla.py +115 -0
  45. vllm/attention/ops/hpu_paged_attn.py +105 -0
  46. vllm/attention/ops/ipex_attn.py +193 -0
  47. vllm/attention/ops/merge_attn_states.py +42 -0
  48. vllm/attention/ops/nki_flash_attn.py +905 -0
  49. vllm/attention/ops/paged_attn.py +255 -0
  50. vllm/attention/ops/prefix_prefill.py +902 -0
  51. vllm/attention/ops/rocm_aiter_mla.py +42 -0
  52. vllm/attention/ops/rocm_aiter_paged_attn.py +101 -0
  53. vllm/attention/ops/triton_decode_attention.py +675 -0
  54. vllm/attention/ops/triton_flash_attention.py +1375 -0
  55. vllm/attention/ops/triton_merge_attn_states.py +96 -0
  56. vllm/attention/selector.py +186 -0
  57. vllm/attention/utils/fa_utils.py +54 -0
  58. vllm/beam_search.py +82 -0
  59. vllm/benchmarks/__init__.py +0 -0
  60. vllm/benchmarks/datasets.py +831 -0
  61. vllm/benchmarks/endpoint_request_func.py +160 -0
  62. vllm/benchmarks/latency.py +181 -0
  63. vllm/benchmarks/serve.py +925 -0
  64. vllm/benchmarks/throughput.py +608 -0
  65. vllm/benchmarks/utils.py +69 -0
  66. vllm/collect_env.py +795 -0
  67. vllm/compilation/__init__.py +0 -0
  68. vllm/compilation/backends.py +715 -0
  69. vllm/compilation/compiler_interface.py +437 -0
  70. vllm/compilation/counter.py +33 -0
  71. vllm/compilation/decorators.py +249 -0
  72. vllm/compilation/fix_functionalization.py +182 -0
  73. vllm/compilation/fusion.py +617 -0
  74. vllm/compilation/fx_utils.py +60 -0
  75. vllm/compilation/inductor_pass.py +114 -0
  76. vllm/compilation/monitor.py +38 -0
  77. vllm/compilation/multi_output_match.py +108 -0
  78. vllm/compilation/noop_elimination.py +135 -0
  79. vllm/compilation/pass_manager.py +74 -0
  80. vllm/compilation/sequence_parallelism.py +266 -0
  81. vllm/compilation/torch25_custom_graph_pass.py +41 -0
  82. vllm/compilation/vllm_inductor_pass.py +68 -0
  83. vllm/compilation/wrapper.py +129 -0
  84. vllm/config.py +4179 -0
  85. vllm/connections.py +170 -0
  86. vllm/core/__init__.py +0 -0
  87. vllm/core/block/__init__.py +0 -0
  88. vllm/core/block/block_table.py +398 -0
  89. vllm/core/block/common.py +370 -0
  90. vllm/core/block/cpu_gpu_block_allocator.py +440 -0
  91. vllm/core/block/interfaces.py +318 -0
  92. vllm/core/block/naive_block.py +465 -0
  93. vllm/core/block/prefix_caching_block.py +1134 -0
  94. vllm/core/block/utils.py +27 -0
  95. vllm/core/block_manager.py +520 -0
  96. vllm/core/evictor.py +156 -0
  97. vllm/core/interfaces.py +134 -0
  98. vllm/core/placeholder_block_space_manager.py +99 -0
  99. vllm/core/scheduler.py +2060 -0
  100. vllm/device_allocator/__init__.py +0 -0
  101. vllm/device_allocator/cumem.py +280 -0
  102. vllm/distributed/__init__.py +5 -0
  103. vllm/distributed/communication_op.py +40 -0
  104. vllm/distributed/device_communicators/__init__.py +0 -0
  105. vllm/distributed/device_communicators/base_device_communicator.py +151 -0
  106. vllm/distributed/device_communicators/cpu_communicator.py +139 -0
  107. vllm/distributed/device_communicators/cuda_communicator.py +131 -0
  108. vllm/distributed/device_communicators/cuda_wrapper.py +179 -0
  109. vllm/distributed/device_communicators/custom_all_reduce.py +301 -0
  110. vllm/distributed/device_communicators/custom_all_reduce_utils.py +257 -0
  111. vllm/distributed/device_communicators/hpu_communicator.py +45 -0
  112. vllm/distributed/device_communicators/neuron_communicator.py +19 -0
  113. vllm/distributed/device_communicators/pynccl.py +217 -0
  114. vllm/distributed/device_communicators/pynccl_wrapper.py +340 -0
  115. vllm/distributed/device_communicators/shm_broadcast.py +557 -0
  116. vllm/distributed/device_communicators/tpu_communicator.py +93 -0
  117. vllm/distributed/device_communicators/xpu_communicator.py +54 -0
  118. vllm/distributed/kv_transfer/README.md +29 -0
  119. vllm/distributed/kv_transfer/__init__.py +11 -0
  120. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  121. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  122. vllm/distributed/kv_transfer/kv_connector/base.py +127 -0
  123. vllm/distributed/kv_transfer/kv_connector/factory.py +107 -0
  124. vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +98 -0
  125. vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +201 -0
  126. vllm/distributed/kv_transfer/kv_connector/simple_connector.py +328 -0
  127. vllm/distributed/kv_transfer/kv_connector/utils.py +90 -0
  128. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +8 -0
  129. vllm/distributed/kv_transfer/kv_connector/v1/base.py +209 -0
  130. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +131 -0
  131. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +383 -0
  132. vllm/distributed/kv_transfer/kv_connector_agent.py +76 -0
  133. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  134. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +174 -0
  135. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +160 -0
  136. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +236 -0
  137. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  138. vllm/distributed/kv_transfer/kv_pipe/base.py +66 -0
  139. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +279 -0
  140. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +279 -0
  141. vllm/distributed/kv_transfer/kv_transfer_state.py +70 -0
  142. vllm/distributed/parallel_state.py +1209 -0
  143. vllm/distributed/utils.py +366 -0
  144. vllm/engine/__init__.py +0 -0
  145. vllm/engine/arg_utils.py +1724 -0
  146. vllm/engine/async_llm_engine.py +1261 -0
  147. vllm/engine/async_timeout.py +191 -0
  148. vllm/engine/llm_engine.py +2150 -0
  149. vllm/engine/metrics.py +717 -0
  150. vllm/engine/metrics_types.py +96 -0
  151. vllm/engine/multiprocessing/__init__.py +183 -0
  152. vllm/engine/multiprocessing/client.py +745 -0
  153. vllm/engine/multiprocessing/engine.py +450 -0
  154. vllm/engine/output_processor/__init__.py +0 -0
  155. vllm/engine/output_processor/interfaces.py +74 -0
  156. vllm/engine/output_processor/multi_step.py +210 -0
  157. vllm/engine/output_processor/single_step.py +136 -0
  158. vllm/engine/output_processor/stop_checker.py +130 -0
  159. vllm/engine/output_processor/util.py +27 -0
  160. vllm/engine/protocol.py +302 -0
  161. vllm/entrypoints/__init__.py +0 -0
  162. vllm/entrypoints/api_server.py +177 -0
  163. vllm/entrypoints/chat_utils.py +1259 -0
  164. vllm/entrypoints/cli/__init__.py +0 -0
  165. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  166. vllm/entrypoints/cli/benchmark/base.py +38 -0
  167. vllm/entrypoints/cli/benchmark/latency.py +29 -0
  168. vllm/entrypoints/cli/benchmark/main.py +53 -0
  169. vllm/entrypoints/cli/benchmark/serve.py +29 -0
  170. vllm/entrypoints/cli/benchmark/throughput.py +29 -0
  171. vllm/entrypoints/cli/collect_env.py +35 -0
  172. vllm/entrypoints/cli/main.py +59 -0
  173. vllm/entrypoints/cli/openai.py +175 -0
  174. vllm/entrypoints/cli/serve.py +59 -0
  175. vllm/entrypoints/cli/types.py +24 -0
  176. vllm/entrypoints/launcher.py +146 -0
  177. vllm/entrypoints/llm.py +1450 -0
  178. vllm/entrypoints/logger.py +44 -0
  179. vllm/entrypoints/openai/__init__.py +0 -0
  180. vllm/entrypoints/openai/api_server.py +1130 -0
  181. vllm/entrypoints/openai/cli_args.py +296 -0
  182. vllm/entrypoints/openai/logits_processors.py +89 -0
  183. vllm/entrypoints/openai/protocol.py +1806 -0
  184. vllm/entrypoints/openai/run_batch.py +439 -0
  185. vllm/entrypoints/openai/serving_chat.py +1210 -0
  186. vllm/entrypoints/openai/serving_completion.py +557 -0
  187. vllm/entrypoints/openai/serving_embedding.py +245 -0
  188. vllm/entrypoints/openai/serving_engine.py +569 -0
  189. vllm/entrypoints/openai/serving_models.py +314 -0
  190. vllm/entrypoints/openai/serving_pooling.py +237 -0
  191. vllm/entrypoints/openai/serving_score.py +439 -0
  192. vllm/entrypoints/openai/serving_tokenization.py +147 -0
  193. vllm/entrypoints/openai/serving_transcription.py +421 -0
  194. vllm/entrypoints/openai/tool_parsers/__init__.py +19 -0
  195. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +163 -0
  196. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +254 -0
  197. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +232 -0
  198. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +370 -0
  199. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +211 -0
  200. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +303 -0
  201. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +262 -0
  202. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +342 -0
  203. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +110 -0
  204. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +292 -0
  205. vllm/entrypoints/openai/tool_parsers/utils.py +123 -0
  206. vllm/entrypoints/score_utils.py +49 -0
  207. vllm/entrypoints/ssl.py +74 -0
  208. vllm/entrypoints/utils.py +136 -0
  209. vllm/env_override.py +34 -0
  210. vllm/envs.py +800 -0
  211. vllm/executor/__init__.py +0 -0
  212. vllm/executor/executor_base.py +400 -0
  213. vllm/executor/mp_distributed_executor.py +243 -0
  214. vllm/executor/msgspec_utils.py +29 -0
  215. vllm/executor/multiproc_worker_utils.py +312 -0
  216. vllm/executor/ray_distributed_executor.py +700 -0
  217. vllm/executor/ray_utils.py +400 -0
  218. vllm/executor/uniproc_executor.py +141 -0
  219. vllm/forward_context.py +159 -0
  220. vllm/inputs/__init__.py +37 -0
  221. vllm/inputs/data.py +248 -0
  222. vllm/inputs/parse.py +121 -0
  223. vllm/inputs/preprocess.py +745 -0
  224. vllm/inputs/registry.py +212 -0
  225. vllm/jsontree.py +79 -0
  226. vllm/logger.py +210 -0
  227. vllm/logging_utils/__init__.py +7 -0
  228. vllm/logging_utils/formatter.py +17 -0
  229. vllm/logits_process.py +121 -0
  230. vllm/lora/__init__.py +0 -0
  231. vllm/lora/fully_sharded_layers.py +335 -0
  232. vllm/lora/layers.py +1263 -0
  233. vllm/lora/lora.py +198 -0
  234. vllm/lora/models.py +802 -0
  235. vllm/lora/ops/__init__.py +0 -0
  236. vllm/lora/ops/torch_ops/__init__.py +15 -0
  237. vllm/lora/ops/torch_ops/lora_ops.py +115 -0
  238. vllm/lora/ops/triton_ops/__init__.py +11 -0
  239. vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
  240. vllm/lora/ops/triton_ops/lora_expand.py +293 -0
  241. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +147 -0
  242. vllm/lora/ops/triton_ops/lora_shrink.py +247 -0
  243. vllm/lora/ops/triton_ops/utils.py +121 -0
  244. vllm/lora/peft_helper.py +115 -0
  245. vllm/lora/punica_wrapper/__init__.py +9 -0
  246. vllm/lora/punica_wrapper/punica_base.py +483 -0
  247. vllm/lora/punica_wrapper/punica_cpu.py +348 -0
  248. vllm/lora/punica_wrapper/punica_gpu.py +289 -0
  249. vllm/lora/punica_wrapper/punica_hpu.py +144 -0
  250. vllm/lora/punica_wrapper/punica_selector.py +20 -0
  251. vllm/lora/punica_wrapper/utils.py +161 -0
  252. vllm/lora/request.py +97 -0
  253. vllm/lora/resolver.py +83 -0
  254. vllm/lora/utils.py +237 -0
  255. vllm/lora/worker_manager.py +251 -0
  256. vllm/model_executor/__init__.py +15 -0
  257. vllm/model_executor/custom_op.py +153 -0
  258. vllm/model_executor/guided_decoding/__init__.py +180 -0
  259. vllm/model_executor/guided_decoding/guidance_decoding.py +63 -0
  260. vllm/model_executor/guided_decoding/guidance_logits_processors.py +85 -0
  261. vllm/model_executor/guided_decoding/guided_fields.py +42 -0
  262. vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +66 -0
  263. vllm/model_executor/guided_decoding/outlines_decoding.py +154 -0
  264. vllm/model_executor/guided_decoding/outlines_logits_processors.py +271 -0
  265. vllm/model_executor/guided_decoding/reasoner/__init__.py +35 -0
  266. vllm/model_executor/guided_decoding/utils.py +241 -0
  267. vllm/model_executor/guided_decoding/xgrammar_decoding.py +425 -0
  268. vllm/model_executor/layers/__init__.py +0 -0
  269. vllm/model_executor/layers/activation.py +368 -0
  270. vllm/model_executor/layers/fused_moe/__init__.py +51 -0
  271. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  272. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  273. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  274. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  275. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  276. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  277. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  278. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  279. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  280. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  281. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  282. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  283. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  284. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  285. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  286. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  287. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  288. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  289. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  290. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  291. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  292. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  293. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  294. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  295. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  296. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  297. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  298. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  299. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  300. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  301. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  302. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  303. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  304. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  305. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  306. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  307. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  308. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  309. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  310. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  311. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  312. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  313. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  314. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  315. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  316. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  317. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  318. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  319. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  320. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  321. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  322. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  323. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  324. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  325. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  326. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  327. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  328. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  329. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  330. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  331. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  332. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  333. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  334. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  335. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  336. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  337. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  338. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  339. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  425. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  426. vllm/model_executor/layers/fused_moe/cutlass_moe.py +180 -0
  427. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +294 -0
  428. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +374 -0
  429. vllm/model_executor/layers/fused_moe/fused_moe.py +1539 -0
  430. vllm/model_executor/layers/fused_moe/layer.py +949 -0
  431. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +243 -0
  432. vllm/model_executor/layers/fused_moe/moe_pallas.py +64 -0
  433. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +59 -0
  434. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +416 -0
  435. vllm/model_executor/layers/fused_moe/utils.py +48 -0
  436. vllm/model_executor/layers/layernorm.py +277 -0
  437. vllm/model_executor/layers/lightning_attn.py +651 -0
  438. vllm/model_executor/layers/linear.py +1518 -0
  439. vllm/model_executor/layers/logits_processor.py +196 -0
  440. vllm/model_executor/layers/mamba/__init__.py +0 -0
  441. vllm/model_executor/layers/mamba/mamba2_metadata.py +109 -0
  442. vllm/model_executor/layers/mamba/mamba_mixer.py +244 -0
  443. vllm/model_executor/layers/mamba/mamba_mixer2.py +538 -0
  444. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  445. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +104 -0
  446. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +415 -0
  447. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +261 -0
  448. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +588 -0
  449. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +750 -0
  450. vllm/model_executor/layers/mamba/ops/ssd_combined.py +231 -0
  451. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +205 -0
  452. vllm/model_executor/layers/pooler.py +336 -0
  453. vllm/model_executor/layers/quantization/__init__.py +153 -0
  454. vllm/model_executor/layers/quantization/aqlm.py +374 -0
  455. vllm/model_executor/layers/quantization/awq.py +184 -0
  456. vllm/model_executor/layers/quantization/awq_marlin.py +518 -0
  457. vllm/model_executor/layers/quantization/awq_triton.py +319 -0
  458. vllm/model_executor/layers/quantization/base_config.py +145 -0
  459. vllm/model_executor/layers/quantization/bitblas.py +459 -0
  460. vllm/model_executor/layers/quantization/bitsandbytes.py +396 -0
  461. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  462. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +624 -0
  463. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1100 -0
  464. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +20 -0
  465. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +357 -0
  466. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +54 -0
  467. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +159 -0
  468. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +119 -0
  469. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +149 -0
  470. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +110 -0
  471. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +200 -0
  472. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +205 -0
  473. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +213 -0
  474. vllm/model_executor/layers/quantization/deepspeedfp.py +193 -0
  475. vllm/model_executor/layers/quantization/experts_int8.py +194 -0
  476. vllm/model_executor/layers/quantization/fbgemm_fp8.py +168 -0
  477. vllm/model_executor/layers/quantization/fp8.py +832 -0
  478. vllm/model_executor/layers/quantization/gguf.py +408 -0
  479. vllm/model_executor/layers/quantization/gptq.py +276 -0
  480. vllm/model_executor/layers/quantization/gptq_bitblas.py +438 -0
  481. vllm/model_executor/layers/quantization/gptq_marlin.py +643 -0
  482. vllm/model_executor/layers/quantization/gptq_marlin_24.py +295 -0
  483. vllm/model_executor/layers/quantization/hqq_marlin.py +328 -0
  484. vllm/model_executor/layers/quantization/ipex_quant.py +250 -0
  485. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  486. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +89 -0
  487. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +82 -0
  488. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
  489. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +299 -0
  490. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +142 -0
  491. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +119 -0
  492. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +132 -0
  493. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +66 -0
  494. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +86 -0
  495. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +119 -0
  496. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +136 -0
  497. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +40 -0
  498. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +104 -0
  499. vllm/model_executor/layers/quantization/kv_cache.py +137 -0
  500. vllm/model_executor/layers/quantization/marlin.py +259 -0
  501. vllm/model_executor/layers/quantization/modelopt.py +410 -0
  502. vllm/model_executor/layers/quantization/moe_wna16.py +447 -0
  503. vllm/model_executor/layers/quantization/neuron_quant.py +67 -0
  504. vllm/model_executor/layers/quantization/ptpc_fp8.py +125 -0
  505. vllm/model_executor/layers/quantization/qqq.py +273 -0
  506. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  507. vllm/model_executor/layers/quantization/quark/quark.py +385 -0
  508. vllm/model_executor/layers/quantization/quark/quark_moe.py +236 -0
  509. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +7 -0
  510. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +54 -0
  511. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +142 -0
  512. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +121 -0
  513. vllm/model_executor/layers/quantization/quark/utils.py +102 -0
  514. vllm/model_executor/layers/quantization/schema.py +85 -0
  515. vllm/model_executor/layers/quantization/torchao.py +127 -0
  516. vllm/model_executor/layers/quantization/tpu_int8.py +119 -0
  517. vllm/model_executor/layers/quantization/utils/__init__.py +5 -0
  518. vllm/model_executor/layers/quantization/utils/allspark_utils.py +51 -0
  519. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +198 -0
  520. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  521. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  522. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  523. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  524. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  525. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  526. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  527. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  528. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  529. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  530. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  531. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  532. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  533. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  534. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  535. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  536. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  537. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  538. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  539. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  540. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  541. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  542. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  543. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  544. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  545. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  546. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  547. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  548. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  549. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  550. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  551. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  552. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  553. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  554. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  555. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  556. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  557. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  558. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  559. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  560. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  561. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  562. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  563. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  564. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  565. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  566. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  567. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  568. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  569. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  570. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  571. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  572. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  573. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  574. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  575. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  576. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  577. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  578. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  579. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  580. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  581. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  582. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  583. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  584. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  585. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  586. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  587. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  588. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  589. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  590. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  591. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  592. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  593. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  594. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  595. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  596. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  597. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  598. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  599. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  600. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  601. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  602. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  603. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  604. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  605. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  606. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  607. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  608. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  609. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  610. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  611. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  612. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  613. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  614. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  615. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  616. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  617. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  618. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  619. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  620. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  621. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  622. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  623. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  624. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  625. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  626. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  627. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  628. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  629. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  630. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  631. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  632. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  633. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  634. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  635. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  636. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  637. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  638. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  639. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  640. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  641. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  642. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  643. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  644. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  645. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  646. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  647. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  648. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  649. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  650. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  651. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  652. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  653. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  654. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  655. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  656. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  657. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  658. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  659. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  660. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  661. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  662. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  663. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  664. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  665. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  666. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  667. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  668. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  669. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  670. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  671. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  672. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  673. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  674. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  675. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  676. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  677. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  678. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  679. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  680. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  681. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  682. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  683. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  684. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  685. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  686. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  687. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  688. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  689. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  690. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  691. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  722. vllm/model_executor/layers/quantization/utils/fp8_utils.py +523 -0
  723. vllm/model_executor/layers/quantization/utils/gptq_utils.py +94 -0
  724. vllm/model_executor/layers/quantization/utils/int8_utils.py +459 -0
  725. vllm/model_executor/layers/quantization/utils/layer_utils.py +39 -0
  726. vllm/model_executor/layers/quantization/utils/machete_utils.py +32 -0
  727. vllm/model_executor/layers/quantization/utils/marlin_utils.py +413 -0
  728. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +110 -0
  729. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +164 -0
  730. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
  731. vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +127 -0
  732. vllm/model_executor/layers/quantization/utils/quant_utils.py +571 -0
  733. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +404 -0
  734. vllm/model_executor/layers/rejection_sampler.py +400 -0
  735. vllm/model_executor/layers/resampler.py +269 -0
  736. vllm/model_executor/layers/rotary_embedding.py +1598 -0
  737. vllm/model_executor/layers/sampler.py +1221 -0
  738. vllm/model_executor/layers/spec_decode_base_sampler.py +258 -0
  739. vllm/model_executor/layers/typical_acceptance_sampler.py +172 -0
  740. vllm/model_executor/layers/utils.py +99 -0
  741. vllm/model_executor/layers/vocab_parallel_embedding.py +485 -0
  742. vllm/model_executor/model_loader/__init__.py +20 -0
  743. vllm/model_executor/model_loader/loader.py +1542 -0
  744. vllm/model_executor/model_loader/neuron.py +243 -0
  745. vllm/model_executor/model_loader/tensorizer.py +468 -0
  746. vllm/model_executor/model_loader/utils.py +171 -0
  747. vllm/model_executor/model_loader/weight_utils.py +749 -0
  748. vllm/model_executor/models/__init__.py +27 -0
  749. vllm/model_executor/models/adapters.py +247 -0
  750. vllm/model_executor/models/arctic.py +559 -0
  751. vllm/model_executor/models/aria.py +656 -0
  752. vllm/model_executor/models/aya_vision.py +461 -0
  753. vllm/model_executor/models/baichuan.py +469 -0
  754. vllm/model_executor/models/bamba.py +542 -0
  755. vllm/model_executor/models/bart.py +936 -0
  756. vllm/model_executor/models/bert.py +725 -0
  757. vllm/model_executor/models/blip.py +337 -0
  758. vllm/model_executor/models/blip2.py +717 -0
  759. vllm/model_executor/models/bloom.py +358 -0
  760. vllm/model_executor/models/chameleon.py +1135 -0
  761. vllm/model_executor/models/chatglm.py +476 -0
  762. vllm/model_executor/models/clip.py +410 -0
  763. vllm/model_executor/models/commandr.py +466 -0
  764. vllm/model_executor/models/constant_size_cache.py +136 -0
  765. vllm/model_executor/models/dbrx.py +469 -0
  766. vllm/model_executor/models/deepseek.py +484 -0
  767. vllm/model_executor/models/deepseek_mtp.py +266 -0
  768. vllm/model_executor/models/deepseek_v2.py +830 -0
  769. vllm/model_executor/models/deepseek_vl2.py +647 -0
  770. vllm/model_executor/models/eagle.py +247 -0
  771. vllm/model_executor/models/exaone.py +548 -0
  772. vllm/model_executor/models/fairseq2_llama.py +153 -0
  773. vllm/model_executor/models/falcon.py +508 -0
  774. vllm/model_executor/models/florence2.py +1102 -0
  775. vllm/model_executor/models/fuyu.py +388 -0
  776. vllm/model_executor/models/gemma.py +423 -0
  777. vllm/model_executor/models/gemma2.py +423 -0
  778. vllm/model_executor/models/gemma3.py +531 -0
  779. vllm/model_executor/models/gemma3_mm.py +716 -0
  780. vllm/model_executor/models/glm.py +22 -0
  781. vllm/model_executor/models/glm4.py +303 -0
  782. vllm/model_executor/models/glm4v.py +647 -0
  783. vllm/model_executor/models/gpt2.py +313 -0
  784. vllm/model_executor/models/gpt_bigcode.py +336 -0
  785. vllm/model_executor/models/gpt_j.py +337 -0
  786. vllm/model_executor/models/gpt_neox.py +330 -0
  787. vllm/model_executor/models/granite.py +494 -0
  788. vllm/model_executor/models/granite_speech.py +777 -0
  789. vllm/model_executor/models/granitemoe.py +435 -0
  790. vllm/model_executor/models/granitemoeshared.py +339 -0
  791. vllm/model_executor/models/gritlm.py +245 -0
  792. vllm/model_executor/models/grok1.py +560 -0
  793. vllm/model_executor/models/h2ovl.py +542 -0
  794. vllm/model_executor/models/idefics2_vision_model.py +387 -0
  795. vllm/model_executor/models/idefics3.py +767 -0
  796. vllm/model_executor/models/interfaces.py +569 -0
  797. vllm/model_executor/models/interfaces_base.py +163 -0
  798. vllm/model_executor/models/intern_vit.py +476 -0
  799. vllm/model_executor/models/internlm2.py +453 -0
  800. vllm/model_executor/models/internlm2_ve.py +146 -0
  801. vllm/model_executor/models/internvl.py +945 -0
  802. vllm/model_executor/models/jais.py +371 -0
  803. vllm/model_executor/models/jamba.py +590 -0
  804. vllm/model_executor/models/kimi_vl.py +577 -0
  805. vllm/model_executor/models/llama.py +619 -0
  806. vllm/model_executor/models/llama4.py +530 -0
  807. vllm/model_executor/models/llama_eagle.py +152 -0
  808. vllm/model_executor/models/llama_eagle3.py +232 -0
  809. vllm/model_executor/models/llava.py +869 -0
  810. vllm/model_executor/models/llava_next.py +582 -0
  811. vllm/model_executor/models/llava_next_video.py +470 -0
  812. vllm/model_executor/models/llava_onevision.py +954 -0
  813. vllm/model_executor/models/mamba.py +271 -0
  814. vllm/model_executor/models/mamba2.py +302 -0
  815. vllm/model_executor/models/mamba_cache.py +76 -0
  816. vllm/model_executor/models/medusa.py +210 -0
  817. vllm/model_executor/models/minicpm.py +592 -0
  818. vllm/model_executor/models/minicpm3.py +229 -0
  819. vllm/model_executor/models/minicpmo.py +725 -0
  820. vllm/model_executor/models/minicpmv.py +1287 -0
  821. vllm/model_executor/models/minimax_cache.py +35 -0
  822. vllm/model_executor/models/minimax_text_01.py +1261 -0
  823. vllm/model_executor/models/mistral3.py +598 -0
  824. vllm/model_executor/models/mixtral.py +485 -0
  825. vllm/model_executor/models/mixtral_quant.py +447 -0
  826. vllm/model_executor/models/mllama.py +1623 -0
  827. vllm/model_executor/models/mllama4.py +838 -0
  828. vllm/model_executor/models/mlp_speculator.py +205 -0
  829. vllm/model_executor/models/modernbert.py +325 -0
  830. vllm/model_executor/models/module_mapping.py +71 -0
  831. vllm/model_executor/models/molmo.py +1567 -0
  832. vllm/model_executor/models/moonvit.py +628 -0
  833. vllm/model_executor/models/mpt.py +329 -0
  834. vllm/model_executor/models/nemotron.py +506 -0
  835. vllm/model_executor/models/nemotron_nas.py +446 -0
  836. vllm/model_executor/models/nvlm_d.py +212 -0
  837. vllm/model_executor/models/olmo.py +390 -0
  838. vllm/model_executor/models/olmo2.py +412 -0
  839. vllm/model_executor/models/olmoe.py +449 -0
  840. vllm/model_executor/models/opt.py +410 -0
  841. vllm/model_executor/models/orion.py +356 -0
  842. vllm/model_executor/models/paligemma.py +397 -0
  843. vllm/model_executor/models/persimmon.py +342 -0
  844. vllm/model_executor/models/phi.py +354 -0
  845. vllm/model_executor/models/phi3.py +18 -0
  846. vllm/model_executor/models/phi3_small.py +463 -0
  847. vllm/model_executor/models/phi3v.py +722 -0
  848. vllm/model_executor/models/phi4mm.py +1263 -0
  849. vllm/model_executor/models/phi4mm_audio.py +1232 -0
  850. vllm/model_executor/models/phi4mm_utils.py +1883 -0
  851. vllm/model_executor/models/phimoe.py +666 -0
  852. vllm/model_executor/models/pixtral.py +1281 -0
  853. vllm/model_executor/models/plamo2.py +736 -0
  854. vllm/model_executor/models/prithvi_geospatial_mae.py +231 -0
  855. vllm/model_executor/models/qwen.py +360 -0
  856. vllm/model_executor/models/qwen2.py +552 -0
  857. vllm/model_executor/models/qwen2_5_omni_thinker.py +901 -0
  858. vllm/model_executor/models/qwen2_5_vl.py +1136 -0
  859. vllm/model_executor/models/qwen2_audio.py +402 -0
  860. vllm/model_executor/models/qwen2_moe.py +531 -0
  861. vllm/model_executor/models/qwen2_rm.py +130 -0
  862. vllm/model_executor/models/qwen2_vl.py +1409 -0
  863. vllm/model_executor/models/qwen3.py +319 -0
  864. vllm/model_executor/models/qwen3_moe.py +528 -0
  865. vllm/model_executor/models/qwen_vl.py +784 -0
  866. vllm/model_executor/models/registry.py +611 -0
  867. vllm/model_executor/models/roberta.py +332 -0
  868. vllm/model_executor/models/siglip.py +522 -0
  869. vllm/model_executor/models/skyworkr1v.py +949 -0
  870. vllm/model_executor/models/smolvlm.py +51 -0
  871. vllm/model_executor/models/solar.py +504 -0
  872. vllm/model_executor/models/stablelm.py +349 -0
  873. vllm/model_executor/models/starcoder2.py +355 -0
  874. vllm/model_executor/models/telechat2.py +139 -0
  875. vllm/model_executor/models/teleflm.py +78 -0
  876. vllm/model_executor/models/transformers.py +442 -0
  877. vllm/model_executor/models/ultravox.py +655 -0
  878. vllm/model_executor/models/utils.py +714 -0
  879. vllm/model_executor/models/vision.py +149 -0
  880. vllm/model_executor/models/whisper.py +746 -0
  881. vllm/model_executor/models/zamba2.py +1008 -0
  882. vllm/model_executor/parameter.py +458 -0
  883. vllm/model_executor/pooling_metadata.py +71 -0
  884. vllm/model_executor/sampling_metadata.py +596 -0
  885. vllm/model_executor/utils.py +53 -0
  886. vllm/multimodal/__init__.py +31 -0
  887. vllm/multimodal/audio.py +105 -0
  888. vllm/multimodal/base.py +218 -0
  889. vllm/multimodal/hasher.py +103 -0
  890. vllm/multimodal/image.py +77 -0
  891. vllm/multimodal/inputs.py +843 -0
  892. vllm/multimodal/parse.py +454 -0
  893. vllm/multimodal/processing.py +1760 -0
  894. vllm/multimodal/profiling.py +274 -0
  895. vllm/multimodal/registry.py +321 -0
  896. vllm/multimodal/utils.py +386 -0
  897. vllm/multimodal/video.py +166 -0
  898. vllm/outputs.py +521 -0
  899. vllm/platforms/__init__.py +286 -0
  900. vllm/platforms/cpu.py +182 -0
  901. vllm/platforms/cuda.py +463 -0
  902. vllm/platforms/hpu.py +94 -0
  903. vllm/platforms/interface.py +427 -0
  904. vllm/platforms/neuron.py +69 -0
  905. vllm/platforms/rocm.py +346 -0
  906. vllm/platforms/tpu.py +174 -0
  907. vllm/platforms/xpu.py +142 -0
  908. vllm/plugins/__init__.py +82 -0
  909. vllm/pooling_params.py +53 -0
  910. vllm/profiler/__init__.py +7 -0
  911. vllm/profiler/layerwise_profile.py +374 -0
  912. vllm/profiler/utils.py +147 -0
  913. vllm/prompt_adapter/__init__.py +0 -0
  914. vllm/prompt_adapter/layers.py +82 -0
  915. vllm/prompt_adapter/models.py +357 -0
  916. vllm/prompt_adapter/request.py +36 -0
  917. vllm/prompt_adapter/utils.py +97 -0
  918. vllm/prompt_adapter/worker_manager.py +178 -0
  919. vllm/py.typed +2 -0
  920. vllm/reasoning/__init__.py +12 -0
  921. vllm/reasoning/abs_reasoning_parsers.py +189 -0
  922. vllm/reasoning/deepseek_r1_reasoning_parser.py +172 -0
  923. vllm/reasoning/granite_reasoning_parser.py +362 -0
  924. vllm/sampling_params.py +598 -0
  925. vllm/scalar_type.py +335 -0
  926. vllm/scripts.py +14 -0
  927. vllm/sequence.py +1486 -0
  928. vllm/spec_decode/__init__.py +0 -0
  929. vllm/spec_decode/batch_expansion.py +505 -0
  930. vllm/spec_decode/draft_model_runner.py +335 -0
  931. vllm/spec_decode/interfaces.py +98 -0
  932. vllm/spec_decode/medusa_worker.py +137 -0
  933. vllm/spec_decode/metrics.py +212 -0
  934. vllm/spec_decode/mlp_speculator_worker.py +93 -0
  935. vllm/spec_decode/mqa_scorer.py +159 -0
  936. vllm/spec_decode/multi_step_worker.py +416 -0
  937. vllm/spec_decode/ngram_worker.py +195 -0
  938. vllm/spec_decode/proposer_worker_base.py +58 -0
  939. vllm/spec_decode/smaller_tp_proposer_worker.py +194 -0
  940. vllm/spec_decode/spec_decode_worker.py +1324 -0
  941. vllm/spec_decode/target_model_runner.py +44 -0
  942. vllm/spec_decode/top1_proposer.py +274 -0
  943. vllm/spec_decode/util.py +276 -0
  944. vllm/test_utils.py +129 -0
  945. vllm/third_party/__init__.py +0 -0
  946. vllm/third_party/pynvml.py +6139 -0
  947. vllm/tracing.py +130 -0
  948. vllm/transformers_utils/__init__.py +19 -0
  949. vllm/transformers_utils/config.py +813 -0
  950. vllm/transformers_utils/configs/__init__.py +52 -0
  951. vllm/transformers_utils/configs/arctic.py +206 -0
  952. vllm/transformers_utils/configs/chatglm.py +71 -0
  953. vllm/transformers_utils/configs/cohere2.py +194 -0
  954. vllm/transformers_utils/configs/dbrx.py +280 -0
  955. vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
  956. vllm/transformers_utils/configs/eagle.py +65 -0
  957. vllm/transformers_utils/configs/exaone.py +191 -0
  958. vllm/transformers_utils/configs/falcon.py +89 -0
  959. vllm/transformers_utils/configs/h2ovl.py +15 -0
  960. vllm/transformers_utils/configs/internvl.py +53 -0
  961. vllm/transformers_utils/configs/jais.py +237 -0
  962. vllm/transformers_utils/configs/kimi_vl.py +36 -0
  963. vllm/transformers_utils/configs/medusa.py +62 -0
  964. vllm/transformers_utils/configs/mllama.py +30 -0
  965. vllm/transformers_utils/configs/mlp_speculator.py +67 -0
  966. vllm/transformers_utils/configs/moonvit.py +32 -0
  967. vllm/transformers_utils/configs/mpt.py +179 -0
  968. vllm/transformers_utils/configs/nemotron.py +204 -0
  969. vllm/transformers_utils/configs/nvlm_d.py +14 -0
  970. vllm/transformers_utils/configs/skyworkr1v.py +53 -0
  971. vllm/transformers_utils/configs/solar.py +246 -0
  972. vllm/transformers_utils/configs/telechat2.py +63 -0
  973. vllm/transformers_utils/configs/ultravox.py +107 -0
  974. vllm/transformers_utils/detokenizer.py +167 -0
  975. vllm/transformers_utils/detokenizer_utils.py +188 -0
  976. vllm/transformers_utils/processor.py +210 -0
  977. vllm/transformers_utils/processors/__init__.py +6 -0
  978. vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
  979. vllm/transformers_utils/s3_utils.py +161 -0
  980. vllm/transformers_utils/tokenizer.py +291 -0
  981. vllm/transformers_utils/tokenizer_base.py +146 -0
  982. vllm/transformers_utils/tokenizer_group.py +110 -0
  983. vllm/transformers_utils/tokenizers/__init__.py +9 -0
  984. vllm/transformers_utils/tokenizers/mistral.py +483 -0
  985. vllm/transformers_utils/utils.py +98 -0
  986. vllm/triton_utils/__init__.py +5 -0
  987. vllm/triton_utils/importing.py +53 -0
  988. vllm/usage/__init__.py +0 -0
  989. vllm/usage/usage_lib.py +255 -0
  990. vllm/utils.py +2692 -0
  991. vllm/v1/__init__.py +0 -0
  992. vllm/v1/attention/__init__.py +0 -0
  993. vllm/v1/attention/backends/__init__.py +0 -0
  994. vllm/v1/attention/backends/flash_attn.py +783 -0
  995. vllm/v1/attention/backends/flashinfer.py +638 -0
  996. vllm/v1/attention/backends/mla/__init__.py +0 -0
  997. vllm/v1/attention/backends/mla/common.py +974 -0
  998. vllm/v1/attention/backends/mla/flashmla.py +149 -0
  999. vllm/v1/attention/backends/mla/triton_mla.py +118 -0
  1000. vllm/v1/attention/backends/pallas.py +221 -0
  1001. vllm/v1/attention/backends/triton_attn.py +198 -0
  1002. vllm/v1/core/__init__.py +0 -0
  1003. vllm/v1/core/block_pool.py +281 -0
  1004. vllm/v1/core/encoder_cache_manager.py +149 -0
  1005. vllm/v1/core/kv_cache_manager.py +385 -0
  1006. vllm/v1/core/kv_cache_utils.py +744 -0
  1007. vllm/v1/core/sched/__init__.py +0 -0
  1008. vllm/v1/core/sched/interface.py +134 -0
  1009. vllm/v1/core/sched/output.py +126 -0
  1010. vllm/v1/core/sched/scheduler.py +838 -0
  1011. vllm/v1/core/sched/utils.py +22 -0
  1012. vllm/v1/core/specialized_manager.py +161 -0
  1013. vllm/v1/engine/__init__.py +166 -0
  1014. vllm/v1/engine/async_llm.py +532 -0
  1015. vllm/v1/engine/core.py +701 -0
  1016. vllm/v1/engine/core_client.py +942 -0
  1017. vllm/v1/engine/detokenizer.py +260 -0
  1018. vllm/v1/engine/exceptions.py +16 -0
  1019. vllm/v1/engine/llm_engine.py +285 -0
  1020. vllm/v1/engine/logprobs.py +198 -0
  1021. vllm/v1/engine/mm_input_cache.py +82 -0
  1022. vllm/v1/engine/output_processor.py +420 -0
  1023. vllm/v1/engine/parallel_sampling.py +132 -0
  1024. vllm/v1/engine/processor.py +387 -0
  1025. vllm/v1/executor/__init__.py +0 -0
  1026. vllm/v1/executor/abstract.py +112 -0
  1027. vllm/v1/executor/multiproc_executor.py +480 -0
  1028. vllm/v1/executor/ray_distributed_executor.py +61 -0
  1029. vllm/v1/kv_cache_interface.py +166 -0
  1030. vllm/v1/metrics/__init__.py +0 -0
  1031. vllm/v1/metrics/loggers.py +498 -0
  1032. vllm/v1/metrics/stats.py +238 -0
  1033. vllm/v1/outputs.py +111 -0
  1034. vllm/v1/request.py +178 -0
  1035. vllm/v1/sample/__init__.py +0 -0
  1036. vllm/v1/sample/metadata.py +43 -0
  1037. vllm/v1/sample/ops/__init__.py +0 -0
  1038. vllm/v1/sample/ops/bad_words.py +38 -0
  1039. vllm/v1/sample/ops/penalties.py +58 -0
  1040. vllm/v1/sample/ops/topk_topp_sampler.py +315 -0
  1041. vllm/v1/sample/rejection_sampler.py +631 -0
  1042. vllm/v1/sample/sampler.py +270 -0
  1043. vllm/v1/sample/tpu/__init__.py +0 -0
  1044. vllm/v1/sample/tpu/metadata.py +118 -0
  1045. vllm/v1/sample/tpu/sampler.py +154 -0
  1046. vllm/v1/serial_utils.py +274 -0
  1047. vllm/v1/spec_decode/__init__.py +0 -0
  1048. vllm/v1/spec_decode/eagle.py +318 -0
  1049. vllm/v1/spec_decode/metadata.py +61 -0
  1050. vllm/v1/spec_decode/metrics.py +164 -0
  1051. vllm/v1/spec_decode/ngram_proposer.py +131 -0
  1052. vllm/v1/spec_decode/utils.py +18 -0
  1053. vllm/v1/stats/__init__.py +0 -0
  1054. vllm/v1/stats/common.py +453 -0
  1055. vllm/v1/structured_output/__init__.py +113 -0
  1056. vllm/v1/structured_output/backend_guidance.py +215 -0
  1057. vllm/v1/structured_output/backend_types.py +96 -0
  1058. vllm/v1/structured_output/backend_xgrammar.py +299 -0
  1059. vllm/v1/structured_output/request.py +84 -0
  1060. vllm/v1/structured_output/utils.py +174 -0
  1061. vllm/v1/utils.py +249 -0
  1062. vllm/v1/worker/__init__.py +0 -0
  1063. vllm/v1/worker/block_table.py +87 -0
  1064. vllm/v1/worker/gpu_input_batch.py +677 -0
  1065. vllm/v1/worker/gpu_model_runner.py +1776 -0
  1066. vllm/v1/worker/gpu_worker.py +349 -0
  1067. vllm/v1/worker/lora_model_runner_mixin.py +145 -0
  1068. vllm/v1/worker/tpu_model_runner.py +1419 -0
  1069. vllm/v1/worker/tpu_worker.py +260 -0
  1070. vllm/v1/worker/utils.py +74 -0
  1071. vllm/v1/worker/worker_base.py +64 -0
  1072. vllm/version.py +40 -0
  1073. vllm/vllm_flash_attn/.gitkeep +0 -0
  1074. vllm/worker/__init__.py +0 -0
  1075. vllm/worker/cache_engine.py +144 -0
  1076. vllm/worker/cpu_enc_dec_model_runner.py +323 -0
  1077. vllm/worker/cpu_model_runner.py +668 -0
  1078. vllm/worker/cpu_pooling_model_runner.py +122 -0
  1079. vllm/worker/cpu_worker.py +400 -0
  1080. vllm/worker/enc_dec_model_runner.py +542 -0
  1081. vllm/worker/hpu_model_runner.py +2221 -0
  1082. vllm/worker/hpu_worker.py +483 -0
  1083. vllm/worker/model_runner.py +2056 -0
  1084. vllm/worker/model_runner_base.py +281 -0
  1085. vllm/worker/multi_step_hpu_worker.py +122 -0
  1086. vllm/worker/multi_step_model_runner.py +908 -0
  1087. vllm/worker/multi_step_tpu_worker.py +107 -0
  1088. vllm/worker/multi_step_worker.py +196 -0
  1089. vllm/worker/neuron_model_runner.py +336 -0
  1090. vllm/worker/neuron_worker.py +138 -0
  1091. vllm/worker/pooling_model_runner.py +200 -0
  1092. vllm/worker/tpu_model_runner.py +908 -0
  1093. vllm/worker/tpu_worker.py +332 -0
  1094. vllm/worker/utils.py +52 -0
  1095. vllm/worker/worker.py +570 -0
  1096. vllm/worker/worker_base.py +644 -0
  1097. vllm/worker/xpu_model_runner.py +603 -0
  1098. vllm/worker/xpu_worker.py +185 -0
  1099. vllm_cpu-0.8.5.post2.dist-info/METADATA +309 -0
  1100. vllm_cpu-0.8.5.post2.dist-info/RECORD +1103 -0
  1101. vllm_cpu-0.8.5.post2.dist-info/WHEEL +5 -0
  1102. vllm_cpu-0.8.5.post2.dist-info/entry_points.txt +2 -0
  1103. vllm_cpu-0.8.5.post2.dist-info/top_level.txt +1 -0
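vllm/v1/worker/gpu_model_runner.py +1776 -0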
@@ -0,0 +1,1776 @@
+# SPDX-License-Identifier: Apache-2.0
+
+import gc
+import time
+import weakref
+from typing import TYPE_CHECKING, Optional, Union
+
+import numpy as np
+import torch
+import torch.distributed
+import torch.nn as nn
+
+from vllm.attention import AttentionType, get_attn_backend
+from vllm.attention.layer import Attention
+from vllm.config import (CompilationLevel, VllmConfig,
+                         get_layers_from_vllm_config)
+from vllm.distributed.kv_transfer import (get_kv_transfer_group,
+                                          has_kv_transfer_group)
+from vllm.distributed.parallel_state import get_pp_group, graph_capture
+from vllm.forward_context import set_forward_context
+from vllm.logger import init_logger
+from vllm.model_executor.layers.rotary_embedding import MRotaryEmbedding
+from vllm.model_executor.model_loader import get_model
+from vllm.multimodal import MULTIMODAL_REGISTRY
+from vllm.multimodal.inputs import MultiModalKwargs, PlaceholderRange
+from vllm.multimodal.utils import group_mm_inputs_by_modality
+from vllm.sampling_params import SamplingType
+from vllm.sequence import IntermediateTensors
+from vllm.utils import (STR_DTYPE_TO_TORCH_DTYPE, DeviceMemoryProfiler,
+                        GiB_bytes, LayerBlockType, LazyLoader, cdiv,
+                        check_use_alibi, is_pin_memory_available)
+from vllm.v1.attention.backends.flash_attn import FlashAttentionMetadata
+from vllm.v1.core.encoder_cache_manager import compute_encoder_budget
+from vllm.v1.kv_cache_interface import (AttentionSpec, FullAttentionSpec,
+                                        KVCacheConfig, KVCacheSpec,
+                                        SlidingWindowSpec)
+from vllm.v1.outputs import (EMPTY_MODEL_RUNNER_OUTPUT, LogprobsTensors,
+                             ModelRunnerOutput)
+from vllm.v1.sample.metadata import SamplingMetadata
+from vllm.v1.sample.rejection_sampler import RejectionSampler
+from vllm.v1.sample.sampler import Sampler
+from vllm.v1.spec_decode.eagle import EagleProposer
+from vllm.v1.spec_decode.metadata import SpecDecodeMetadata
+from vllm.v1.spec_decode.ngram_proposer import NgramProposer
+from vllm.v1.spec_decode.utils import is_spec_decode_supported
+from vllm.v1.utils import bind_kv_cache
+from vllm.v1.worker.gpu_input_batch import CachedRequestState, InputBatch
+from vllm.v1.worker.lora_model_runner_mixin import LoRAModelRunnerMixin
+
+from .utils import (gather_mm_placeholders, sanity_check_mm_encoder_outputs,
+                    scatter_mm_placeholders)
+
+if TYPE_CHECKING:
+    import xgrammar as xgr
+
+    from vllm.v1.core.sched.output import SchedulerOutput
+else:
+    xgr = LazyLoader("xgr", globals(), "xgrammar")
+
+logger = init_logger(__name__)
+
+
+class GPUModelRunner(LoRAModelRunnerMixin):
+
+    def __init__(
+        self,
+        vllm_config: VllmConfig,
+        device: torch.device,
+    ):
+        self.vllm_config = vllm_config
+        self.model_config = vllm_config.model_config
+        self.cache_config = vllm_config.cache_config
+        self.lora_config = vllm_config.lora_config
+        self.load_config = vllm_config.load_config
+        self.parallel_config = vllm_config.parallel_config
+        self.scheduler_config = vllm_config.scheduler_config
+        self.speculative_config = vllm_config.speculative_config
+        self.prompt_adapter_config = vllm_config.prompt_adapter_config
+        self.observability_config = vllm_config.observability_config
+
+        from vllm.model_executor.models.utils import set_cpu_offload_max_bytes
+        set_cpu_offload_max_bytes(
+            int(self.cache_config.cpu_offload_gb * 1024**3))
+
+        model_config = self.model_config
+        cache_config = self.cache_config
+        scheduler_config = self.scheduler_config
+        parallel_config = self.parallel_config
+        self.device = device
+        self.pin_memory = is_pin_memory_available()
+        self.dtype = self.model_config.dtype
+        if cache_config.cache_dtype == "auto":
+            self.kv_cache_dtype = self.dtype
+        else:
+            self.kv_cache_dtype = STR_DTYPE_TO_TORCH_DTYPE[
+                cache_config.cache_dtype]
+
+        # NOTE(woosuk): sliding_window is None for models with interleaved
+        # attention. Use interleaved_sliding_window instead.
+        self.sliding_window = model_config.get_sliding_window()
+        self.interleaved_sliding_window = getattr(
+            model_config.hf_text_config, "interleaved_sliding_window", None)
+        self.window_size = (self.sliding_window
+                            or self.interleaved_sliding_window)
+
+        self.is_multimodal_model = model_config.is_multimodal_model
+        self.block_size = cache_config.block_size
+        self.max_model_len = model_config.max_model_len
+        self.max_num_blocks_per_req = cdiv(self.max_model_len, self.block_size)
+        self.max_num_tokens = scheduler_config.max_num_batched_tokens
+        self.max_num_reqs = scheduler_config.max_num_seqs
+
+        # Model-related.
+        self.num_attn_layers = model_config.get_num_layers_by_block_type(
+            parallel_config, LayerBlockType.attention)
+        self.num_query_heads = model_config.get_num_attention_heads(
+            parallel_config)
+        self.num_kv_heads = model_config.get_num_kv_heads(parallel_config)
+        self.head_size = model_config.get_head_size()
+        self.hidden_size = model_config.get_hidden_size()
+        self.attention_chunk_size = model_config.attention_chunk_size
+
+        self.attn_backend = get_attn_backend(
+            self.head_size,
+            self.dtype,
+            self.kv_cache_dtype,
+            self.block_size,
+            self.model_config.is_attention_free,
+            use_mla=self.model_config.use_mla,
+        )
+        if self.attn_backend is None:
+            error_msg = (
+                f"Error with get_att_backend: {self.head_size=}, "
+                f"{self.dtype=}, {self.kv_cache_dtype=}, {self.block_size=}, "
+                f"{self.model_config.is_attention_free=}, "
+                f"{self.model_config.use_mla=}")
+            logger.error(error_msg)
+            raise NotImplementedError(
+                "Non-Attention backend is not supported by V1 GPUModelRunner.")
+
+        self.attn_metadata_builder = self.attn_backend.get_builder_cls()(
+            weakref.proxy(self))
+        self.cascade_attn_enabled = not self.model_config.disable_cascade_attn
+
+        # Multi-modal data support
+        self.mm_registry = MULTIMODAL_REGISTRY
+        self.uses_mrope = model_config.uses_mrope
+
+        encoder_compute_budget, encoder_cache_size = compute_encoder_budget(
+            model_config=model_config,
+            scheduler_config=scheduler_config,
+            mm_registry=self.mm_registry,
+        )
+        self.max_num_encoder_input_tokens = encoder_compute_budget
+        self.encoder_cache_size = encoder_cache_size
+
+        # Sampler
+        self.sampler = Sampler()
+
+        # Lazy initialization
+        # self.model: nn.Module  # Set after load_model
+        self.kv_caches: list[torch.Tensor] = []
+        # req_id -> (input_id -> encoder_output)
+        self.encoder_cache: dict[str, dict[int, torch.Tensor]] = {}
+
+        # Set up speculative decoding.
+        self.use_spec_decode = False
+        self.use_aux_hidden_state_outputs = False
+        if self.speculative_config:
+            self.use_spec_decode = True
+            if get_pp_group().is_last_rank:
+                if self.speculative_config.method == "ngram":
+                    self.drafter = NgramProposer(self.vllm_config)
+                elif self.speculative_config.use_eagle():
+                    self.drafter = EagleProposer(self.vllm_config,
+                                                 self.device)  # type: ignore
+                    if self.speculative_config.method == "eagle3":
+                        self.use_aux_hidden_state_outputs = True
+                else:
+                    raise ValueError("Unknown speculative decoding method: "
+                                     f"{self.speculative_config.method}")
+                self.rejection_sampler = RejectionSampler()
+
+        # Request states.
+        self.requests: dict[str, CachedRequestState] = {}
+        # Persistent batch.
+        self.input_batch = InputBatch(
+            max_num_reqs=self.max_num_reqs,
+            max_model_len=self.max_model_len,
+            max_num_blocks_per_req=self.max_num_blocks_per_req,
+            device=self.device,
+            pin_memory=self.pin_memory,
+            vocab_size=model_config.get_vocab_size(),
+        )
+
+        self.use_cuda_graph = (self.vllm_config.compilation_config.level
+                               == CompilationLevel.PIECEWISE
+                               and not self.model_config.enforce_eager)
+        # TODO(woosuk): Provide an option to tune the max cudagraph batch size.
+        # The convention is different.
+        # self.cudagraph_batch_sizes sorts in ascending order.
+        # The batch sizes in the config are in descending order.
+        self.cudagraph_batch_sizes = list(
+            reversed(
+                self.vllm_config.compilation_config.cudagraph_capture_sizes))
+
+        # Cache the device properties.
+        self.device_properties = torch.cuda.get_device_properties(self.device)
+        self.num_sms = self.device_properties.multi_processor_count
+
+        # Persistent buffers for CUDA graphs.
+        self.input_ids = torch.zeros(self.max_num_tokens,
+                                     dtype=torch.int32,
+                                     device=self.device)
+        self.positions = torch.zeros(self.max_num_tokens,
+                                     dtype=torch.int64,
+                                     device=self.device)
+        # None in the first PP rank. The rest are set after load_model.
+        self.intermediate_tensors: Optional[IntermediateTensors] = None
+
+        # Only relevant for models using M-RoPE (e.g, Qwen2-VL)
+        if self.uses_mrope:
+            # NOTE: `mrope_positions` is implemented with one additional dummy
+            # position on purpose to make it non-contiguous so that it can work
+            # with torch compile.
+            # See detailed explanation in https://github.com/vllm-project/vllm/pull/12128#discussion_r1926431923
+
+            # NOTE: When M-RoPE is enabled, position ids are 3D regardless of
+            # the modality of inputs. For text-only inputs, each dimension has
+            # identical position IDs, making M-RoPE functionally equivalent to
+            # 1D-RoPE.
+            # See page 5 of https://arxiv.org/abs/2409.12191
+            self.mrope_positions = torch.zeros((3, self.max_num_tokens + 1),
+                                               dtype=torch.int64,
+                                               device=self.device)
+            self.mrope_positions_cpu = torch.zeros(
+                (3, self.max_num_tokens + 1),
+                dtype=torch.int64,
+                device="cpu",
+                pin_memory=self.pin_memory)
+
+        # Only relevant for models using ALiBi (e.g, MPT)
+        self.use_alibi = check_use_alibi(model_config)
+
+        self.inputs_embeds = torch.zeros(
+            (self.max_num_tokens, self.hidden_size),
+            dtype=self.dtype,
+            device=self.device)
+
+        # OPTIMIZATION: Cache the tensors rather than creating them every step.
+        # Keep in int64 to avoid overflow with long context
+        self.arange_np = np.arange(max(self.max_num_reqs + 1,
+                                       self.max_model_len,
+                                       self.max_num_tokens),
+                                   dtype=np.int64)
+        # NOTE(woosuk): These tensors are "stateless", i.e., they are literally
+        # a faster version of creating a new tensor every time. Thus, we should
+        # not make any assumptions about the values in these tensors.
+        self.input_ids_cpu = torch.zeros(self.max_num_tokens,
+                                         dtype=torch.int32,
+                                         device="cpu",
+                                         pin_memory=self.pin_memory)
+        self.input_ids_np = self.input_ids_cpu.numpy()
+        self.positions_cpu = torch.zeros(self.max_num_tokens,
+                                         dtype=torch.int64,
+                                         device="cpu",
+                                         pin_memory=self.pin_memory)
+        self.positions_np = self.positions_cpu.numpy()
+        self.slot_mapping_cpu = torch.zeros(self.max_num_tokens,
+                                            dtype=torch.int32,
+                                            device="cpu",
+                                            pin_memory=self.pin_memory)
+        self.slot_mapping_np = self.slot_mapping_cpu.numpy()
+        self.query_start_loc_cpu = torch.zeros(self.max_num_reqs + 1,
+                                               dtype=torch.int32,
+                                               device="cpu",
+                                               pin_memory=self.pin_memory)
+        self.query_start_loc_np = self.query_start_loc_cpu.numpy()
+        self.seq_lens_cpu = torch.zeros(self.max_num_reqs,
+                                        dtype=torch.int32,
+                                        device="cpu",
+                                        pin_memory=self.pin_memory)
+        self.seq_lens_np = self.seq_lens_cpu.numpy()
+
+    def _update_states(self, scheduler_output: "SchedulerOutput") -> None:
+        """Update the cached states and the persistent batch with the scheduler
+        output.
+
+        The updated states are used by the `_prepare_inputs` function to create
+        the input GPU tensors for the model.
+
+        The SamplingMetadata is updated and copied to the GPU if there is a
+        new/resumed/paused/finished request in the batch.
+        """
+        # Remove finished requests from the cached states.
+        for req_id in scheduler_output.finished_req_ids:
+            self.requests.pop(req_id, None)
+            self.encoder_cache.pop(req_id, None)
+        # Remove the finished requests from the persistent batch.
+        # NOTE(woosuk): There could be an edge case where finished_req_ids and
+        # scheduled_req_ids overlap. This happens when a request is aborted and
+        # then resubmitted with the same ID. In this case, we treat them as two
+        # distinct requests - clearing the cached states for the first request
+        # and handling the second as a new request.
+        removed_req_indices: list[int] = []
+        for req_id in scheduler_output.finished_req_ids:
+            req_index = self.input_batch.remove_request(req_id)
+            if req_index is not None:
+                removed_req_indices.append(req_index)
+
+        # Free the cached encoder outputs.
+        for req_id, input_id in scheduler_output.free_encoder_input_ids:
+            encoder_outputs = self.encoder_cache.get(req_id)
+            if encoder_outputs is not None:
+                encoder_outputs.pop(input_id, None)
+                if not encoder_outputs:
+                    self.encoder_cache.pop(req_id, None)
+
+        # Remove the unscheduled requests from the persistent batch.
+        # NOTE(woosuk): The unscheduled requests are either preempted requests
+        # or running requests that are not scheduled in this step. We remove
+        # them from the persistent batch but keep their cached states since
+        # they will be scheduled again sometime in the future.
+        scheduled_req_ids = scheduler_output.num_scheduled_tokens.keys()
+        cached_req_ids = self.input_batch.req_id_to_index.keys()
+        unscheduled_req_ids = cached_req_ids - scheduled_req_ids
+        # NOTE(woosuk): The persistent batch optimization assumes that
+        # consecutive batches contain mostly the same requests. If batches
+        # have low request overlap (e.g., alternating between two distinct
+        # sets of requests), this optimization becomes very inefficient.
+        for req_id in unscheduled_req_ids:
+            req_index = self.input_batch.remove_request(req_id)
+            assert req_index is not None
+            removed_req_indices.append(req_index)
+
+        req_ids_to_add: list[str] = []
+        # Add new requests to the cached states.
+        for new_req_data in scheduler_output.scheduled_new_reqs:
+            req_id = new_req_data.req_id
+            sampling_params = new_req_data.sampling_params
+            if sampling_params.sampling_type == SamplingType.RANDOM_SEED:
+                generator = torch.Generator(device=self.device)
+                generator.manual_seed(sampling_params.seed)
+            else:
+                generator = None
+
+            self.requests[req_id] = CachedRequestState(
+                req_id=req_id,
+                prompt_token_ids=new_req_data.prompt_token_ids,
+                mm_inputs=new_req_data.mm_inputs,
+                mm_positions=new_req_data.mm_positions,
+                sampling_params=sampling_params,
+                generator=generator,
+                block_ids=new_req_data.block_ids,
+                num_computed_tokens=new_req_data.num_computed_tokens,
+                output_token_ids=[],
+                lora_request=new_req_data.lora_request,
+            )
+
+            # Only relevant for models using M-RoPE (e.g, Qwen2-VL)
+            if self.uses_mrope:
+                image_grid_thw = []
+                video_grid_thw = []
+                second_per_grid_ts = []
+                audio_feature_lengths = []
+                use_audio_in_video = False
+                for mm_input in self.requests[req_id].mm_inputs:
+                    if mm_input.get("image_grid_thw") is not None:
+                        image_grid_thw.extend(
+                            mm_input["image_grid_thw"].tolist())
+                    if mm_input.get("video_grid_thw") is not None:
+                        video_grid_thw.extend(
+                            mm_input["video_grid_thw"].tolist())
+                    if mm_input.get("second_per_grid_ts") is not None:
+                        second_per_grid_ts.extend(
+                            mm_input["second_per_grid_ts"])
+                    if mm_input.get("audio_feature_lengths") is not None:
+                        audio_feature_lengths.extend(
+                            mm_input["audio_feature_lengths"])
+                    if mm_input.get("use_audio_in_video") is True:
+                        use_audio_in_video = True
+
+                hf_config = self.model_config.hf_config
+
+                self.requests[req_id].mrope_positions, \
+                    self.requests[req_id].mrope_position_delta = \
+                    MRotaryEmbedding.get_input_positions_tensor(
+                        self.requests[req_id].prompt_token_ids,
+                        hf_config=hf_config,
+                        image_grid_thw=image_grid_thw,
+                        video_grid_thw=video_grid_thw,
+                        second_per_grid_ts=second_per_grid_ts,
+                        audio_feature_lengths=audio_feature_lengths,
+                        use_audio_in_video=use_audio_in_video,
+                    )
+
+            req_ids_to_add.append(req_id)
+
+        # Update the states of the running/resumed requests.
+        for req_data in scheduler_output.scheduled_cached_reqs:
+            req_id = req_data.req_id
+            req_state = self.requests[req_id]
+
+            # Update the cached states.
+            num_computed_tokens = req_data.num_computed_tokens
+            req_state.num_computed_tokens = num_computed_tokens
+            # Add the sampled token(s) from the previous step (if any).
+            # This doesn't include "unverified" tokens like spec decode tokens.
+            num_new_tokens = (num_computed_tokens +
+                              len(req_data.new_token_ids) -
+                              req_state.num_tokens)
+            if num_new_tokens == 1:
+                # Avoid slicing list in most common case.
+                req_state.output_token_ids.append(req_data.new_token_ids[-1])
+            elif num_new_tokens > 0:
+                req_state.output_token_ids.extend(
+                    req_data.new_token_ids[-num_new_tokens:])
+            # Update the block IDs.
+            if not req_data.resumed_from_preemption:
+                # Append the new blocks to the existing block IDs.
+                req_state.block_ids.extend(req_data.new_block_ids)
+            else:
+                # The request is resumed from preemption.
+                # Replace the existing block IDs with the new ones.
+                req_state.block_ids = req_data.new_block_ids
+
+            req_index = self.input_batch.req_id_to_index.get(req_id)
+            if req_index is None:
+                # The request is not in the persistent batch.
+                # The request was either preempted and resumed later, or was not
+                # scheduled in the previous step and needs to be added again.
+                req_ids_to_add.append(req_id)
+                continue
+
+            # Update the persistent batch.
+            self.input_batch.num_computed_tokens_cpu[req_index] = (
+                num_computed_tokens)
+            self.input_batch.block_table.append_row(req_data.new_block_ids,
+                                                    req_index)
+            # Add new_token_ids to token_ids_cpu.
+            start_token_index = num_computed_tokens
+            end_token_index = num_computed_tokens + len(req_data.new_token_ids)
+            self.input_batch.token_ids_cpu[
+                req_index,
+                start_token_index:end_token_index] = req_data.new_token_ids
+            self.input_batch.num_tokens_no_spec[req_index] = end_token_index
+            # Add spec_token_ids to token_ids_cpu.
+            spec_token_ids = scheduler_output.scheduled_spec_decode_tokens.get(
+                req_id, ())
+            if spec_token_ids:
+                start_index = end_token_index
+                end_token_index += len(spec_token_ids)
+                self.input_batch.token_ids_cpu[
+                    req_index, start_index:end_token_index] = spec_token_ids
+            # NOTE(woosuk): `num_tokens` here may include spec decode tokens.
+            self.input_batch.num_tokens[req_index] = end_token_index
+
+        # Check if the batch has changed. If not, we can skip copying the
+        # sampling metadata from CPU to GPU.
+        batch_changed = len(removed_req_indices) > 0 or len(req_ids_to_add) > 0
+
+        # Add the new or resumed requests to the persistent batch.
+        # The smaller empty indices are filled first.
+        removed_req_indices.sort(reverse=True)
+        for req_id in req_ids_to_add:
+            req_state = self.requests[req_id]
+            if removed_req_indices:
+                # Fill the empty index.
+                req_index = removed_req_indices.pop()
+            else:
+                # Append to the end.
+                req_index = None
+            self.input_batch.add_request(req_state, req_index)
+
+        # Condense the batched states if there are empty indices.
+        if removed_req_indices:
+            self.input_batch.condense(removed_req_indices)
+
+        # Some attention backends (namely MLA) may want to separate requests
+        # based on if the attention computation will be compute-bound or
+        # memory-bound. This gives them a hook to do that.
+        batch_reordered = self.attn_metadata_builder.reorder_batch(
+            self.input_batch, scheduler_output)
+
+        if batch_changed or batch_reordered:
+            self.input_batch.refresh_sampling_metadata()
+
488
+ def _prepare_inputs(
489
+ self,
490
+ scheduler_output: "SchedulerOutput",
491
+ ) -> tuple[FlashAttentionMetadata, torch.Tensor,
492
+ Optional[SpecDecodeMetadata]]:
493
+ total_num_scheduled_tokens = scheduler_output.total_num_scheduled_tokens
494
+ assert total_num_scheduled_tokens > 0
495
+ num_reqs = self.input_batch.num_reqs
496
+ assert num_reqs > 0
497
+
498
+ # OPTIMIZATION: Start copying the block table first.
499
+ # This way, we can overlap the copy with the following CPU operations.
500
+ self.input_batch.block_table.commit(num_reqs)
501
+
502
+ # Get the number of scheduled tokens for each request.
503
+ req_ids = self.input_batch.req_ids
504
+ tokens = [scheduler_output.num_scheduled_tokens[i] for i in req_ids]
505
+ num_scheduled_tokens = np.array(tokens, dtype=np.int32)
506
+ max_num_scheduled_tokens = max(tokens)
507
+
508
+ # Get request indices.
509
+ # E.g., [2, 5, 3] -> [0, 0, 1, 1, 1, 1, 1, 2, 2, 2]
510
+ req_indices = np.repeat(self.arange_np[:num_reqs],
511
+ num_scheduled_tokens)
512
+
513
+ # Get batched arange.
514
+ # E.g., [2, 5, 3] -> [0, 1, 0, 1, 2, 3, 4, 0, 1, 2]
515
+ # Equivalent to but faster than:
516
+ # np.concatenate([np.arange(n) for n in num_scheduled_tokens])
517
+ # Step 1. [2, 5, 3] -> [2, 7, 10]
518
+ cu_num_tokens = np.cumsum(num_scheduled_tokens)
519
+ # Step 2. [2, 7, 10] -> [0, 0, 2, 2, 2, 2, 2, 7, 7, 7]
520
+ cumsums_offsets = np.repeat(cu_num_tokens - num_scheduled_tokens,
521
+ num_scheduled_tokens)
522
+ # Step 3. [0, 1, 0, 1, 2, 3, 4, 0, 1, 2]
523
+ arange = self.arange_np[:total_num_scheduled_tokens] - cumsums_offsets
524
+
525
+ # Get positions.
526
+ positions_np = self.positions_np[:total_num_scheduled_tokens]
527
+ np.add(self.input_batch.num_computed_tokens_cpu[req_indices],
528
+ arange,
529
+ out=positions_np)
530
+
531
+ # Calculate M-RoPE positions.
532
+ # Only relevant for models using M-RoPE (e.g, Qwen2-VL)
533
+ if self.uses_mrope:
534
+ self._calc_mrope_positions(scheduler_output)
535
+
536
+ # Get token indices.
537
+ # E.g., [0, 1, 0, 1, 2, 3, 4, 0, 1, 2]
538
+ # -> [0, 1, M, M + 1, M + 2, M + 3, M + 4, 2 * M, 2 * M + 1, 2 * M + 2]
539
+ # where M is the max_model_len.
540
+ token_indices = (positions_np +
541
+ req_indices * self.input_batch.token_ids_cpu.shape[1])
542
+
543
+ # NOTE(woosuk): We use torch.index_select instead of np.take here
544
+ # because torch.index_select is much faster than np.take for large
545
+ # tensors.
546
+ torch.index_select(self.input_batch.token_ids_cpu_tensor.flatten(),
547
+ 0,
548
+ torch.from_numpy(token_indices),
549
+ out=self.input_ids_cpu[:total_num_scheduled_tokens])
550
+
551
+ # Calculate the slot mapping.
552
+ # E.g., [0, 1, 0, 1, 2, 3, 4, 0, 1, 2]
553
+ # -> [0, 0, K, K, K + 1, K + 1, K + 2, 2 * K, 2 * K, 2 * K + 1]
554
+ # where K is the max_num_blocks_per_req and the block size is 2.
555
+ # NOTE(woosuk): We can't simply use `token_indices // block_size` here
556
+ # because M (max_model_len) is not necessarily divisible by block_size.
557
+ block_table_indices = (req_indices * self.max_num_blocks_per_req +
558
+ positions_np // self.block_size)
559
+ block_table_cpu = self.input_batch.block_table.get_cpu_tensor()
560
+ block_numbers = block_table_cpu.flatten()[block_table_indices].numpy()
561
+ block_offsets = positions_np % self.block_size
562
+ np.add(block_numbers * self.block_size,
563
+ block_offsets,
564
+ out=self.slot_mapping_np[:total_num_scheduled_tokens])
565
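To make the slot-mapping chain above concrete, here is a self-contained sketch with a toy block table. The block size (2), the hypothetical max_num_blocks_per_req (4), and the physical block ids are all made up; only the indexing pattern mirrors the code above.

import numpy as np

block_size = 2
max_num_blocks_per_req = 4  # hypothetical K
req_indices = np.array([0, 0, 1, 1, 1, 1, 1, 2, 2, 2])
positions = np.array([0, 1, 0, 1, 2, 3, 4, 0, 1, 2])

# Toy block table: request r's logical block b sits in physical block 10*r + b.
block_table = np.array([[10 * r + b for b in range(max_num_blocks_per_req)]
                        for r in range(3)])

block_table_indices = req_indices * max_num_blocks_per_req + positions // block_size
block_numbers = block_table.reshape(-1)[block_table_indices]
block_offsets = positions % block_size
slot_mapping = block_numbers * block_size + block_offsets
# -> [ 0  1 20 21 22 23 24 40 41 42]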
+
566
+ # Prepare the attention metadata.
567
+ self.query_start_loc_np[0] = 0
568
+ self.query_start_loc_np[1:num_reqs + 1] = cu_num_tokens
569
+
570
+ self.seq_lens_np[:num_reqs] = (
571
+ self.input_batch.num_computed_tokens_cpu[:num_reqs] +
572
+ num_scheduled_tokens)
573
+
574
+ # Copy the tensors to the GPU.
575
+ self.input_ids[:total_num_scheduled_tokens].copy_(
576
+ self.input_ids_cpu[:total_num_scheduled_tokens], non_blocking=True)
577
+ if self.uses_mrope:
578
+             # Only relevant for models using M-RoPE (e.g., Qwen2-VL)
579
+ self.mrope_positions[:, :total_num_scheduled_tokens].copy_(
580
+ self.mrope_positions_cpu[:, :total_num_scheduled_tokens],
581
+ non_blocking=True)
582
+ else:
583
+ # Common case (1D positions)
584
+ self.positions[:total_num_scheduled_tokens].copy_(
585
+ self.positions_cpu[:total_num_scheduled_tokens],
586
+ non_blocking=True)
587
+
588
+ # Prepare for cascade attention if enabled & beneficial.
589
+ common_prefix_len = 0
590
+ if self.cascade_attn_enabled:
591
+ common_prefix_len = self._compute_cascade_attn_prefix_len(
592
+ num_scheduled_tokens,
593
+ scheduler_output.num_common_prefix_blocks,
594
+ )
595
+
596
+ attn_metadata = self.attn_metadata_builder.build(
597
+ num_reqs=num_reqs,
598
+ num_actual_tokens=total_num_scheduled_tokens,
599
+ max_query_len=max_num_scheduled_tokens,
600
+ common_prefix_len=common_prefix_len,
601
+ )
602
+
603
+ use_spec_decode = len(
604
+ scheduler_output.scheduled_spec_decode_tokens) > 0
605
+ if not use_spec_decode:
606
+ # NOTE(woosuk): Due to chunked prefills, the batch may contain
607
+ # partial requests. While we should not sample any token
608
+ # from these partial requests, we do so for simplicity.
609
+ # We will ignore the sampled tokens from the partial requests.
610
+ # TODO: Support prompt logprobs.
611
+ logits_indices = attn_metadata.query_start_loc[1:] - 1
612
+ spec_decode_metadata = None
613
+ else:
614
+ # Get the number of draft tokens for each request.
615
+ # Iterate over the dictionary rather than all requests since not all
616
+ # requests have draft tokens.
617
+ num_draft_tokens = np.zeros(num_reqs, dtype=np.int32)
618
+ for req_id, draft_token_ids in (
619
+ scheduler_output.scheduled_spec_decode_tokens.items()):
620
+ req_idx = self.input_batch.req_id_to_index[req_id]
621
+ num_draft_tokens[req_idx] = len(draft_token_ids)
622
+
623
+ spec_decode_metadata = self._calc_spec_decode_metadata(
624
+ num_draft_tokens, cu_num_tokens)
625
+ logits_indices = spec_decode_metadata.logits_indices
626
+
627
+ # Hot-Swap lora model
628
+ if self.lora_config:
629
+ self.set_active_loras(self.input_batch, num_scheduled_tokens)
630
+
631
+ return attn_metadata, logits_indices, spec_decode_metadata
632
+
633
+ def _compute_cascade_attn_prefix_len(
634
+ self,
635
+ num_scheduled_tokens: np.ndarray,
636
+ num_common_prefix_blocks: int,
637
+ ) -> int:
638
+ """Compute the length of the common prefix for cascade attention.
639
+
640
+ NOTE(woosuk): The common prefix length returned by this function
641
+ represents the length used specifically for cascade attention, not the
642
+ actual number of tokens shared between requests. When cascade attention
643
+ is disabled (use_cascade=False), this function returns 0 even if
644
+ requests share common tokens. Additionally, the common prefix length is
645
+ truncated to a multiple of the block size and may be further truncated
646
+ due to implementation details explained below.
647
+
648
+ Args:
649
+ num_scheduled_tokens: Number of tokens scheduled per request.
650
+ num_common_prefix_blocks: Number of shared KV cache blocks.
651
+
652
+ Returns:
653
+ int: Length of common prefix in tokens.
654
+ """
655
+ common_prefix_len = num_common_prefix_blocks * self.block_size
656
+ if common_prefix_len == 0:
657
+ # Common case.
658
+ return 0
659
+
660
+ # NOTE(woosuk): Cascade attention uses two attention kernels: one
661
+ # for the common prefix and the other for the rest. For the first
662
+ # kernel, we concatenate all the query tokens (possibly from
663
+ # different requests) and treat them as if they are from the same
664
+ # request. Then, we use bi-directional attention to process the
665
+ # common prefix in the KV cache. Importantly, this means that the
666
+ # first kernel does not do any masking.
667
+
668
+ # Consider the following example:
669
+ # Request 1's input query: [D, E, X]
670
+ # Request 1's kv cache: [A, B, C, D, E, X]
671
+ # Request 1's num_computed_tokens: 3 (i.e., [A, B, C])
672
+ # Request 2's input query: [E, Y]
673
+ # Request 2's kv cache: [A, B, C, D, E, Y]
674
+ # Request 2's num_computed_tokens: 4 (i.e., [A, B, C, D])
675
+
676
+ # If we use [A, B, C, D, E] as the common prefix, then the
677
+ # first kernel will compute the bi-directional attention between
678
+ # input query [D, E, X, E, Y] and common prefix [A, B, C, D, E].
679
+ # However, this is wrong because D in Request 1 should not attend to
680
+ # E in the common prefix (i.e., we need masking).
681
+ # To avoid this, [A, B, C, D] should be the common prefix.
682
+ # That is, the common prefix should be capped by the minimum
683
+         # num_computed_tokens among the requests, plus one to include
684
+ # the first token of the query.
685
+
686
+ # In practice, we use [A, B, C] as the common prefix, instead of
687
+ # [A, B, C, D] (i.e., the common prefix is capped by the minimum
688
+ # num_computed_tokens, without plus one).
689
+ # This is because of an implementation detail: We want to always
690
+ # use two kernels for cascade attention. Let's imagine:
691
+ # Request 3's input query: [D]
692
+ # Request 3's kv cache: [A, B, C, D]
693
+ # Request 3's num_computed_tokens: 3 (i.e., [A, B, C])
694
+ # If we use [A, B, C, D] as the common prefix for Request 1-3,
695
+ # then Request 3 will be processed only by the first kernel,
696
+ # and the second kernel will get an empty input. While this is not
697
+ # a fundamental problem, our current implementation does not support
698
+ # this case.
699
+ num_reqs = len(num_scheduled_tokens)
700
+ common_prefix_len = min(
701
+ common_prefix_len,
702
+ self.input_batch.num_computed_tokens_cpu[:num_reqs].min())
703
+ # common_prefix_len should be a multiple of the block size.
704
+ common_prefix_len = (common_prefix_len // self.block_size *
705
+ self.block_size)
706
+ use_cascade = self.attn_metadata_builder.use_cascade_attention(
707
+ common_prefix_len=common_prefix_len,
708
+ query_lens=num_scheduled_tokens,
709
+ num_query_heads=self.num_query_heads,
710
+ num_kv_heads=self.num_kv_heads,
711
+ use_alibi=self.use_alibi,
712
+ use_sliding_window=self.window_size is not None,
713
+ num_sms=self.num_sms,
714
+ )
715
+ return common_prefix_len if use_cascade else 0
716
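A minimal numeric sketch of the capping and truncation performed above, using made-up values (6 shared blocks, block size 16, and two requests with 40 and 90 computed tokens):

import numpy as np

block_size = 16
num_common_prefix_blocks = 6
num_computed_tokens = np.array([40, 90])

common_prefix_len = num_common_prefix_blocks * block_size              # 96
# Cap by the minimum num_computed_tokens among the requests.
common_prefix_len = min(common_prefix_len, num_computed_tokens.min())  # 40
# Truncate to a multiple of the block size.
common_prefix_len = common_prefix_len // block_size * block_size       # 32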
+
717
+ def _calc_mrope_positions(self, scheduler_output: "SchedulerOutput"):
718
+ mrope_pos_ptr = 0
719
+ for index, req_id in enumerate(self.input_batch.req_ids):
720
+ req = self.requests[req_id]
721
+ assert req.mrope_positions is not None
722
+
723
+ num_computed_tokens = \
724
+ self.input_batch.num_computed_tokens_cpu[index]
725
+ num_scheduled_tokens = \
726
+ scheduler_output.num_scheduled_tokens[req_id]
727
+ num_prompt_tokens = len(req.prompt_token_ids)
728
+
729
+ if num_computed_tokens + num_scheduled_tokens > num_prompt_tokens:
730
+ prompt_part_len = max(0,
731
+ num_prompt_tokens - num_computed_tokens)
732
+ completion_part_len = max(
733
+ 0, num_scheduled_tokens - prompt_part_len)
734
+ else:
735
+ prompt_part_len = num_scheduled_tokens
736
+ completion_part_len = 0
737
+
738
+ assert num_scheduled_tokens == prompt_part_len + completion_part_len
739
+
740
+ if prompt_part_len > 0:
741
+ # prompt's mrope_positions are pre-computed
742
+ dst_start = mrope_pos_ptr
743
+ dst_end = mrope_pos_ptr + prompt_part_len
744
+ src_start = num_computed_tokens
745
+ src_end = num_computed_tokens + prompt_part_len
746
+
747
+ self.mrope_positions_cpu[:, dst_start:dst_end] = \
748
+ req.mrope_positions[:,src_start:src_end]
749
+
750
+ mrope_pos_ptr += prompt_part_len
751
+
752
+ if completion_part_len > 0:
753
+ # compute completion's mrope_positions on-the-fly
754
+ dst_start = mrope_pos_ptr
755
+ dst_end = mrope_pos_ptr + completion_part_len
756
+
757
+ self.mrope_positions_cpu[:, dst_start:dst_end] = \
758
+ MRotaryEmbedding.get_next_input_positions_tensor(
759
+ req.mrope_position_delta,
760
+ context_len=num_computed_tokens +
761
+ prompt_part_len,
762
+ seq_len=num_computed_tokens +
763
+ prompt_part_len +
764
+ completion_part_len,
765
+ )
766
+
767
+ mrope_pos_ptr += completion_part_len
768
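The split between the pre-computed prompt positions and the on-the-fly completion positions can be checked with a small made-up example (a 100-token prompt, 95 tokens already computed, 8 tokens scheduled this step):

num_prompt_tokens = 100
num_computed_tokens = 95
num_scheduled_tokens = 8

if num_computed_tokens + num_scheduled_tokens > num_prompt_tokens:
    prompt_part_len = max(0, num_prompt_tokens - num_computed_tokens)     # 5
    completion_part_len = max(0, num_scheduled_tokens - prompt_part_len)  # 3
else:
    prompt_part_len = num_scheduled_tokens
    completion_part_len = 0

assert num_scheduled_tokens == prompt_part_len + completion_part_len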
+
769
+ def _calc_spec_decode_metadata(
770
+ self,
771
+ num_draft_tokens: np.ndarray,
772
+ cu_num_scheduled_tokens: np.ndarray,
773
+ ) -> SpecDecodeMetadata:
774
+ # Inputs:
775
+ # cu_num_scheduled_tokens: [ 4, 104, 107, 207, 209]
776
+ # num_draft_tokens: [ 3, 0, 2, 0, 1]
777
+ # Outputs:
778
+ # cu_num_draft_tokens: [ 3, 3, 5, 5, 6]
779
+ # logits_indices: [ 0, 1, 2, 3, 103, 104, 105, 106,
780
+ # 206, 207, 208]
781
+ # target_logits_indices: [ 0, 1, 2, 5, 6, 9]
782
+ # bonus_logits_indices: [ 3, 4, 7, 8, 10]
783
+
784
+ # Compute the logits indices.
785
+ # [4, 1, 3, 1, 2]
786
+ num_sampled_tokens = num_draft_tokens + 1
787
+ # Step 1. [4, 5, 8, 9, 11]
788
+ cu_num_sampled_tokens = np.cumsum(num_sampled_tokens, dtype=np.int32)
789
+ total_num_sampled_tokens = cu_num_sampled_tokens[-1]
790
+ # Step 2. [0, 0, 0, 0, 4, 5, 5, 5, 8, 9, 9]
791
+ cumsums_offsets = np.repeat(cu_num_sampled_tokens - num_sampled_tokens,
792
+ num_sampled_tokens)
793
+ # Step 3. [0, 1, 2, 3, 0, 0, 1, 2, 0, 0, 1]
794
+ arange = self.arange_np[:total_num_sampled_tokens] - cumsums_offsets
795
+ # Step 4. [0, 0, 0, 0, 103, 104, 104, 104, 206, 207, 207]
796
+ logits_indices = np.repeat(
797
+ cu_num_scheduled_tokens - num_sampled_tokens, num_sampled_tokens)
798
+ # Step 5. [0, 1, 2, 3, 103, 104, 105, 106, 206, 207, 208]
799
+ logits_indices += arange
800
+
801
+ # Compute the bonus logits indices.
802
+ bonus_logits_indices = cu_num_sampled_tokens - 1
803
+
804
+ # Compute the draft logits indices.
805
+ # [3, 3, 5, 5, 6]
806
+ cu_num_draft_tokens = np.cumsum(num_draft_tokens, dtype=np.int32)
807
+ total_num_draft_tokens = cu_num_draft_tokens[-1]
808
+ # [0, 0, 0, 3, 3, 5]
809
+ cumsums_offsets = np.repeat(cu_num_draft_tokens - num_draft_tokens,
810
+ num_draft_tokens)
811
+ # [0, 1, 2, 0, 1, 0]
812
+ arange = self.arange_np[:total_num_draft_tokens] - cumsums_offsets
813
+ # [0, 0, 0, 5, 5, 9]
814
+ target_logits_indices = np.repeat(
815
+ cu_num_sampled_tokens - num_sampled_tokens, num_draft_tokens)
816
+ # [0, 1, 2, 5, 6, 9]
817
+ target_logits_indices += arange
818
+
819
+ # TODO: Optimize the CPU -> GPU copy.
820
+ cu_num_draft_tokens = torch.from_numpy(cu_num_draft_tokens).to(
821
+ self.device, non_blocking=True)
822
+ logits_indices = torch.from_numpy(logits_indices).to(self.device,
823
+ non_blocking=True)
824
+ target_logits_indices = torch.from_numpy(target_logits_indices).to(
825
+ self.device, non_blocking=True)
826
+ bonus_logits_indices = torch.from_numpy(bonus_logits_indices).to(
827
+ self.device, non_blocking=True)
828
+
829
+ # Compute the draft token ids.
830
+ # draft_token_indices: [ 1, 2, 3, 105, 106, 208]
831
+ draft_token_ids = self.input_ids[logits_indices]
832
+ draft_token_ids = draft_token_ids[target_logits_indices + 1]
833
+
834
+ metadata = SpecDecodeMetadata(
835
+ draft_token_ids=draft_token_ids,
836
+ num_draft_tokens=num_draft_tokens.tolist(),
837
+ cu_num_draft_tokens=cu_num_draft_tokens,
838
+ target_logits_indices=target_logits_indices,
839
+ bonus_logits_indices=bonus_logits_indices,
840
+ logits_indices=logits_indices,
841
+ )
842
+ return metadata
843
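The worked example in the comments at the top of this method can be re-derived with plain NumPy. The sketch below is not part of the runner; it only reproduces the index arithmetic for the sampled and bonus logits.

import numpy as np

cu_num_scheduled_tokens = np.array([4, 104, 107, 207, 209], dtype=np.int32)
num_draft_tokens = np.array([3, 0, 2, 0, 1], dtype=np.int32)

num_sampled_tokens = num_draft_tokens + 1                # [4 1 3 1 2]
cu_num_sampled_tokens = np.cumsum(num_sampled_tokens)    # [4 5 8 9 11]
offsets = np.repeat(cu_num_sampled_tokens - num_sampled_tokens, num_sampled_tokens)
arange = np.arange(cu_num_sampled_tokens[-1]) - offsets
logits_indices = np.repeat(cu_num_scheduled_tokens - num_sampled_tokens,
                           num_sampled_tokens) + arange
# -> [0 1 2 3 103 104 105 106 206 207 208]
bonus_logits_indices = cu_num_sampled_tokens - 1         # [3 4 7 8 10]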
+
844
+ def _execute_mm_encoder(self, scheduler_output: "SchedulerOutput"):
845
+ scheduled_encoder_inputs = scheduler_output.scheduled_encoder_inputs
846
+ if not scheduled_encoder_inputs:
847
+ return
848
+
849
+ # Batch the multi-modal inputs.
850
+ mm_inputs = list[MultiModalKwargs]()
851
+ req_ids_pos = list[tuple[str, int, PlaceholderRange]]()
852
+ for req_id, encoder_input_ids in scheduled_encoder_inputs.items():
853
+ req_state = self.requests[req_id]
854
+
855
+ for mm_input_id in encoder_input_ids:
856
+ mm_inputs.append(req_state.mm_inputs[mm_input_id])
857
+ req_ids_pos.append(
858
+ (req_id, mm_input_id, req_state.mm_positions[mm_input_id]))
859
+
860
+ # Batch mm inputs as much as we can: if a request in the batch has
861
+ # multiple modalities or a different modality than the previous one,
862
+ # we process it separately to preserve item order.
863
+ # FIXME(ywang96): This is a hacky way to deal with multiple modalities
864
+ # in the same batch while still being able to benefit from batching
865
+ # multimodal inputs. The proper solution should be reordering the
866
+ # encoder outputs.
867
+ grouped_mm_inputs_list = group_mm_inputs_by_modality(mm_inputs)
868
+
869
+ encoder_outputs = []
870
+ for grouped_mm_inputs in grouped_mm_inputs_list:
871
+ batched_mm_inputs = MultiModalKwargs.batch(grouped_mm_inputs)
872
+ batched_mm_inputs = MultiModalKwargs.as_kwargs(batched_mm_inputs,
873
+ device=self.device)
874
+
875
+ # Run the encoder.
876
+ # `curr_group_outputs` is either of the following:
877
+ # 1. A tensor of shape (num_items, feature_size, hidden_size)
878
+ # in case feature_size is fixed across all multimodal items.
879
+ # 2. A list or tuple (length: num_items) of tensors, each of shape
880
+ # (feature_size, hidden_size) in case the feature size is dynamic
881
+ # depending on the input multimodal items.
882
+ curr_group_outputs = self.model.get_multimodal_embeddings(
883
+ **batched_mm_inputs)
884
+
885
+ sanity_check_mm_encoder_outputs(
886
+ curr_group_outputs,
887
+ expected_num_items=len(grouped_mm_inputs),
888
+ )
889
+
890
+ for output in curr_group_outputs:
891
+ encoder_outputs.append(output)
892
+
893
+ # Cache the encoder outputs.
894
+ for (req_id, input_id, pos_info), output in zip(
895
+ req_ids_pos,
896
+ encoder_outputs,
897
+ ):
898
+ if req_id not in self.encoder_cache:
899
+ self.encoder_cache[req_id] = {}
900
+
901
+ self.encoder_cache[req_id][input_id] = scatter_mm_placeholders(
902
+ output,
903
+ is_embed=pos_info.is_embed,
904
+ )
905
+
906
+ def _gather_mm_embeddings(
907
+ self,
908
+ scheduler_output: "SchedulerOutput",
909
+ ) -> list[torch.Tensor]:
910
+ mm_embeds: list[torch.Tensor] = []
911
+ for req_id in self.input_batch.req_ids:
912
+ num_scheduled_tokens = scheduler_output.num_scheduled_tokens[
913
+ req_id]
914
+ req_state = self.requests[req_id]
915
+ num_computed_tokens = req_state.num_computed_tokens
916
+ mm_positions = req_state.mm_positions
917
+ for i, pos_info in enumerate(mm_positions):
918
+ start_pos = pos_info.offset
919
+ num_encoder_tokens = pos_info.length
920
+
921
+ # The encoder output is needed if the two ranges overlap:
922
+ # [num_computed_tokens,
923
+ # num_computed_tokens + num_scheduled_tokens) and
924
+ # [start_pos, start_pos + num_encoder_tokens)
925
+ if start_pos >= num_computed_tokens + num_scheduled_tokens:
926
+ # The encoder output is not needed in this step.
927
+ break
928
+ if start_pos + num_encoder_tokens <= num_computed_tokens:
929
+ # The encoder output is already processed and stored
930
+ # in the decoder's KV cache.
931
+ continue
932
+
933
+ start_idx = max(num_computed_tokens - start_pos, 0)
934
+ end_idx = min(
935
+ num_computed_tokens - start_pos + num_scheduled_tokens,
936
+ num_encoder_tokens)
937
+ assert start_idx < end_idx
938
+ assert req_id in self.encoder_cache
939
+ assert i in self.encoder_cache[req_id]
940
+ encoder_output = self.encoder_cache[req_id][i]
941
+
942
+ if (is_embed := pos_info.is_embed) is not None:
943
+ is_embed = is_embed[start_idx:end_idx]
944
+
945
+ mm_embeds_item = gather_mm_placeholders(
946
+ encoder_output[start_idx:end_idx],
947
+ is_embed=is_embed,
948
+ )
949
+ mm_embeds.append(mm_embeds_item)
950
+ return mm_embeds
951
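The overlap test above boils down to simple interval arithmetic. A hypothetical example (an image placeholder covering positions [50, 250) of the prompt, with 120 tokens computed and 64 scheduled this step) gives:

start_pos, num_encoder_tokens = 50, 200
num_computed_tokens, num_scheduled_tokens = 120, 64

assert start_pos < num_computed_tokens + num_scheduled_tokens  # not entirely in the future
assert start_pos + num_encoder_tokens > num_computed_tokens    # not already consumed

start_idx = max(num_computed_tokens - start_pos, 0)            # 70
end_idx = min(num_computed_tokens - start_pos + num_scheduled_tokens,
              num_encoder_tokens)                              # 134
# encoder_output[70:134] is the slice of embeddings needed for this step.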
+
952
+ def get_model(self) -> nn.Module:
953
+ return self.model
954
+
955
+ def apply_grammar_bitmask(
956
+ self,
957
+ scheduler_output: "SchedulerOutput",
958
+ logits: torch.Tensor,
959
+ ):
960
+ # Serialization of np.ndarray is much more efficient than a tensor,
961
+ # so we receive it in that format.
962
+ grammar_bitmask = scheduler_output.grammar_bitmask
963
+ if grammar_bitmask is None:
964
+ return
965
+
966
+ # We receive the structured output bitmask from the scheduler, but the
967
+ # indices of the requests in the batch may not match the indices of
968
+ # the bitmask since the scheduler doesn't know how the gpu runner is
969
+ # ordering the requests in the batch. We need to sort the bitmask to
970
+ # match the order of the requests used here.
971
+ struct_out_req_batch_indices: dict[str, int] = {}
972
+ indices_match = True
973
+ for req_id in self.input_batch.req_ids:
974
+ mask_index = scheduler_output.structured_output_request_ids.get(
975
+ req_id)
976
+ if mask_index is None:
977
+ # not a structured output request
978
+ continue
979
+ batch_index = self.input_batch.req_id_to_index[req_id]
980
+ if batch_index != mask_index:
981
+ indices_match = False
982
+ struct_out_req_batch_indices[req_id] = batch_index
983
+
984
+ if not indices_match:
985
+ # Sort the bitmask to match the order of the requests
986
+ sorted_bitmask = np.zeros_like(grammar_bitmask)
987
+ for req_id, batch_index in struct_out_req_batch_indices.items():
988
+ orig_index = scheduler_output.structured_output_request_ids[
989
+ req_id]
990
+ sorted_bitmask[batch_index] = grammar_bitmask[orig_index]
991
+ grammar_bitmask = sorted_bitmask
992
+
993
+ grammar_bitmask = torch.from_numpy(grammar_bitmask)
994
+
995
+ # TODO: compatibility with spec decode
996
+ xgr.apply_token_bitmask_inplace(
997
+ logits,
998
+ grammar_bitmask.to(self.device, non_blocking=True),
999
+ indices=list(struct_out_req_batch_indices.values()),
1000
+ )
1001
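A small self-contained sketch of the reordering above, with two hypothetical structured-output requests whose scheduler-side row order differs from their slots in the runner's batch; the single-column bitmask is a toy stand-in.

import numpy as np

structured_output_request_ids = {"req-a": 0, "req-b": 1}   # row order used by the scheduler
req_id_to_index = {"req-b": 0, "req-a": 1}                  # slot order in the runner batch
grammar_bitmask = np.array([[0b1010], [0b0101]], dtype=np.int32)  # rows: req-a, req-b

sorted_bitmask = np.zeros_like(grammar_bitmask)
for req_id, batch_index in req_id_to_index.items():
    orig_index = structured_output_request_ids[req_id]
    sorted_bitmask[batch_index] = grammar_bitmask[orig_index]
# sorted_bitmask rows now follow batch order: req-b's mask first, then req-a's.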
+
1002
+ @torch.inference_mode()
1003
+ def execute_model(
1004
+ self,
1005
+ scheduler_output: "SchedulerOutput",
1006
+ intermediate_tensors: Optional[IntermediateTensors] = None,
1007
+ ) -> Union[ModelRunnerOutput, torch.Tensor]:
1008
+         # Bind the KVConnector metadata to use during this forward pass.
1009
+ if has_kv_transfer_group():
1010
+ get_kv_transfer_group().bind_connector_metadata(
1011
+ scheduler_output.kv_connector_metadata)
1012
+
1013
+ self._update_states(scheduler_output)
1014
+ if not scheduler_output.total_num_scheduled_tokens:
1015
+ # Return empty ModelRunnerOutput if there's no work to do.
1016
+ return EMPTY_MODEL_RUNNER_OUTPUT
1017
+
1018
+ # Prepare the decoder inputs.
1019
+ attn_metadata, logits_indices, spec_decode_metadata = (
1020
+ self._prepare_inputs(scheduler_output))
1021
+ num_scheduled_tokens = scheduler_output.total_num_scheduled_tokens
1022
+ if (self.use_cuda_graph
1023
+ and num_scheduled_tokens <= self.cudagraph_batch_sizes[-1]):
1024
+ # Use piecewise CUDA graphs.
1025
+ # Add padding to the batch size.
1026
+ num_input_tokens = self.vllm_config.pad_for_cudagraph(
1027
+ num_scheduled_tokens)
1028
+ else:
1029
+ # Eager mode.
1030
+             # Pad tokens to a multiple of tensor_parallel_size when
1031
+             # sequence-parallel collective fusion is enabled.
1032
+ tp_size = self.vllm_config.parallel_config.tensor_parallel_size
1033
+ if self.vllm_config.compilation_config.pass_config. \
1034
+ enable_sequence_parallelism and tp_size > 1:
1035
+ from vllm.utils import round_up
1036
+ num_input_tokens = round_up(num_scheduled_tokens, tp_size)
1037
+ else:
1038
+ num_input_tokens = num_scheduled_tokens
1039
+ attn_metadata.num_input_tokens = num_input_tokens
1040
+
1041
+         # _prepare_inputs may reorder the batch, so we must gather the
1042
+         # multimodal outputs after that to ensure the correct order.
1043
+ if self.is_multimodal_model:
1044
+ # Run the multimodal encoder if any.
1045
+ self._execute_mm_encoder(scheduler_output)
1046
+ mm_embeds = self._gather_mm_embeddings(scheduler_output)
1047
+ else:
1048
+ mm_embeds = []
1049
+
1050
+ if self.is_multimodal_model:
1051
+ # NOTE(woosuk): To unify token ids and soft tokens (vision
1052
+ # embeddings), we always use embeddings (rather than token ids)
1053
+ # as input to the multimodal model, even when the input is text.
1054
+ input_ids = self.input_ids[:num_scheduled_tokens]
1055
+ if mm_embeds:
1056
+ inputs_embeds = self.model.get_input_embeddings(
1057
+ input_ids, mm_embeds)
1058
+ else:
1059
+ inputs_embeds = self.model.get_input_embeddings(input_ids)
1060
+ # TODO(woosuk): Avoid the copy. Optimize.
1061
+ self.inputs_embeds[:num_scheduled_tokens].copy_(inputs_embeds)
1062
+ inputs_embeds = self.inputs_embeds[:num_input_tokens]
1063
+ input_ids = None
1064
+ else:
1065
+ # For text-only models, we use token ids as input.
1066
+ # While it is possible to use embeddings as input just like the
1067
+ # multimodal models, it is not desirable for performance since
1068
+ # then the embedding layer is not included in the CUDA graph.
1069
+ input_ids = self.input_ids[:num_input_tokens]
1070
+ inputs_embeds = None
1071
+ if self.uses_mrope:
1072
+ positions = self.mrope_positions[:, :num_input_tokens]
1073
+ else:
1074
+ positions = self.positions[:num_input_tokens]
1075
+
1076
+ if get_pp_group().is_first_rank:
1077
+ intermediate_tensors = None
1078
+ else:
1079
+ assert intermediate_tensors is not None
1080
+ assert self.intermediate_tensors is not None
1081
+ for k, v in intermediate_tensors.items():
1082
+ self.intermediate_tensors[k][:num_input_tokens].copy_(
1083
+ v[:num_input_tokens], non_blocking=True)
1084
+ intermediate_tensors = IntermediateTensors({
1085
+ k: v[:num_input_tokens]
1086
+ for k, v in self.intermediate_tensors.items()
1087
+ })
1088
+
1089
+ # Run the decoder.
1090
+ # Use persistent buffers for CUDA graphs.
1091
+ with set_forward_context(attn_metadata, self.vllm_config):
1092
+ output = self.model(
1093
+ input_ids=input_ids,
1094
+ positions=positions,
1095
+ intermediate_tensors=intermediate_tensors,
1096
+ inputs_embeds=inputs_embeds,
1097
+ )
1098
+
1099
+ if self.use_aux_hidden_state_outputs:
1100
+ hidden_states, aux_hidden_states = output
1101
+ else:
1102
+ hidden_states = output
1103
+
1104
+ if not get_pp_group().is_last_rank:
1105
+ # For mid-pipeline stages, return the hidden states.
1106
+ return hidden_states
1107
+
1108
+ hidden_states = hidden_states[:num_scheduled_tokens]
1109
+ sample_hidden_states = hidden_states[logits_indices]
1110
+ logits = self.model.compute_logits(sample_hidden_states, None)
1111
+
1112
+ # Apply structured output bitmasks if present
1113
+ if scheduler_output.grammar_bitmask is not None:
1114
+ self.apply_grammar_bitmask(scheduler_output, logits)
1115
+
1116
+ # Sample the next token and get logprobs if needed.
1117
+ sampling_metadata = self.input_batch.sampling_metadata
1118
+ if spec_decode_metadata is None:
1119
+ sampler_output = self.sampler(
1120
+ logits=logits,
1121
+ sampling_metadata=sampling_metadata,
1122
+ )
1123
+ else:
1124
+ # When indexing with a tensor (bonus_logits_indices), PyTorch
1125
+ # creates a new tensor with separate storage from the original
1126
+ # logits tensor. This means any in-place operations on bonus_logits
1127
+ # won't affect the original logits tensor.
1128
+ bonus_logits = logits[spec_decode_metadata.bonus_logits_indices]
1129
+ sampler_output = self.sampler(
1130
+ logits=bonus_logits,
1131
+ sampling_metadata=sampling_metadata,
1132
+ )
1133
+ bonus_token_ids = sampler_output.sampled_token_ids
1134
+
1135
+ # Just like `bonus_logits`, `target_logits` is a new tensor with
1136
+ # separate storage from the original `logits` tensor. Therefore,
1137
+ # it is safe to update `target_logits` in place.
1138
+ target_logits = logits[spec_decode_metadata.target_logits_indices]
1139
+ output_token_ids = self.rejection_sampler(
1140
+ spec_decode_metadata,
1141
+ None, # draft_probs
1142
+ target_logits,
1143
+ bonus_token_ids,
1144
+ sampling_metadata,
1145
+ )
1146
+ sampler_output.sampled_token_ids = output_token_ids
1147
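The copy semantics the comments above rely on can be checked in isolation. This tiny sketch is unrelated to the runner itself; it only demonstrates that advanced (tensor) indexing in PyTorch returns a tensor with its own storage, so in-place edits do not touch the original logits.

import torch

logits = torch.zeros(4, 8)
bonus_logits = logits[torch.tensor([1, 3])]  # advanced indexing -> new storage
bonus_logits.fill_(1.0)                      # in-place edit of the copy
assert logits.abs().sum().item() == 0        # the original logits are untouched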
+
1148
+ # TODO(woosuk): The following loop can be slow since it iterates over
1149
+ # the requests one by one. Optimize.
1150
+ discard_sampled_tokens_req_indices = []
1151
+ for i, req_id in enumerate(self.input_batch.req_ids):
1152
+ req_state = self.requests[req_id]
1153
+ seq_len = (req_state.num_computed_tokens +
1154
+ scheduler_output.num_scheduled_tokens[req_id])
1155
+ if seq_len < req_state.num_tokens:
1156
+ # Ignore the sampled token for partial prefills.
1157
+ # Rewind the generator state as if the token was not sampled.
1158
+ # This relies on cuda-specific torch-internal impl details
1159
+ generator = self.input_batch.generators.get(i)
1160
+ if generator is not None:
1161
+ generator.set_offset(generator.get_offset() - 4)
1162
+ # Record the index of the request that should not be sampled,
1163
+ # so that we could clear the sampled tokens before returning.
1164
+ discard_sampled_tokens_req_indices.append(i)
1165
+
1166
+ # NOTE: GPU -> CPU Sync happens here.
1167
+ # Move as many CPU operations as possible before this sync point.
1168
+ logprobs_tensors = sampler_output.logprobs_tensors
1169
+ logprobs_lists = logprobs_tensors.tolists() \
1170
+ if logprobs_tensors is not None else None
1171
+
1172
+ # Compute prompt logprobs if needed.
1173
+ prompt_logprobs_dict = self._get_prompt_logprobs_dict(
1174
+ hidden_states,
1175
+ scheduler_output,
1176
+ )
1177
+
1178
+ # Get the valid generated tokens.
1179
+ sampled_token_ids = sampler_output.sampled_token_ids
1180
+ max_gen_len = sampled_token_ids.shape[-1]
1181
+ if max_gen_len == 1:
1182
+ # No spec decode tokens.
1183
+ valid_sampled_token_ids = sampled_token_ids.tolist()
1184
+ else:
1185
+ # Includes spec decode tokens.
1186
+ valid_sampled_token_ids = self.rejection_sampler.parse_output(
1187
+ sampled_token_ids,
1188
+ self.input_batch.vocab_size,
1189
+ )
1190
+ # Mask out the sampled tokens that should not be sampled.
1191
+ for i in discard_sampled_tokens_req_indices:
1192
+ valid_sampled_token_ids[i].clear()
1193
+
1194
+ if not self.use_spec_decode:
1195
+ # Speculative decoding is not enabled.
1196
+ spec_token_ids = None
1197
+ elif self.speculative_config.method == "ngram":
1198
+ assert isinstance(self.drafter, NgramProposer)
1199
+ spec_token_ids = self.generate_draft_token_ids(
1200
+ valid_sampled_token_ids, sampling_metadata)
1201
+ elif self.speculative_config.use_eagle():
1202
+ assert isinstance(self.drafter, EagleProposer)
1203
+ # TODO(woosuk): Refactor the loop.
1204
+ next_token_ids: list[int] = []
1205
+ for i, token_ids in enumerate(valid_sampled_token_ids):
1206
+ if token_ids:
1207
+ # Common case.
1208
+ next_token_id = token_ids[-1]
1209
+ else:
1210
+ # Partial prefill (rare case).
1211
+ # Get the next token id from the request state.
1212
+ req_id = self.input_batch.req_ids[i]
1213
+ req_state = self.requests[req_id]
1214
+ seq_len = (req_state.num_computed_tokens +
1215
+ scheduler_output.num_scheduled_tokens[req_id])
1216
+ next_token_id = req_state.get_token_id(seq_len)
1217
+ next_token_ids.append(next_token_id)
1218
+ next_token_ids = torch.tensor(next_token_ids,
1219
+ dtype=torch.int32,
1220
+ device=self.device)
1221
+
1222
+ if spec_decode_metadata is None:
1223
+ # input_ids can be None for multimodal models.
1224
+ # We need to slice token_ids, positions, and hidden_states
1225
+ # because the eagle head does not use cuda graph and should
1226
+ # not include padding.
1227
+ target_token_ids = self.input_ids[:num_scheduled_tokens]
1228
+ target_positions = positions[:num_scheduled_tokens]
1229
+ if self.use_aux_hidden_state_outputs:
1230
+ target_hidden_states = [
1231
+ h[:num_scheduled_tokens] for h in aux_hidden_states
1232
+ ]
1233
+ else:
1234
+ target_hidden_states = hidden_states[:num_scheduled_tokens]
1235
+ target_slot_mapping = attn_metadata.slot_mapping
1236
+ cu_num_tokens = attn_metadata.query_start_loc
1237
+ else:
1238
+ # TODO(woosuk): Refactor this.
1239
+ num_draft_tokens = spec_decode_metadata.num_draft_tokens
1240
+ num_rejected_tokens = [
1241
+ n + 1 - len(valid_sampled_token_ids[i]) if n > 0 else 0
1242
+ for i, n in enumerate(num_draft_tokens)
1243
+ ]
1244
+ num_rejected_tokens = torch.tensor(
1245
+ num_rejected_tokens,
1246
+ dtype=torch.int32,
1247
+ device=self.device,
1248
+ )
1249
+ cu_num_tokens, token_indices = self.drafter.prepare_inputs(
1250
+ attn_metadata.query_start_loc,
1251
+ num_rejected_tokens,
1252
+ )
1253
+ target_token_ids = self.input_ids[token_indices]
1254
+ target_positions = positions[token_indices]
1255
+ if self.use_aux_hidden_state_outputs:
1256
+ target_hidden_states = [
1257
+ h[token_indices] for h in aux_hidden_states
1258
+ ]
1259
+ else:
1260
+ target_hidden_states = hidden_states[token_indices]
1261
+ target_slot_mapping = attn_metadata.slot_mapping[token_indices]
1262
+
1263
+ if self.use_aux_hidden_state_outputs:
1264
+ target_hidden_states = torch.cat(target_hidden_states, dim=-1)
1265
+ draft_token_ids = self.drafter.propose(
1266
+ target_token_ids=target_token_ids,
1267
+ target_positions=target_positions,
1268
+ target_hidden_states=target_hidden_states,
1269
+ target_slot_mapping=target_slot_mapping,
1270
+ next_token_ids=next_token_ids,
1271
+ cu_num_tokens=cu_num_tokens,
1272
+ block_table=attn_metadata.block_table,
1273
+ sampling_metadata=sampling_metadata,
1274
+ )
1275
+ spec_token_ids = draft_token_ids.tolist()
1276
+
1277
+ # Clear KVConnector state after all KVs are generated.
1278
+ if has_kv_transfer_group():
1279
+ get_kv_transfer_group().clear_connector_metadata()
1280
+
1281
+ return ModelRunnerOutput(
1282
+ req_ids=self.input_batch.req_ids,
1283
+ req_id_to_index=self.input_batch.req_id_to_index,
1284
+ sampled_token_ids=valid_sampled_token_ids,
1285
+ spec_token_ids=spec_token_ids,
1286
+ logprobs=logprobs_lists,
1287
+ prompt_logprobs_dict=prompt_logprobs_dict,
1288
+ )
1289
+
1290
+ def generate_draft_token_ids(
1291
+ self,
1292
+ sampled_token_ids: list[list[int]],
1293
+ sampling_metadata: SamplingMetadata,
1294
+ ) -> list[list[int]]:
1295
+ # TODO(woosuk): Optimize.
1296
+ draft_token_ids: list[list[int]] = []
1297
+ for i, sampled_ids in enumerate(sampled_token_ids):
1298
+ num_sampled_ids = len(sampled_ids)
1299
+ if not num_sampled_ids:
1300
+ # Skip speculative decoding.
1301
+ draft_token_ids.append([])
1302
+ continue
1303
+
1304
+ # Skip requests that require sampling parameters that are not
1305
+ # supported with speculative decoding.
1306
+ req_id = self.input_batch.req_ids[i]
1307
+ if not is_spec_decode_supported(req_id, self.input_batch):
1308
+ draft_token_ids.append([])
1309
+ continue
1310
+
1311
+ # Add sampled_token_ids to token_ids_cpu.
1312
+ start_idx = self.input_batch.num_tokens_no_spec[i]
1313
+ end_idx = start_idx + num_sampled_ids
1314
+ if end_idx >= self.max_model_len:
1315
+ # Skip requests that have already reached the max model length.
1316
+ draft_token_ids.append([])
1317
+ continue
1318
+
1319
+ self.input_batch.token_ids_cpu[i, start_idx:end_idx] = sampled_ids
1320
+ drafter_output = self.drafter.propose(
1321
+ self.input_batch.token_ids_cpu[i, :end_idx])
1322
+ if drafter_output is None or len(drafter_output) == 0:
1323
+ draft_token_ids.append([])
1324
+ else:
1325
+ draft_token_ids.append(drafter_output.tolist())
1326
+ return draft_token_ids
1327
+
1328
+ def load_model(self) -> None:
1329
+ logger.info("Starting to load model %s...", self.model_config.model)
1330
+ with DeviceMemoryProfiler() as m: # noqa: SIM117
1331
+ time_before_load = time.perf_counter()
1332
+ self.model = get_model(vllm_config=self.vllm_config)
1333
+ if self.lora_config:
1334
+ self.model = self.load_lora_model(self.model,
1335
+ self.model_config,
1336
+ self.scheduler_config,
1337
+ self.lora_config,
1338
+ self.device)
1339
+ if hasattr(self, "drafter"):
1340
+ logger.info("Loading drafter model...")
1341
+ self.drafter.load_model(self.model)
1342
+ if self.use_aux_hidden_state_outputs:
1343
+ self.model.set_aux_hidden_state_layers(
1344
+ self.model.get_eagle3_aux_hidden_state_layers())
1345
+ time_after_load = time.perf_counter()
1346
+ self.model_memory_usage = m.consumed_memory
1347
+ logger.info("Model loading took %.4f GiB and %.6f seconds",
1348
+ self.model_memory_usage / GiB_bytes,
1349
+ time_after_load - time_before_load)
1350
+
1351
+ def _get_prompt_logprobs_dict(
1352
+ self,
1353
+ hidden_states: torch.Tensor,
1354
+ scheduler_output: "SchedulerOutput",
1355
+ ) -> dict[str, Optional[LogprobsTensors]]:
1356
+ num_prompt_logprobs_dict = self.input_batch.num_prompt_logprobs
1357
+ if not num_prompt_logprobs_dict:
1358
+ return {}
1359
+
1360
+ in_progress_dict = self.input_batch.in_progress_prompt_logprobs_cpu
1361
+ prompt_logprobs_dict: dict[str, Optional[LogprobsTensors]] = {}
1362
+
1363
+ # Since prompt logprobs are a rare feature, prioritize simple,
1364
+ # maintainable loop over optimal performance.
1365
+ completed_prefill_reqs = []
1366
+ for req_id, num_prompt_logprobs in num_prompt_logprobs_dict.items():
1367
+
1368
+ num_tokens = scheduler_output.num_scheduled_tokens[req_id]
1369
+
1370
+ # Get metadata for this request.
1371
+ request = self.requests[req_id]
1372
+ num_prompt_tokens = len(request.prompt_token_ids)
1373
+ prompt_token_ids = torch.tensor(request.prompt_token_ids).to(
1374
+ self.device, non_blocking=True)
1375
+
1376
+ # Set up target LogprobsTensors object.
1377
+ logprobs_tensors = in_progress_dict.get(req_id)
1378
+ if not logprobs_tensors:
1379
+ # Create empty logprobs CPU tensors for the entire prompt.
1380
+ # If chunked, we'll copy in slice by slice.
1381
+ logprobs_tensors = LogprobsTensors.empty_cpu(
1382
+ num_prompt_tokens - 1, num_prompt_logprobs + 1)
1383
+ in_progress_dict[req_id] = logprobs_tensors
1384
+
1385
+ # Determine number of logits to retrieve.
1386
+ start_idx = request.num_computed_tokens
1387
+ start_tok = start_idx + 1
1388
+ num_remaining_tokens = num_prompt_tokens - start_tok
1389
+ if num_tokens <= num_remaining_tokens:
1390
+ # This is a chunk, more tokens remain.
1391
+ # In the == case, there are no more prompt logprobs to produce
1392
+ # but we want to defer returning them to the next step where we
1393
+ # have new generated tokens to return.
1394
+ num_logits = num_tokens
1395
+ else:
1396
+ # This is the last chunk of prompt tokens to return.
1397
+ num_logits = num_remaining_tokens
1398
+ completed_prefill_reqs.append(req_id)
1399
+ prompt_logprobs_dict[req_id] = logprobs_tensors
1400
+
1401
+ if num_logits <= 0:
1402
+ # This can happen for the final chunk if we prefilled exactly
1403
+ # (num_prompt_tokens - 1) tokens for this request in the prior
1404
+ # step. There are no more prompt logprobs to produce.
1405
+ continue
1406
+
1407
+ # Get the logits corresponding to this req's prompt tokens.
1408
+ # If this is a partial request (i.e. chunked prefill),
1409
+ # then there is prompt logprob generated for each index.
1410
+ req_idx = self.input_batch.req_id_to_index[req_id]
1411
+ offset = self.query_start_loc_np[req_idx].item()
1412
+ prompt_hidden_states = hidden_states[offset:offset + num_logits]
1413
+ logits = self.model.compute_logits(prompt_hidden_states, None)
1414
+
1415
+ # Get the "target" tokens for each index. For prompt at index i,
1416
+ # the token at prompt index i+1 is the "sampled" token we want
1417
+ # to gather the logprob for.
1418
+ tgt_token_ids = prompt_token_ids[start_tok:start_tok + num_logits]
1419
+
1420
+ # Compute prompt logprobs.
1421
+ logprobs = self.sampler.compute_logprobs(logits)
1422
+ token_ids, logprobs, ranks = self.sampler.gather_logprobs(
1423
+ logprobs, num_prompt_logprobs, tgt_token_ids)
1424
+
1425
+ # Transfer GPU->CPU async.
1426
+ chunk_slice = slice(start_idx, start_idx + num_logits)
1427
+ logprobs_tensors.logprob_token_ids[chunk_slice].copy_(
1428
+ token_ids, non_blocking=True)
1429
+ logprobs_tensors.logprobs[chunk_slice].copy_(logprobs,
1430
+ non_blocking=True)
1431
+ logprobs_tensors.selected_token_ranks[chunk_slice].copy_(
1432
+ ranks, non_blocking=True)
1433
+
1434
+ # Remove requests that have completed prefill from the batch
1435
+ # num_prompt_logprobs_dict.
1436
+ for req_id in completed_prefill_reqs:
1437
+ del num_prompt_logprobs_dict[req_id]
1438
+ del in_progress_dict[req_id]
1439
+
1440
+ # Must synchronize the non-blocking GPU->CPU transfers.
1441
+ if prompt_logprobs_dict:
1442
+ torch.cuda.synchronize()
1443
+
1444
+ return prompt_logprobs_dict
1445
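The chunk-sizing rule in this method reduces to a couple of integer comparisons. With made-up numbers (a 100-token prompt, 48 tokens already computed, 32 tokens scheduled this step):

num_prompt_tokens = 100
num_computed_tokens = 48
num_tokens = 32                        # scheduled this step

start_tok = num_computed_tokens + 1    # 49: index of the first "target" token
num_remaining_tokens = num_prompt_tokens - start_tok   # 51
if num_tokens <= num_remaining_tokens:
    num_logits = num_tokens            # 32: a middle chunk, more prompt remains
else:
    num_logits = num_remaining_tokens  # last chunk of prompt logprobs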
+
1446
+ @torch.inference_mode()
1447
+ def _dummy_run(
1448
+ self,
1449
+ num_tokens: int,
1450
+ ) -> torch.Tensor:
1451
+
1452
+ # Set num_scheduled_tokens based on num_tokens and max_num_seqs
1453
+ # for dummy run with LoRA so that the num_reqs collectively
1454
+ # has num_tokens in total.
1455
+ assert num_tokens <= self.scheduler_config.max_num_batched_tokens
1456
+ max_num_reqs = self.scheduler_config.max_num_seqs
1457
+ num_reqs = max_num_reqs if num_tokens >= max_num_reqs else num_tokens
1458
+ min_tokens_per_req = num_tokens // num_reqs
1459
+ num_scheduled_tokens_list = [min_tokens_per_req] * num_reqs
1460
+ num_scheduled_tokens_list[-1] += num_tokens % num_reqs
1461
+ assert sum(num_scheduled_tokens_list) == num_tokens
1462
+ assert len(num_scheduled_tokens_list) == num_reqs
1463
+ num_scheduled_tokens = np.array(num_scheduled_tokens_list,
1464
+ dtype=np.int32)
1465
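The token-to-request distribution above can be checked with made-up numbers (37 dummy tokens, max_num_seqs = 8):

num_tokens = 37
max_num_reqs = 8

num_reqs = max_num_reqs if num_tokens >= max_num_reqs else num_tokens  # 8
min_tokens_per_req = num_tokens // num_reqs                            # 4
num_scheduled_tokens_list = [min_tokens_per_req] * num_reqs            # [4] * 8
num_scheduled_tokens_list[-1] += num_tokens % num_reqs                 # last entry -> 9
assert sum(num_scheduled_tokens_list) == num_tokens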
+
1466
+ with self.maybe_dummy_run_with_lora(self.lora_config,
1467
+ num_scheduled_tokens):
1468
+ model = self.model
1469
+ if self.is_multimodal_model:
1470
+ input_ids = None
1471
+ inputs_embeds = self.inputs_embeds[:num_tokens]
1472
+ else:
1473
+ input_ids = self.input_ids[:num_tokens]
1474
+ inputs_embeds = None
1475
+ if self.uses_mrope:
1476
+ positions = self.mrope_positions[:, :num_tokens]
1477
+ else:
1478
+ positions = self.positions[:num_tokens]
1479
+
1480
+ if get_pp_group().is_first_rank:
1481
+ intermediate_tensors = None
1482
+ else:
1483
+ if self.intermediate_tensors is None:
1484
+ self.intermediate_tensors = (
1485
+ self.model.make_empty_intermediate_tensors(
1486
+ batch_size=self.max_num_tokens,
1487
+ dtype=self.model_config.dtype,
1488
+ device=self.device))
1489
+ intermediate_tensors = IntermediateTensors({
1490
+ k: v[:num_tokens]
1491
+ for k, v in self.intermediate_tensors.items()
1492
+ })
1493
+
1494
+ with set_forward_context(None,
1495
+ self.vllm_config,
1496
+ num_tokens=num_tokens):
1497
+ outputs = model(
1498
+ input_ids=input_ids,
1499
+ positions=positions,
1500
+ intermediate_tensors=intermediate_tensors,
1501
+ inputs_embeds=inputs_embeds,
1502
+ )
1503
+ if self.use_aux_hidden_state_outputs:
1504
+ hidden_states, _ = outputs
1505
+ else:
1506
+ hidden_states = outputs
1507
+
1508
+ logit_indices = np.cumsum(num_scheduled_tokens) - 1
1509
+ return hidden_states[logit_indices]
1510
+
1511
+ @torch.inference_mode()
1512
+ def _dummy_sampler_run(
1513
+ self,
1514
+ hidden_states: torch.Tensor,
1515
+ ) -> torch.Tensor:
1516
+
1517
+ logits = self.model.compute_logits(hidden_states, None)
1518
+ num_reqs = logits.size(0)
1519
+
1520
+ dummy_tensors = lambda v: torch.full(
1521
+ (num_reqs, ), v, device=self.device)
1522
+
1523
+ dummy_metadata = SamplingMetadata(
1524
+ temperature=dummy_tensors(0.5),
1525
+ all_greedy=False,
1526
+ all_random=False,
1527
+ top_p=dummy_tensors(0.9),
1528
+ top_k=dummy_tensors(logits.size(1) - 1),
1529
+ min_p=None,
1530
+ generators={},
1531
+ max_num_logprobs=None,
1532
+ no_penalties=True,
1533
+ prompt_token_ids=None,
1534
+ frequency_penalties=dummy_tensors(0.1),
1535
+ presence_penalties=dummy_tensors(0.1),
1536
+ repetition_penalties=dummy_tensors(0.1),
1537
+ output_token_ids=[[] for _ in range(num_reqs)],
1538
+ min_tokens={},
1539
+ logit_bias=[None for _ in range(num_reqs)],
1540
+ allowed_token_ids_mask=None,
1541
+ bad_words_token_ids={},
1542
+ )
1543
+ try:
1544
+ sampler_output = self.sampler(logits=logits,
1545
+ sampling_metadata=dummy_metadata)
1546
+ except RuntimeError as e:
1547
+ if 'out of memory' in str(e):
1548
+ raise RuntimeError(
1549
+ "CUDA out of memory occurred when warming up sampler with "
1550
+ f"{num_reqs} dummy requests. Please try lowering "
1551
+ "`max_num_seqs` or `gpu_memory_utilization` when "
1552
+ "initializing the engine.") from e
1553
+ else:
1554
+ raise e
1555
+ if self.use_spec_decode:
1556
+ draft_token_ids = [[0] for _ in range(num_reqs)]
1557
+ dummy_spec_decode_metadata = SpecDecodeMetadata.make_dummy(
1558
+ draft_token_ids, self.device)
1559
+
1560
+ num_tokens = sum(len(ids) for ids in draft_token_ids)
1561
+ # draft_probs = torch.randn(
1562
+ # num_tokens, logits.shape[-1], device=self.device,
1563
+ # dtype=logits.dtype)
1564
+ draft_probs = None
1565
+ target_logits = torch.randn(num_tokens,
1566
+ logits.shape[-1],
1567
+ device=self.device,
1568
+ dtype=logits.dtype)
1569
+ # NOTE(woosuk): Here, we should use int32 because the sampler uses
1570
+ # int32 for bonus_token_ids. If the dtype mismatches, re-compilation
1571
+ # will occur at runtime.
1572
+ bonus_token_ids = torch.zeros(num_reqs,
1573
+ device=self.device,
1574
+ dtype=torch.int32)
1575
+ self.rejection_sampler(
1576
+ dummy_spec_decode_metadata,
1577
+ draft_probs,
1578
+ target_logits,
1579
+ bonus_token_ids,
1580
+ dummy_metadata,
1581
+ )
1582
+ return sampler_output
1583
+
1584
+ def profile_run(self) -> None:
1585
+ # Profile with multimodal encoder & encoder cache.
1586
+ # TODO: handle encoder-decoder models once we support them.
1587
+ if (self.is_multimodal_model and self.max_num_encoder_input_tokens > 0
1588
+ and self.encoder_cache_size > 0):
1589
+
1590
+             # NOTE: Currently the model is profiled with a single non-text
1591
+ # modality with the max possible input tokens even when
1592
+ # it supports multiple.
1593
+ max_tokens_by_modality_dict = self.mm_registry \
1594
+ .get_max_tokens_per_item_by_nonzero_modality(self.model_config)
1595
+ dummy_data_modality, max_tokens_per_mm_item = max(
1596
+ max_tokens_by_modality_dict.items(), key=lambda item: item[1])
1597
+
1598
+ # Check how many items of this modality can be supported by
1599
+ # the encoder budget.
1600
+ encoder_budget = min(self.max_num_encoder_input_tokens,
1601
+ self.encoder_cache_size)
1602
+
1603
+ max_num_mm_items_encoder_budget = cdiv(encoder_budget,
1604
+ max_tokens_per_mm_item)
1605
+
1606
+ # Check how many items of this modality can be supported by
1607
+ # the decoder budget.
1608
+ max_mm_items_per_req = self.mm_registry.get_mm_limits_per_prompt(
1609
+ self.model_config)[dummy_data_modality]
1610
+
1611
+ # NOTE: We do not consider max_num_batched_tokens on purpose
1612
+ # because the multimodal embeddings can be generated in advance
1613
+ # and chunked prefilled.
1614
+ max_num_mm_items_decoder_budget = self.max_num_reqs * \
1615
+ max_mm_items_per_req
1616
+
1617
+ max_num_mm_items = min(max_num_mm_items_encoder_budget,
1618
+ max_num_mm_items_decoder_budget)
1619
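The two budgets above interact as in this numeric sketch. All values are hypothetical (an 8192-token encoder budget, items costing up to 1176 tokens, 256 max requests, 1 item allowed per prompt), and cdiv is treated as ceiling division.

import math

encoder_budget = min(8192, 8192)                          # 8192
max_tokens_per_mm_item = 1176
max_num_mm_items_encoder_budget = math.ceil(encoder_budget / max_tokens_per_mm_item)  # 7
max_num_mm_items_decoder_budget = 256 * 1                 # max_num_reqs * items per prompt
max_num_mm_items = min(max_num_mm_items_encoder_budget,
                       max_num_mm_items_decoder_budget)   # 7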
+
1620
+ logger.info(
1621
+ "Encoder cache will be initialized with a budget of %s tokens,"
1622
+ " and profiled with %s %s items of the maximum feature size.",
1623
+ encoder_budget, max_num_mm_items, dummy_data_modality)
1624
+
1625
+ # Create dummy batch of multimodal inputs.
1626
+ dummy_mm_kwargs = self.mm_registry.get_decoder_dummy_data(
1627
+ model_config=self.model_config,
1628
+ seq_len=self.max_num_tokens,
1629
+ mm_counts={
1630
+ dummy_data_modality: 1
1631
+ },
1632
+ ).multi_modal_data
1633
+
1634
+ batched_dummy_mm_inputs = MultiModalKwargs.batch(
1635
+ [dummy_mm_kwargs] * max_num_mm_items)
1636
+ batched_dummy_mm_inputs = MultiModalKwargs.as_kwargs(
1637
+ batched_dummy_mm_inputs, device=self.device)
1638
+
1639
+ # Run multimodal encoder.
1640
+ dummy_encoder_outputs = self.model.get_multimodal_embeddings(
1641
+ **batched_dummy_mm_inputs)
1642
+
1643
+ sanity_check_mm_encoder_outputs(
1644
+ dummy_encoder_outputs,
1645
+ expected_num_items=max_num_mm_items,
1646
+ )
1647
+
1648
+ # Cache the dummy encoder outputs.
1649
+ self.encoder_cache["tmp"] = dict(enumerate(dummy_encoder_outputs))
1650
+
1651
+ hidden_states = self._dummy_run(self.max_num_tokens)
1652
+ if get_pp_group().is_last_rank:
1653
+ sampler_output = self._dummy_sampler_run(hidden_states)
1654
+ else:
1655
+ sampler_output = None
1656
+ torch.cuda.synchronize()
1657
+ del hidden_states, sampler_output
1658
+ self.encoder_cache.clear()
1659
+ gc.collect()
1660
+
1661
+ def capture_model(self) -> None:
1662
+ if not self.use_cuda_graph:
1663
+ logger.warning(
1664
+ "Skipping CUDA graph capture. Please add "
1665
+ "-O %s to use CUDA graphs.", CompilationLevel.PIECEWISE)
1666
+ return
1667
+
1668
+ start_time = time.perf_counter()
1669
+ start_free_gpu_memory = torch.cuda.mem_get_info()[0]
1670
+
1671
+ # Trigger CUDA graph capture for specific shapes.
1672
+ # Capture the large shapes first so that the smaller shapes
1673
+ # can reuse the memory pool allocated for the large shapes.
1674
+ with graph_capture(device=self.device):
1675
+ for num_tokens in reversed(self.cudagraph_batch_sizes):
1676
+ for _ in range(self.vllm_config.compilation_config.
1677
+ cudagraph_num_of_warmups):
1678
+ self._dummy_run(num_tokens)
1679
+ self._dummy_run(num_tokens)
1680
+
1681
+ end_time = time.perf_counter()
1682
+ end_free_gpu_memory = torch.cuda.mem_get_info()[0]
1683
+ elapsed_time = end_time - start_time
1684
+ cuda_graph_size = start_free_gpu_memory - end_free_gpu_memory
1685
+ # This usually takes 5~20 seconds.
1686
+ logger.info("Graph capturing finished in %.0f secs, took %.2f GiB",
1687
+ elapsed_time, cuda_graph_size / (1 << 30))
1688
+
1689
+ def initialize_kv_cache(self, kv_cache_config: KVCacheConfig) -> None:
1690
+ """
1691
+ Initialize KV cache based on `kv_cache_config`.
1692
+ Args:
1693
+ kv_cache_config: Configuration for the KV cache, including the KV
1694
+ cache size of each layer
1695
+ """
1696
+ if len(kv_cache_config.kv_cache_groups) > 1:
1697
+ raise NotImplementedError(
1698
+ "Hybrid models with more than one KV cache type are not "
1699
+ "supported yet.")
1700
+
1701
+ kv_caches: dict[str, torch.Tensor] = {}
1702
+
1703
+ for kv_cache_group in kv_cache_config.kv_cache_groups:
1704
+ kv_cache_spec = kv_cache_group.kv_cache_spec
1705
+ for layer_name in kv_cache_group.layer_names:
1706
+ tensor_config = kv_cache_config.tensors[layer_name]
1707
+ assert tensor_config.size % kv_cache_spec.page_size_bytes == 0
1708
+ num_blocks = tensor_config.size // kv_cache_spec.page_size_bytes
1709
+ # `num_blocks` is the number of blocks the model runner can use.
1710
+ # `kv_cache_config.num_blocks` is the number of blocks that
1711
+ # KVCacheManager may allocate.
1712
+ # Since different GPUs may have different number of layers and
1713
+ # different memory capacities, `num_blocks` can be different on
1714
+ # different GPUs, and `kv_cache_config.num_blocks` is set to
1715
+ # the min of all `num_blocks`. Verify it here.
1716
+ assert num_blocks >= kv_cache_config.num_blocks
1717
+ if isinstance(kv_cache_spec, AttentionSpec):
1718
+ kv_cache_shape = self.attn_backend.get_kv_cache_shape(
1719
+ num_blocks, kv_cache_spec.block_size,
1720
+ kv_cache_spec.num_kv_heads, kv_cache_spec.head_size)
1721
+ dtype = kv_cache_spec.dtype
1722
+ kv_caches[layer_name] = torch.zeros(kv_cache_shape,
1723
+ dtype=dtype,
1724
+ device=self.device)
1725
+ else:
1726
+ # TODO: add new branches when introducing more types of
1727
+ # KV cache specs.
1728
+ raise ValueError("Unknown KV cache spec type.")
1729
+
1730
+ bind_kv_cache(
1731
+ kv_caches,
1732
+ self.vllm_config.compilation_config.static_forward_context,
1733
+ self.kv_caches)
1734
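The num_blocks derivation above is a straight division of the per-layer tensor size by the page size. A hypothetical layer (fp16 KV cache, block size 16, 8 KV heads, head size 128, a 2 GiB allocation, and a page size assumed to count both K and V planes) works out as:

bytes_per_elem = 2                                    # fp16
block_size, num_kv_heads, head_size = 16, 8, 128
page_size_bytes = 2 * block_size * num_kv_heads * head_size * bytes_per_elem  # 65536
tensor_size = 2 * 1024**3                             # 2 GiB for this layer
assert tensor_size % page_size_bytes == 0
num_blocks = tensor_size // page_size_bytes           # 32768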
+
1735
+ def get_kv_cache_spec(self) -> dict[str, KVCacheSpec]:
1736
+ """
1737
+ Generates the KVCacheSpec by parsing the kv cache format from each
1738
+ Attention module in the static forward context.
1739
+ Returns:
1740
+ KVCacheSpec: A dictionary mapping layer names to their KV cache
1741
+ format. Layers that do not need KV cache are not included.
1742
+ """
1743
+
1744
+ layers = get_layers_from_vllm_config(self.vllm_config, Attention)
1745
+ block_size = self.vllm_config.cache_config.block_size
1746
+ use_mla = self.vllm_config.model_config.use_mla
1747
+ kv_cache_spec: dict[str, KVCacheSpec] = {}
1748
+ for layer_name, attn_module in layers.items():
1749
+ # TODO: Support other attention modules, e.g., cross-attention
1750
+ if attn_module.attn_type == AttentionType.DECODER:
1751
+ if attn_module.sliding_window is not None:
1752
+ kv_cache_spec[layer_name] = SlidingWindowSpec(
1753
+ block_size=block_size,
1754
+ num_kv_heads=attn_module.num_kv_heads,
1755
+ head_size=attn_module.head_size,
1756
+ dtype=self.kv_cache_dtype,
1757
+ sliding_window=attn_module.sliding_window,
1758
+ use_mla=use_mla)
1759
+ else:
1760
+ kv_cache_spec[layer_name] = FullAttentionSpec(
1761
+ block_size=block_size,
1762
+ num_kv_heads=attn_module.num_kv_heads,
1763
+ head_size=attn_module.head_size,
1764
+ dtype=self.kv_cache_dtype,
1765
+ use_mla=use_mla)
1766
+ elif attn_module.attn_type in (AttentionType.ENCODER,
1767
+ AttentionType.ENCODER_ONLY):
1768
+ # encoder-only attention does not need KV cache.
1769
+ continue
1770
+ elif attn_module.attn_type == AttentionType.ENCODER_DECODER:
1771
+ raise NotImplementedError
1772
+ else:
1773
+ raise ValueError(
1774
+ f"Unknown attention type: {attn_module.attn_type}")
1775
+
1776
+ return kv_cache_spec