vllm-cpu 0.9.2.post2 (cp311-cp311-manylinux_2_17_aarch64.whl)

This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (1236)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +214 -0
  3. vllm/_custom_ops.py +1915 -0
  4. vllm/_ipex_ops.py +350 -0
  5. vllm/_version.py +34 -0
  6. vllm/adapter_commons/__init__.py +0 -0
  7. vllm/adapter_commons/layers.py +16 -0
  8. vllm/adapter_commons/models.py +106 -0
  9. vllm/adapter_commons/request.py +26 -0
  10. vllm/adapter_commons/utils.py +93 -0
  11. vllm/adapter_commons/worker_manager.py +39 -0
  12. vllm/assets/__init__.py +0 -0
  13. vllm/assets/audio.py +45 -0
  14. vllm/assets/base.py +41 -0
  15. vllm/assets/image.py +34 -0
  16. vllm/assets/video.py +139 -0
  17. vllm/attention/__init__.py +20 -0
  18. vllm/attention/backends/__init__.py +0 -0
  19. vllm/attention/backends/abstract.py +325 -0
  20. vllm/attention/backends/blocksparse_attn.py +465 -0
  21. vllm/attention/backends/cpu_mla.py +307 -0
  22. vllm/attention/backends/dual_chunk_flash_attn.py +1506 -0
  23. vllm/attention/backends/flash_attn.py +1008 -0
  24. vllm/attention/backends/flashinfer.py +1107 -0
  25. vllm/attention/backends/flashmla.py +244 -0
  26. vllm/attention/backends/hpu_attn.py +318 -0
  27. vllm/attention/backends/ipex_attn.py +403 -0
  28. vllm/attention/backends/mla/__init__.py +0 -0
  29. vllm/attention/backends/mla/common.py +1391 -0
  30. vllm/attention/backends/pallas.py +356 -0
  31. vllm/attention/backends/placeholder_attn.py +400 -0
  32. vllm/attention/backends/rocm_aiter_mla.py +435 -0
  33. vllm/attention/backends/rocm_flash_attn.py +1015 -0
  34. vllm/attention/backends/torch_sdpa.py +707 -0
  35. vllm/attention/backends/triton_mla.py +115 -0
  36. vllm/attention/backends/utils.py +610 -0
  37. vllm/attention/backends/xformers.py +807 -0
  38. vllm/attention/layer.py +481 -0
  39. vllm/attention/ops/__init__.py +0 -0
  40. vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
  41. vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +433 -0
  42. vllm/attention/ops/blocksparse_attention/interface.py +239 -0
  43. vllm/attention/ops/blocksparse_attention/utils.py +246 -0
  44. vllm/attention/ops/chunked_prefill_paged_decode.py +368 -0
  45. vllm/attention/ops/flashmla.py +116 -0
  46. vllm/attention/ops/hpu_paged_attn.py +88 -0
  47. vllm/attention/ops/ipex_attn.py +195 -0
  48. vllm/attention/ops/merge_attn_states.py +43 -0
  49. vllm/attention/ops/nki_flash_attn.py +903 -0
  50. vllm/attention/ops/paged_attn.py +256 -0
  51. vllm/attention/ops/pallas_kv_cache_update.py +120 -0
  52. vllm/attention/ops/prefix_prefill.py +902 -0
  53. vllm/attention/ops/rocm_aiter_mla.py +100 -0
  54. vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
  55. vllm/attention/ops/triton_decode_attention.py +674 -0
  56. vllm/attention/ops/triton_flash_attention.py +984 -0
  57. vllm/attention/ops/triton_merge_attn_states.py +97 -0
  58. vllm/attention/ops/triton_unified_attention.py +738 -0
  59. vllm/attention/selector.py +214 -0
  60. vllm/attention/utils/fa_utils.py +72 -0
  61. vllm/beam_search.py +87 -0
  62. vllm/benchmarks/__init__.py +0 -0
  63. vllm/benchmarks/datasets.py +1441 -0
  64. vllm/benchmarks/endpoint_request_func.py +393 -0
  65. vllm/benchmarks/latency.py +168 -0
  66. vllm/benchmarks/serve.py +1063 -0
  67. vllm/benchmarks/throughput.py +609 -0
  68. vllm/benchmarks/utils.py +70 -0
  69. vllm/collect_env.py +820 -0
  70. vllm/compilation/__init__.py +0 -0
  71. vllm/compilation/activation_quant_fusion.py +89 -0
  72. vllm/compilation/backends.py +610 -0
  73. vllm/compilation/base_piecewise_backend.py +72 -0
  74. vllm/compilation/collective_fusion.py +127 -0
  75. vllm/compilation/compiler_interface.py +564 -0
  76. vllm/compilation/counter.py +41 -0
  77. vllm/compilation/cuda_piecewise_backend.py +218 -0
  78. vllm/compilation/decorators.py +250 -0
  79. vllm/compilation/fix_functionalization.py +191 -0
  80. vllm/compilation/fusion.py +645 -0
  81. vllm/compilation/fusion_attn.py +166 -0
  82. vllm/compilation/fx_utils.py +84 -0
  83. vllm/compilation/inductor_pass.py +115 -0
  84. vllm/compilation/monitor.py +39 -0
  85. vllm/compilation/multi_output_match.py +109 -0
  86. vllm/compilation/noop_elimination.py +165 -0
  87. vllm/compilation/pass_manager.py +82 -0
  88. vllm/compilation/sequence_parallelism.py +482 -0
  89. vllm/compilation/torch25_custom_graph_pass.py +42 -0
  90. vllm/compilation/vllm_inductor_pass.py +70 -0
  91. vllm/compilation/wrapper.py +135 -0
  92. vllm/config.py +4913 -0
  93. vllm/connections.py +174 -0
  94. vllm/core/__init__.py +0 -0
  95. vllm/core/block/__init__.py +0 -0
  96. vllm/core/block/block_table.py +399 -0
  97. vllm/core/block/common.py +371 -0
  98. vllm/core/block/cpu_gpu_block_allocator.py +441 -0
  99. vllm/core/block/interfaces.py +319 -0
  100. vllm/core/block/naive_block.py +466 -0
  101. vllm/core/block/prefix_caching_block.py +1135 -0
  102. vllm/core/block/utils.py +28 -0
  103. vllm/core/block_manager.py +525 -0
  104. vllm/core/evictor.py +157 -0
  105. vllm/core/interfaces.py +139 -0
  106. vllm/core/placeholder_block_space_manager.py +103 -0
  107. vllm/core/scheduler.py +2126 -0
  108. vllm/device_allocator/__init__.py +0 -0
  109. vllm/device_allocator/cumem.py +281 -0
  110. vllm/distributed/__init__.py +6 -0
  111. vllm/distributed/communication_op.py +41 -0
  112. vllm/distributed/device_communicators/__init__.py +0 -0
  113. vllm/distributed/device_communicators/all2all.py +264 -0
  114. vllm/distributed/device_communicators/base_device_communicator.py +260 -0
  115. vllm/distributed/device_communicators/cpu_communicator.py +145 -0
  116. vllm/distributed/device_communicators/cuda_communicator.py +194 -0
  117. vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
  118. vllm/distributed/device_communicators/custom_all_reduce.py +304 -0
  119. vllm/distributed/device_communicators/custom_all_reduce_utils.py +259 -0
  120. vllm/distributed/device_communicators/hpu_communicator.py +46 -0
  121. vllm/distributed/device_communicators/neuron_communicator.py +20 -0
  122. vllm/distributed/device_communicators/pynccl.py +218 -0
  123. vllm/distributed/device_communicators/pynccl_wrapper.py +349 -0
  124. vllm/distributed/device_communicators/quick_all_reduce.py +278 -0
  125. vllm/distributed/device_communicators/shm_broadcast.py +585 -0
  126. vllm/distributed/device_communicators/tpu_communicator.py +103 -0
  127. vllm/distributed/device_communicators/xpu_communicator.py +55 -0
  128. vllm/distributed/eplb/__init__.py +8 -0
  129. vllm/distributed/eplb/eplb_state.py +432 -0
  130. vllm/distributed/eplb/rebalance_algo.py +234 -0
  131. vllm/distributed/eplb/rebalance_execute.py +307 -0
  132. vllm/distributed/kv_events.py +356 -0
  133. vllm/distributed/kv_transfer/README.md +29 -0
  134. vllm/distributed/kv_transfer/__init__.py +12 -0
  135. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  136. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  137. vllm/distributed/kv_transfer/kv_connector/base.py +128 -0
  138. vllm/distributed/kv_transfer/kv_connector/factory.py +133 -0
  139. vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +99 -0
  140. vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +203 -0
  141. vllm/distributed/kv_transfer/kv_connector/simple_connector.py +329 -0
  142. vllm/distributed/kv_transfer/kv_connector/utils.py +109 -0
  143. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
  144. vllm/distributed/kv_transfer/kv_connector/v1/base.py +283 -0
  145. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +167 -0
  146. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +201 -0
  147. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1103 -0
  148. vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
  149. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +485 -0
  150. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +533 -0
  151. vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +265 -0
  152. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +389 -0
  153. vllm/distributed/kv_transfer/kv_connector_agent.py +77 -0
  154. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  155. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
  156. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
  157. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
  158. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  159. vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
  160. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +290 -0
  161. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
  162. vllm/distributed/kv_transfer/kv_transfer_state.py +71 -0
  163. vllm/distributed/parallel_state.py +1385 -0
  164. vllm/distributed/tpu_distributed_utils.py +178 -0
  165. vllm/distributed/utils.py +536 -0
  166. vllm/engine/__init__.py +0 -0
  167. vllm/engine/arg_utils.py +1801 -0
  168. vllm/engine/async_llm_engine.py +1200 -0
  169. vllm/engine/async_timeout.py +173 -0
  170. vllm/engine/llm_engine.py +2101 -0
  171. vllm/engine/metrics.py +629 -0
  172. vllm/engine/metrics_types.py +94 -0
  173. vllm/engine/multiprocessing/__init__.py +148 -0
  174. vllm/engine/multiprocessing/client.py +681 -0
  175. vllm/engine/multiprocessing/engine.py +460 -0
  176. vllm/engine/output_processor/__init__.py +0 -0
  177. vllm/engine/output_processor/interfaces.py +75 -0
  178. vllm/engine/output_processor/multi_step.py +216 -0
  179. vllm/engine/output_processor/single_step.py +145 -0
  180. vllm/engine/output_processor/stop_checker.py +131 -0
  181. vllm/engine/output_processor/util.py +28 -0
  182. vllm/engine/protocol.py +326 -0
  183. vllm/entrypoints/__init__.py +0 -0
  184. vllm/entrypoints/api_server.py +178 -0
  185. vllm/entrypoints/chat_utils.py +1278 -0
  186. vllm/entrypoints/cli/__init__.py +12 -0
  187. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  188. vllm/entrypoints/cli/benchmark/base.py +25 -0
  189. vllm/entrypoints/cli/benchmark/latency.py +21 -0
  190. vllm/entrypoints/cli/benchmark/main.py +58 -0
  191. vllm/entrypoints/cli/benchmark/serve.py +21 -0
  192. vllm/entrypoints/cli/benchmark/throughput.py +21 -0
  193. vllm/entrypoints/cli/collect_env.py +36 -0
  194. vllm/entrypoints/cli/main.py +71 -0
  195. vllm/entrypoints/cli/openai.py +201 -0
  196. vllm/entrypoints/cli/run_batch.py +69 -0
  197. vllm/entrypoints/cli/serve.py +265 -0
  198. vllm/entrypoints/cli/types.py +29 -0
  199. vllm/entrypoints/launcher.py +147 -0
  200. vllm/entrypoints/llm.py +1599 -0
  201. vllm/entrypoints/logger.py +50 -0
  202. vllm/entrypoints/openai/__init__.py +0 -0
  203. vllm/entrypoints/openai/api_server.py +1495 -0
  204. vllm/entrypoints/openai/cli_args.py +331 -0
  205. vllm/entrypoints/openai/logits_processors.py +90 -0
  206. vllm/entrypoints/openai/protocol.py +2096 -0
  207. vllm/entrypoints/openai/run_batch.py +473 -0
  208. vllm/entrypoints/openai/serving_chat.py +1258 -0
  209. vllm/entrypoints/openai/serving_classification.py +160 -0
  210. vllm/entrypoints/openai/serving_completion.py +618 -0
  211. vllm/entrypoints/openai/serving_embedding.py +201 -0
  212. vllm/entrypoints/openai/serving_engine.py +988 -0
  213. vllm/entrypoints/openai/serving_models.py +315 -0
  214. vllm/entrypoints/openai/serving_pooling.py +234 -0
  215. vllm/entrypoints/openai/serving_score.py +431 -0
  216. vllm/entrypoints/openai/serving_tokenization.py +157 -0
  217. vllm/entrypoints/openai/serving_transcription.py +132 -0
  218. vllm/entrypoints/openai/speech_to_text.py +395 -0
  219. vllm/entrypoints/openai/tool_parsers/__init__.py +25 -0
  220. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
  221. vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
  222. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
  223. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
  224. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +371 -0
  225. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
  226. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
  227. vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
  228. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +267 -0
  229. vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +369 -0
  230. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
  231. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
  232. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
  233. vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
  234. vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +466 -0
  235. vllm/entrypoints/score_utils.py +50 -0
  236. vllm/entrypoints/ssl.py +75 -0
  237. vllm/entrypoints/utils.py +262 -0
  238. vllm/env_override.py +41 -0
  239. vllm/envs.py +1029 -0
  240. vllm/executor/__init__.py +0 -0
  241. vllm/executor/executor_base.py +401 -0
  242. vllm/executor/mp_distributed_executor.py +244 -0
  243. vllm/executor/msgspec_utils.py +30 -0
  244. vllm/executor/multiproc_worker_utils.py +313 -0
  245. vllm/executor/ray_distributed_executor.py +701 -0
  246. vllm/executor/ray_utils.py +399 -0
  247. vllm/executor/uniproc_executor.py +139 -0
  248. vllm/forward_context.py +185 -0
  249. vllm/inputs/__init__.py +41 -0
  250. vllm/inputs/data.py +331 -0
  251. vllm/inputs/parse.py +151 -0
  252. vllm/inputs/preprocess.py +924 -0
  253. vllm/inputs/registry.py +245 -0
  254. vllm/jsontree.py +80 -0
  255. vllm/logger.py +212 -0
  256. vllm/logging_utils/__init__.py +8 -0
  257. vllm/logging_utils/dump_input.py +81 -0
  258. vllm/logging_utils/formatter.py +18 -0
  259. vllm/logits_process.py +119 -0
  260. vllm/lora/__init__.py +0 -0
  261. vllm/lora/fully_sharded_layers.py +355 -0
  262. vllm/lora/layers.py +1285 -0
  263. vllm/lora/lora.py +199 -0
  264. vllm/lora/models.py +818 -0
  265. vllm/lora/ops/__init__.py +0 -0
  266. vllm/lora/ops/torch_ops/__init__.py +16 -0
  267. vllm/lora/ops/torch_ops/lora_ops.py +119 -0
  268. vllm/lora/ops/triton_ops/__init__.py +12 -0
  269. vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
  270. vllm/lora/ops/triton_ops/lora_expand_op.py +290 -0
  271. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
  272. vllm/lora/ops/triton_ops/lora_shrink_op.py +244 -0
  273. vllm/lora/ops/triton_ops/utils.py +120 -0
  274. vllm/lora/ops/xla_ops/__init__.py +7 -0
  275. vllm/lora/ops/xla_ops/lora_ops.py +145 -0
  276. vllm/lora/peft_helper.py +136 -0
  277. vllm/lora/punica_wrapper/__init__.py +10 -0
  278. vllm/lora/punica_wrapper/punica_base.py +485 -0
  279. vllm/lora/punica_wrapper/punica_cpu.py +349 -0
  280. vllm/lora/punica_wrapper/punica_gpu.py +290 -0
  281. vllm/lora/punica_wrapper/punica_hpu.py +145 -0
  282. vllm/lora/punica_wrapper/punica_selector.py +20 -0
  283. vllm/lora/punica_wrapper/punica_tpu.py +405 -0
  284. vllm/lora/punica_wrapper/utils.py +164 -0
  285. vllm/lora/request.py +99 -0
  286. vllm/lora/resolver.py +85 -0
  287. vllm/lora/utils.py +240 -0
  288. vllm/lora/worker_manager.py +256 -0
  289. vllm/model_executor/__init__.py +16 -0
  290. vllm/model_executor/custom_op.py +208 -0
  291. vllm/model_executor/guided_decoding/__init__.py +181 -0
  292. vllm/model_executor/guided_decoding/guidance_decoding.py +63 -0
  293. vllm/model_executor/guided_decoding/guidance_logits_processors.py +104 -0
  294. vllm/model_executor/guided_decoding/guided_fields.py +41 -0
  295. vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +67 -0
  296. vllm/model_executor/guided_decoding/outlines_decoding.py +155 -0
  297. vllm/model_executor/guided_decoding/outlines_logits_processors.py +284 -0
  298. vllm/model_executor/guided_decoding/utils.py +242 -0
  299. vllm/model_executor/guided_decoding/xgrammar_decoding.py +426 -0
  300. vllm/model_executor/layers/__init__.py +0 -0
  301. vllm/model_executor/layers/activation.py +420 -0
  302. vllm/model_executor/layers/fused_moe/__init__.py +78 -0
  303. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +298 -0
  304. vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +140 -0
  305. vllm/model_executor/layers/fused_moe/config.py +456 -0
  306. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  307. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  308. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  309. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  310. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  311. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  312. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  313. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  314. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  315. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  316. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  317. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  318. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  319. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  320. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  321. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  322. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  323. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  324. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  325. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  326. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  327. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  328. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  329. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  330. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  331. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  332. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  333. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  334. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  335. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  336. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  337. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  338. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  339. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  454. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  455. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  456. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  457. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  458. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  459. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  460. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  461. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  462. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  463. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  464. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  465. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  466. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  467. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  468. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  469. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  470. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  471. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  472. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  473. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  474. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  475. vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +215 -0
  476. vllm/model_executor/layers/fused_moe/cutlass_moe.py +645 -0
  477. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +250 -0
  478. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +231 -0
  479. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +183 -0
  480. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1021 -0
  481. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +234 -0
  482. vllm/model_executor/layers/fused_moe/fused_moe.py +1734 -0
  483. vllm/model_executor/layers/fused_moe/layer.py +1528 -0
  484. vllm/model_executor/layers/fused_moe/modular_kernel.py +598 -0
  485. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +224 -0
  486. vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
  487. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +190 -0
  488. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  489. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +233 -0
  490. vllm/model_executor/layers/fused_moe/prepare_finalize.py +66 -0
  491. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +429 -0
  492. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +136 -0
  493. vllm/model_executor/layers/fused_moe/utils.py +144 -0
  494. vllm/model_executor/layers/layernorm.py +287 -0
  495. vllm/model_executor/layers/lightning_attn.py +652 -0
  496. vllm/model_executor/layers/linear.py +1547 -0
  497. vllm/model_executor/layers/logits_processor.py +197 -0
  498. vllm/model_executor/layers/mamba/__init__.py +0 -0
  499. vllm/model_executor/layers/mamba/mamba2_metadata.py +125 -0
  500. vllm/model_executor/layers/mamba/mamba_mixer.py +245 -0
  501. vllm/model_executor/layers/mamba/mamba_mixer2.py +731 -0
  502. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  503. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +105 -0
  504. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
  505. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +262 -0
  506. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +589 -0
  507. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +751 -0
  508. vllm/model_executor/layers/mamba/ops/ssd_combined.py +232 -0
  509. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +206 -0
  510. vllm/model_executor/layers/pooler.py +473 -0
  511. vllm/model_executor/layers/quantization/__init__.py +160 -0
  512. vllm/model_executor/layers/quantization/aqlm.py +376 -0
  513. vllm/model_executor/layers/quantization/auto_round.py +310 -0
  514. vllm/model_executor/layers/quantization/awq.py +228 -0
  515. vllm/model_executor/layers/quantization/awq_marlin.py +523 -0
  516. vllm/model_executor/layers/quantization/awq_triton.py +320 -0
  517. vllm/model_executor/layers/quantization/base_config.py +164 -0
  518. vllm/model_executor/layers/quantization/bitblas.py +462 -0
  519. vllm/model_executor/layers/quantization/bitsandbytes.py +396 -0
  520. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  521. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +694 -0
  522. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1613 -0
  523. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +24 -0
  524. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +358 -0
  525. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  526. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
  527. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +105 -0
  528. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +149 -0
  529. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
  530. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +150 -0
  531. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
  532. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
  533. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
  534. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  535. vllm/model_executor/layers/quantization/deepgemm.py +83 -0
  536. vllm/model_executor/layers/quantization/deepspeedfp.py +195 -0
  537. vllm/model_executor/layers/quantization/experts_int8.py +204 -0
  538. vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
  539. vllm/model_executor/layers/quantization/fp8.py +950 -0
  540. vllm/model_executor/layers/quantization/gguf.py +577 -0
  541. vllm/model_executor/layers/quantization/gptq.py +278 -0
  542. vllm/model_executor/layers/quantization/gptq_bitblas.py +446 -0
  543. vllm/model_executor/layers/quantization/gptq_marlin.py +679 -0
  544. vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
  545. vllm/model_executor/layers/quantization/hqq_marlin.py +332 -0
  546. vllm/model_executor/layers/quantization/ipex_quant.py +250 -0
  547. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  548. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +90 -0
  549. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +83 -0
  550. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
  551. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +300 -0
  552. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
  553. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +132 -0
  554. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +131 -0
  555. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
  556. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +87 -0
  557. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +120 -0
  558. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
  559. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
  560. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +105 -0
  561. vllm/model_executor/layers/quantization/kv_cache.py +139 -0
  562. vllm/model_executor/layers/quantization/marlin.py +263 -0
  563. vllm/model_executor/layers/quantization/modelopt.py +747 -0
  564. vllm/model_executor/layers/quantization/moe_wna16.py +457 -0
  565. vllm/model_executor/layers/quantization/neuron_quant.py +76 -0
  566. vllm/model_executor/layers/quantization/ptpc_fp8.py +127 -0
  567. vllm/model_executor/layers/quantization/qqq.py +275 -0
  568. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  569. vllm/model_executor/layers/quantization/quark/quark.py +437 -0
  570. vllm/model_executor/layers/quantization/quark/quark_moe.py +245 -0
  571. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  572. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  573. vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +126 -0
  574. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +157 -0
  575. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
  576. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  577. vllm/model_executor/layers/quantization/rtn.py +289 -0
  578. vllm/model_executor/layers/quantization/schema.py +86 -0
  579. vllm/model_executor/layers/quantization/torchao.py +212 -0
  580. vllm/model_executor/layers/quantization/tpu_int8.py +121 -0
  581. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  582. vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
  583. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +208 -0
  584. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  585. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  586. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  587. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  588. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  589. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  590. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  591. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  592. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  593. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  594. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  595. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  596. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  597. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  598. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  599. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  600. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  601. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  602. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  603. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  604. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  605. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  606. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  607. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  608. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  609. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  610. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  611. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  612. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  613. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  614. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  615. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  616. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  617. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  618. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  619. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  620. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  621. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  622. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  623. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  624. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  625. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  626. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  627. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  628. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  629. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  630. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  631. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  632. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  633. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  634. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  635. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  636. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  637. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  638. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  639. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  640. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  641. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  642. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  643. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  644. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  645. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  646. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  647. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  648. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  649. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  650. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  651. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  652. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  653. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  654. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  655. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  656. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  657. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  658. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  659. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  660. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  661. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  662. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  663. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  664. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  665. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  666. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  667. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  668. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  669. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  670. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  671. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  672. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  673. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  674. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  675. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  676. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  677. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  678. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  679. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  680. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  681. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  682. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  683. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  684. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  685. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  686. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  687. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  688. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  689. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  690. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  691. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  722. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  723. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  724. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  725. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  726. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  727. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  728. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  729. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  730. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  731. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  732. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  733. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  734. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  735. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  736. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  737. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  738. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  739. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  740. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  741. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  742. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  743. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  744. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  745. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  746. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  747. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  748. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  749. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  750. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  751. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  752. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  753. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  754. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  755. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  756. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  757. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  758. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  759. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  760. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  761. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  762. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  763. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  764. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  765. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  766. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  767. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  768. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  769. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  770. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  771. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  772. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  773. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  774. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  775. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  776. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  777. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  778. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  779. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  780. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  781. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  782. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  783. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  784. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  785. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  786. vllm/model_executor/layers/quantization/utils/fp8_utils.py +653 -0
  787. vllm/model_executor/layers/quantization/utils/gptq_utils.py +95 -0
  788. vllm/model_executor/layers/quantization/utils/int8_utils.py +485 -0
  789. vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
  790. vllm/model_executor/layers/quantization/utils/machete_utils.py +50 -0
  791. vllm/model_executor/layers/quantization/utils/marlin_utils.py +476 -0
  792. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +283 -0
  793. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +325 -0
  794. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
  795. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
  796. vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +126 -0
  797. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +45 -0
  798. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +146 -0
  799. vllm/model_executor/layers/quantization/utils/quant_utils.py +573 -0
  800. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +405 -0
  801. vllm/model_executor/layers/rejection_sampler.py +406 -0
  802. vllm/model_executor/layers/resampler.py +270 -0
  803. vllm/model_executor/layers/rotary_embedding.py +2025 -0
  804. vllm/model_executor/layers/sampler.py +1204 -0
  805. vllm/model_executor/layers/spec_decode_base_sampler.py +259 -0
  806. vllm/model_executor/layers/typical_acceptance_sampler.py +166 -0
  807. vllm/model_executor/layers/utils.py +116 -0
  808. vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
  809. vllm/model_executor/model_loader/__init__.py +77 -0
  810. vllm/model_executor/model_loader/base_loader.py +43 -0
  811. vllm/model_executor/model_loader/bitsandbytes_loader.py +613 -0
  812. vllm/model_executor/model_loader/default_loader.py +282 -0
  813. vllm/model_executor/model_loader/dummy_loader.py +27 -0
  814. vllm/model_executor/model_loader/gguf_loader.py +120 -0
  815. vllm/model_executor/model_loader/neuron.py +476 -0
  816. vllm/model_executor/model_loader/neuronx_distributed.py +685 -0
  817. vllm/model_executor/model_loader/runai_streamer_loader.py +109 -0
  818. vllm/model_executor/model_loader/sharded_state_loader.py +201 -0
  819. vllm/model_executor/model_loader/tensorizer.py +602 -0
  820. vllm/model_executor/model_loader/tensorizer_loader.py +127 -0
  821. vllm/model_executor/model_loader/tpu.py +113 -0
  822. vllm/model_executor/model_loader/utils.py +315 -0
  823. vllm/model_executor/model_loader/weight_utils.py +782 -0
  824. vllm/model_executor/models/__init__.py +30 -0
  825. vllm/model_executor/models/adapters.py +375 -0
  826. vllm/model_executor/models/aimv2.py +246 -0
  827. vllm/model_executor/models/arctic.py +559 -0
  828. vllm/model_executor/models/aria.py +670 -0
  829. vllm/model_executor/models/aya_vision.py +486 -0
  830. vllm/model_executor/models/baichuan.py +474 -0
  831. vllm/model_executor/models/bamba.py +558 -0
  832. vllm/model_executor/models/bart.py +938 -0
  833. vllm/model_executor/models/bert.py +513 -0
  834. vllm/model_executor/models/bert_with_rope.py +617 -0
  835. vllm/model_executor/models/blip.py +339 -0
  836. vllm/model_executor/models/blip2.py +728 -0
  837. vllm/model_executor/models/bloom.py +373 -0
  838. vllm/model_executor/models/chameleon.py +1146 -0
  839. vllm/model_executor/models/chatglm.py +478 -0
  840. vllm/model_executor/models/clip.py +407 -0
  841. vllm/model_executor/models/commandr.py +471 -0
  842. vllm/model_executor/models/config.py +200 -0
  843. vllm/model_executor/models/constant_size_cache.py +137 -0
  844. vllm/model_executor/models/dbrx.py +472 -0
  845. vllm/model_executor/models/deepseek.py +486 -0
  846. vllm/model_executor/models/deepseek_mtp.py +281 -0
  847. vllm/model_executor/models/deepseek_v2.py +935 -0
  848. vllm/model_executor/models/deepseek_vl2.py +660 -0
  849. vllm/model_executor/models/dots1.py +536 -0
  850. vllm/model_executor/models/eagle.py +261 -0
  851. vllm/model_executor/models/ernie45.py +43 -0
  852. vllm/model_executor/models/ernie45_moe.py +583 -0
  853. vllm/model_executor/models/exaone.py +551 -0
  854. vllm/model_executor/models/fairseq2_llama.py +154 -0
  855. vllm/model_executor/models/falcon.py +510 -0
  856. vllm/model_executor/models/falcon_h1.py +708 -0
  857. vllm/model_executor/models/florence2.py +1113 -0
  858. vllm/model_executor/models/fuyu.py +406 -0
  859. vllm/model_executor/models/gemma.py +427 -0
  860. vllm/model_executor/models/gemma2.py +427 -0
  861. vllm/model_executor/models/gemma3.py +535 -0
  862. vllm/model_executor/models/gemma3_mm.py +729 -0
  863. vllm/model_executor/models/gemma3n.py +811 -0
  864. vllm/model_executor/models/glm.py +23 -0
  865. vllm/model_executor/models/glm4.py +305 -0
  866. vllm/model_executor/models/glm4_1v.py +1590 -0
  867. vllm/model_executor/models/glm4v.py +657 -0
  868. vllm/model_executor/models/gpt2.py +382 -0
  869. vllm/model_executor/models/gpt_bigcode.py +335 -0
  870. vllm/model_executor/models/gpt_j.py +339 -0
  871. vllm/model_executor/models/gpt_neox.py +332 -0
  872. vllm/model_executor/models/granite.py +493 -0
  873. vllm/model_executor/models/granite_speech.py +790 -0
  874. vllm/model_executor/models/granitemoe.py +437 -0
  875. vllm/model_executor/models/granitemoehybrid.py +653 -0
  876. vllm/model_executor/models/granitemoeshared.py +341 -0
  877. vllm/model_executor/models/gritlm.py +224 -0
  878. vllm/model_executor/models/grok1.py +546 -0
  879. vllm/model_executor/models/h2ovl.py +549 -0
  880. vllm/model_executor/models/hunyuan_v1_moe.py +897 -0
  881. vllm/model_executor/models/idefics2_vision_model.py +389 -0
  882. vllm/model_executor/models/idefics3.py +786 -0
  883. vllm/model_executor/models/interfaces.py +681 -0
  884. vllm/model_executor/models/interfaces_base.py +164 -0
  885. vllm/model_executor/models/intern_vit.py +480 -0
  886. vllm/model_executor/models/internlm2.py +455 -0
  887. vllm/model_executor/models/internlm2_ve.py +147 -0
  888. vllm/model_executor/models/internvl.py +1432 -0
  889. vllm/model_executor/models/jais.py +373 -0
  890. vllm/model_executor/models/jamba.py +592 -0
  891. vllm/model_executor/models/keye.py +1736 -0
  892. vllm/model_executor/models/kimi_vl.py +585 -0
  893. vllm/model_executor/models/llama.py +644 -0
  894. vllm/model_executor/models/llama4.py +531 -0
  895. vllm/model_executor/models/llama_eagle.py +165 -0
  896. vllm/model_executor/models/llama_eagle3.py +263 -0
  897. vllm/model_executor/models/llava.py +887 -0
  898. vllm/model_executor/models/llava_next.py +604 -0
  899. vllm/model_executor/models/llava_next_video.py +492 -0
  900. vllm/model_executor/models/llava_onevision.py +985 -0
  901. vllm/model_executor/models/mamba.py +273 -0
  902. vllm/model_executor/models/mamba2.py +320 -0
  903. vllm/model_executor/models/mamba_cache.py +76 -0
  904. vllm/model_executor/models/medusa.py +219 -0
  905. vllm/model_executor/models/mimo.py +192 -0
  906. vllm/model_executor/models/mimo_mtp.py +285 -0
  907. vllm/model_executor/models/minicpm.py +592 -0
  908. vllm/model_executor/models/minicpm3.py +230 -0
  909. vllm/model_executor/models/minicpm_eagle.py +391 -0
  910. vllm/model_executor/models/minicpmo.py +772 -0
  911. vllm/model_executor/models/minicpmv.py +1307 -0
  912. vllm/model_executor/models/minimax_cache.py +36 -0
  913. vllm/model_executor/models/minimax_text_01.py +1301 -0
  914. vllm/model_executor/models/minimax_vl_01.py +374 -0
  915. vllm/model_executor/models/mistral3.py +624 -0
  916. vllm/model_executor/models/mixtral.py +488 -0
  917. vllm/model_executor/models/mixtral_quant.py +453 -0
  918. vllm/model_executor/models/mllama.py +1682 -0
  919. vllm/model_executor/models/mllama4.py +947 -0
  920. vllm/model_executor/models/mlp_speculator.py +206 -0
  921. vllm/model_executor/models/modernbert.py +339 -0
  922. vllm/model_executor/models/module_mapping.py +72 -0
  923. vllm/model_executor/models/molmo.py +1576 -0
  924. vllm/model_executor/models/moonvit.py +630 -0
  925. vllm/model_executor/models/mpt.py +331 -0
  926. vllm/model_executor/models/nemotron.py +508 -0
  927. vllm/model_executor/models/nemotron_h.py +588 -0
  928. vllm/model_executor/models/nemotron_nas.py +484 -0
  929. vllm/model_executor/models/nvlm_d.py +216 -0
  930. vllm/model_executor/models/olmo.py +389 -0
  931. vllm/model_executor/models/olmo2.py +414 -0
  932. vllm/model_executor/models/olmoe.py +468 -0
  933. vllm/model_executor/models/opt.py +412 -0
  934. vllm/model_executor/models/orion.py +349 -0
  935. vllm/model_executor/models/ovis.py +577 -0
  936. vllm/model_executor/models/paligemma.py +419 -0
  937. vllm/model_executor/models/persimmon.py +344 -0
  938. vllm/model_executor/models/phi.py +356 -0
  939. vllm/model_executor/models/phi3.py +19 -0
  940. vllm/model_executor/models/phi3_small.py +465 -0
  941. vllm/model_executor/models/phi3v.py +733 -0
  942. vllm/model_executor/models/phi4mm.py +1258 -0
  943. vllm/model_executor/models/phi4mm_audio.py +1233 -0
  944. vllm/model_executor/models/phi4mm_utils.py +1884 -0
  945. vllm/model_executor/models/phimoe.py +674 -0
  946. vllm/model_executor/models/pixtral.py +1329 -0
  947. vllm/model_executor/models/plamo2.py +738 -0
  948. vllm/model_executor/models/prithvi_geospatial_mae.py +240 -0
  949. vllm/model_executor/models/qwen.py +362 -0
  950. vllm/model_executor/models/qwen2.py +501 -0
  951. vllm/model_executor/models/qwen2_5_omni_thinker.py +923 -0
  952. vllm/model_executor/models/qwen2_5_vl.py +1175 -0
  953. vllm/model_executor/models/qwen2_audio.py +420 -0
  954. vllm/model_executor/models/qwen2_moe.py +540 -0
  955. vllm/model_executor/models/qwen2_rm.py +122 -0
  956. vllm/model_executor/models/qwen2_vl.py +1513 -0
  957. vllm/model_executor/models/qwen3.py +325 -0
  958. vllm/model_executor/models/qwen3_moe.py +541 -0
  959. vllm/model_executor/models/qwen_vl.py +796 -0
  960. vllm/model_executor/models/registry.py +634 -0
  961. vllm/model_executor/models/roberta.py +271 -0
  962. vllm/model_executor/models/siglip.py +524 -0
  963. vllm/model_executor/models/skyworkr1v.py +961 -0
  964. vllm/model_executor/models/smolvlm.py +52 -0
  965. vllm/model_executor/models/solar.py +506 -0
  966. vllm/model_executor/models/stablelm.py +343 -0
  967. vllm/model_executor/models/starcoder2.py +356 -0
  968. vllm/model_executor/models/tarsier.py +652 -0
  969. vllm/model_executor/models/telechat2.py +140 -0
  970. vllm/model_executor/models/teleflm.py +79 -0
  971. vllm/model_executor/models/transformers.py +509 -0
  972. vllm/model_executor/models/ultravox.py +670 -0
  973. vllm/model_executor/models/utils.py +744 -0
  974. vllm/model_executor/models/vision.py +147 -0
  975. vllm/model_executor/models/whisper.py +886 -0
  976. vllm/model_executor/models/zamba2.py +1036 -0
  977. vllm/model_executor/parameter.py +459 -0
  978. vllm/model_executor/pooling_metadata.py +72 -0
  979. vllm/model_executor/sampling_metadata.py +597 -0
  980. vllm/model_executor/utils.py +80 -0
  981. vllm/multimodal/__init__.py +33 -0
  982. vllm/multimodal/audio.py +116 -0
  983. vllm/multimodal/base.py +219 -0
  984. vllm/multimodal/hasher.py +91 -0
  985. vllm/multimodal/image.py +103 -0
  986. vllm/multimodal/inputs.py +878 -0
  987. vllm/multimodal/parse.py +499 -0
  988. vllm/multimodal/processing.py +1948 -0
  989. vllm/multimodal/profiling.py +283 -0
  990. vllm/multimodal/registry.py +331 -0
  991. vllm/multimodal/utils.py +492 -0
  992. vllm/multimodal/video.py +227 -0
  993. vllm/outputs.py +516 -0
  994. vllm/platforms/__init__.py +291 -0
  995. vllm/platforms/cpu.py +281 -0
  996. vllm/platforms/cuda.py +568 -0
  997. vllm/platforms/hpu.py +106 -0
  998. vllm/platforms/interface.py +551 -0
  999. vllm/platforms/neuron.py +150 -0
  1000. vllm/platforms/rocm.py +453 -0
  1001. vllm/platforms/tpu.py +206 -0
  1002. vllm/platforms/xpu.py +192 -0
  1003. vllm/plugins/__init__.py +94 -0
  1004. vllm/plugins/lora_resolvers/README.md +15 -0
  1005. vllm/plugins/lora_resolvers/__init__.py +0 -0
  1006. vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
  1007. vllm/pooling_params.py +64 -0
  1008. vllm/profiler/__init__.py +0 -0
  1009. vllm/profiler/layerwise_profile.py +375 -0
  1010. vllm/profiler/utils.py +148 -0
  1011. vllm/prompt_adapter/__init__.py +0 -0
  1012. vllm/prompt_adapter/layers.py +83 -0
  1013. vllm/prompt_adapter/models.py +358 -0
  1014. vllm/prompt_adapter/request.py +37 -0
  1015. vllm/prompt_adapter/utils.py +98 -0
  1016. vllm/prompt_adapter/worker_manager.py +179 -0
  1017. vllm/py.typed +2 -0
  1018. vllm/reasoning/__init__.py +15 -0
  1019. vllm/reasoning/abs_reasoning_parsers.py +192 -0
  1020. vllm/reasoning/deepseek_r1_reasoning_parser.py +173 -0
  1021. vllm/reasoning/granite_reasoning_parser.py +363 -0
  1022. vllm/reasoning/qwen3_reasoning_parser.py +151 -0
  1023. vllm/sampling_params.py +602 -0
  1024. vllm/scalar_type.py +347 -0
  1025. vllm/scripts.py +15 -0
  1026. vllm/sequence.py +1568 -0
  1027. vllm/spec_decode/__init__.py +0 -0
  1028. vllm/spec_decode/batch_expansion.py +506 -0
  1029. vllm/spec_decode/draft_model_runner.py +349 -0
  1030. vllm/spec_decode/interfaces.py +99 -0
  1031. vllm/spec_decode/medusa_worker.py +138 -0
  1032. vllm/spec_decode/metrics.py +213 -0
  1033. vllm/spec_decode/mlp_speculator_worker.py +94 -0
  1034. vllm/spec_decode/mqa_scorer.py +160 -0
  1035. vllm/spec_decode/multi_step_worker.py +423 -0
  1036. vllm/spec_decode/ngram_worker.py +196 -0
  1037. vllm/spec_decode/proposer_worker_base.py +59 -0
  1038. vllm/spec_decode/smaller_tp_proposer_worker.py +196 -0
  1039. vllm/spec_decode/spec_decode_worker.py +1326 -0
  1040. vllm/spec_decode/target_model_runner.py +45 -0
  1041. vllm/spec_decode/top1_proposer.py +275 -0
  1042. vllm/spec_decode/util.py +277 -0
  1043. vllm/test_utils.py +130 -0
  1044. vllm/third_party/__init__.py +0 -0
  1045. vllm/third_party/pynvml.py +6140 -0
  1046. vllm/tracing.py +131 -0
  1047. vllm/transformers_utils/__init__.py +24 -0
  1048. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1049. vllm/transformers_utils/chat_templates/registry.py +60 -0
  1050. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1051. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1052. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1053. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1054. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1055. vllm/transformers_utils/config.py +922 -0
  1056. vllm/transformers_utils/configs/__init__.py +57 -0
  1057. vllm/transformers_utils/configs/arctic.py +207 -0
  1058. vllm/transformers_utils/configs/chatglm.py +72 -0
  1059. vllm/transformers_utils/configs/cohere2.py +195 -0
  1060. vllm/transformers_utils/configs/dbrx.py +280 -0
  1061. vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
  1062. vllm/transformers_utils/configs/eagle.py +85 -0
  1063. vllm/transformers_utils/configs/exaone.py +190 -0
  1064. vllm/transformers_utils/configs/falcon.py +90 -0
  1065. vllm/transformers_utils/configs/jais.py +238 -0
  1066. vllm/transformers_utils/configs/kimi_vl.py +37 -0
  1067. vllm/transformers_utils/configs/medusa.py +63 -0
  1068. vllm/transformers_utils/configs/minimax_text_01.py +70 -0
  1069. vllm/transformers_utils/configs/minimax_vl_01.py +71 -0
  1070. vllm/transformers_utils/configs/mllama.py +31 -0
  1071. vllm/transformers_utils/configs/mlp_speculator.py +68 -0
  1072. vllm/transformers_utils/configs/moonvit.py +33 -0
  1073. vllm/transformers_utils/configs/mpt.py +180 -0
  1074. vllm/transformers_utils/configs/nemotron.py +205 -0
  1075. vllm/transformers_utils/configs/nemotron_h.py +259 -0
  1076. vllm/transformers_utils/configs/nvlm_d.py +31 -0
  1077. vllm/transformers_utils/configs/ovis.py +184 -0
  1078. vllm/transformers_utils/configs/skyworkr1v.py +54 -0
  1079. vllm/transformers_utils/configs/solar.py +247 -0
  1080. vllm/transformers_utils/configs/telechat2.py +64 -0
  1081. vllm/transformers_utils/configs/ultravox.py +108 -0
  1082. vllm/transformers_utils/detokenizer.py +168 -0
  1083. vllm/transformers_utils/detokenizer_utils.py +189 -0
  1084. vllm/transformers_utils/processor.py +221 -0
  1085. vllm/transformers_utils/processors/__init__.py +8 -0
  1086. vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
  1087. vllm/transformers_utils/processors/ovis.py +420 -0
  1088. vllm/transformers_utils/s3_utils.py +162 -0
  1089. vllm/transformers_utils/tokenizer.py +302 -0
  1090. vllm/transformers_utils/tokenizer_base.py +149 -0
  1091. vllm/transformers_utils/tokenizer_group.py +120 -0
  1092. vllm/transformers_utils/tokenizers/__init__.py +10 -0
  1093. vllm/transformers_utils/tokenizers/mistral.py +493 -0
  1094. vllm/transformers_utils/utils.py +99 -0
  1095. vllm/triton_utils/__init__.py +14 -0
  1096. vllm/triton_utils/importing.py +94 -0
  1097. vllm/usage/__init__.py +0 -0
  1098. vllm/usage/usage_lib.py +259 -0
  1099. vllm/utils/__init__.py +3008 -0
  1100. vllm/v1/__init__.py +0 -0
  1101. vllm/v1/attention/__init__.py +0 -0
  1102. vllm/v1/attention/backends/__init__.py +0 -0
  1103. vllm/v1/attention/backends/cpu_attn.py +184 -0
  1104. vllm/v1/attention/backends/flash_attn.py +757 -0
  1105. vllm/v1/attention/backends/flashinfer.py +680 -0
  1106. vllm/v1/attention/backends/flex_attention.py +491 -0
  1107. vllm/v1/attention/backends/mamba_attn.py +192 -0
  1108. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1109. vllm/v1/attention/backends/mla/common.py +978 -0
  1110. vllm/v1/attention/backends/mla/cutlass_mla.py +98 -0
  1111. vllm/v1/attention/backends/mla/flashmla.py +180 -0
  1112. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +241 -0
  1113. vllm/v1/attention/backends/mla/triton_mla.py +177 -0
  1114. vllm/v1/attention/backends/pallas.py +320 -0
  1115. vllm/v1/attention/backends/rocm_aiter_fa.py +609 -0
  1116. vllm/v1/attention/backends/triton_attn.py +449 -0
  1117. vllm/v1/attention/backends/utils.py +310 -0
  1118. vllm/v1/core/__init__.py +0 -0
  1119. vllm/v1/core/block_pool.py +349 -0
  1120. vllm/v1/core/encoder_cache_manager.py +254 -0
  1121. vllm/v1/core/kv_cache_coordinator.py +369 -0
  1122. vllm/v1/core/kv_cache_manager.py +398 -0
  1123. vllm/v1/core/kv_cache_utils.py +999 -0
  1124. vllm/v1/core/sched/__init__.py +0 -0
  1125. vllm/v1/core/sched/interface.py +150 -0
  1126. vllm/v1/core/sched/output.py +157 -0
  1127. vllm/v1/core/sched/request_queue.py +224 -0
  1128. vllm/v1/core/sched/scheduler.py +1115 -0
  1129. vllm/v1/core/sched/utils.py +36 -0
  1130. vllm/v1/core/single_type_kv_cache_manager.py +444 -0
  1131. vllm/v1/engine/__init__.py +179 -0
  1132. vllm/v1/engine/async_llm.py +626 -0
  1133. vllm/v1/engine/coordinator.py +278 -0
  1134. vllm/v1/engine/core.py +1046 -0
  1135. vllm/v1/engine/core_client.py +1049 -0
  1136. vllm/v1/engine/detokenizer.py +292 -0
  1137. vllm/v1/engine/exceptions.py +17 -0
  1138. vllm/v1/engine/llm_engine.py +322 -0
  1139. vllm/v1/engine/logprobs.py +200 -0
  1140. vllm/v1/engine/mm_input_cache.py +91 -0
  1141. vllm/v1/engine/output_processor.py +477 -0
  1142. vllm/v1/engine/parallel_sampling.py +133 -0
  1143. vllm/v1/engine/processor.py +422 -0
  1144. vllm/v1/engine/utils.py +546 -0
  1145. vllm/v1/executor/__init__.py +0 -0
  1146. vllm/v1/executor/abstract.py +113 -0
  1147. vllm/v1/executor/multiproc_executor.py +532 -0
  1148. vllm/v1/executor/ray_distributed_executor.py +62 -0
  1149. vllm/v1/kv_cache_interface.py +223 -0
  1150. vllm/v1/metrics/__init__.py +0 -0
  1151. vllm/v1/metrics/loggers.py +557 -0
  1152. vllm/v1/metrics/prometheus.py +82 -0
  1153. vllm/v1/metrics/ray_wrappers.py +131 -0
  1154. vllm/v1/metrics/reader.py +246 -0
  1155. vllm/v1/metrics/stats.py +240 -0
  1156. vllm/v1/outputs.py +124 -0
  1157. vllm/v1/pool/__init__.py +0 -0
  1158. vllm/v1/pool/metadata.py +17 -0
  1159. vllm/v1/request.py +229 -0
  1160. vllm/v1/sample/__init__.py +0 -0
  1161. vllm/v1/sample/logits_processor.py +517 -0
  1162. vllm/v1/sample/metadata.py +43 -0
  1163. vllm/v1/sample/ops/__init__.py +0 -0
  1164. vllm/v1/sample/ops/bad_words.py +39 -0
  1165. vllm/v1/sample/ops/penalties.py +43 -0
  1166. vllm/v1/sample/ops/topk_topp_sampler.py +296 -0
  1167. vllm/v1/sample/rejection_sampler.py +631 -0
  1168. vllm/v1/sample/sampler.py +226 -0
  1169. vllm/v1/sample/tpu/__init__.py +0 -0
  1170. vllm/v1/sample/tpu/metadata.py +124 -0
  1171. vllm/v1/sample/tpu/sampler.py +145 -0
  1172. vllm/v1/serial_utils.py +315 -0
  1173. vllm/v1/spec_decode/__init__.py +0 -0
  1174. vllm/v1/spec_decode/eagle.py +441 -0
  1175. vllm/v1/spec_decode/medusa.py +64 -0
  1176. vllm/v1/spec_decode/metadata.py +62 -0
  1177. vllm/v1/spec_decode/metrics.py +178 -0
  1178. vllm/v1/spec_decode/ngram_proposer.py +132 -0
  1179. vllm/v1/spec_decode/utils.py +41 -0
  1180. vllm/v1/structured_output/__init__.py +227 -0
  1181. vllm/v1/structured_output/backend_guidance.py +245 -0
  1182. vllm/v1/structured_output/backend_types.py +134 -0
  1183. vllm/v1/structured_output/backend_xgrammar.py +318 -0
  1184. vllm/v1/structured_output/request.py +86 -0
  1185. vllm/v1/structured_output/utils.py +175 -0
  1186. vllm/v1/utils.py +377 -0
  1187. vllm/v1/worker/__init__.py +0 -0
  1188. vllm/v1/worker/block_table.py +142 -0
  1189. vllm/v1/worker/cpu_model_runner.py +91 -0
  1190. vllm/v1/worker/cpu_worker.py +153 -0
  1191. vllm/v1/worker/gpu_input_batch.py +757 -0
  1192. vllm/v1/worker/gpu_model_runner.py +2739 -0
  1193. vllm/v1/worker/gpu_worker.py +408 -0
  1194. vllm/v1/worker/lora_model_runner_mixin.py +177 -0
  1195. vllm/v1/worker/tpu_input_batch.py +585 -0
  1196. vllm/v1/worker/tpu_model_runner.py +1849 -0
  1197. vllm/v1/worker/tpu_worker.py +315 -0
  1198. vllm/v1/worker/utils.py +112 -0
  1199. vllm/v1/worker/worker_base.py +65 -0
  1200. vllm/v1/worker/xpu_model_runner.py +33 -0
  1201. vllm/v1/worker/xpu_worker.py +165 -0
  1202. vllm/version.py +41 -0
  1203. vllm/vllm_flash_attn/.gitkeep +0 -0
  1204. vllm/worker/__init__.py +0 -0
  1205. vllm/worker/cache_engine.py +145 -0
  1206. vllm/worker/cpu_enc_dec_model_runner.py +326 -0
  1207. vllm/worker/cpu_model_runner.py +671 -0
  1208. vllm/worker/cpu_pooling_model_runner.py +125 -0
  1209. vllm/worker/cpu_worker.py +452 -0
  1210. vllm/worker/enc_dec_model_runner.py +555 -0
  1211. vllm/worker/hpu_model_runner.py +2320 -0
  1212. vllm/worker/hpu_worker.py +484 -0
  1213. vllm/worker/model_runner.py +2178 -0
  1214. vllm/worker/model_runner_base.py +282 -0
  1215. vllm/worker/multi_step_hpu_worker.py +123 -0
  1216. vllm/worker/multi_step_model_runner.py +911 -0
  1217. vllm/worker/multi_step_neuron_model_runner.py +84 -0
  1218. vllm/worker/multi_step_neuronx_distributed_model_runner.py +63 -0
  1219. vllm/worker/multi_step_tpu_worker.py +108 -0
  1220. vllm/worker/multi_step_worker.py +197 -0
  1221. vllm/worker/neuron_model_runner.py +460 -0
  1222. vllm/worker/neuron_worker.py +193 -0
  1223. vllm/worker/neuronx_distributed_model_runner.py +294 -0
  1224. vllm/worker/pooling_model_runner.py +211 -0
  1225. vllm/worker/tpu_model_runner.py +909 -0
  1226. vllm/worker/tpu_worker.py +337 -0
  1227. vllm/worker/utils.py +53 -0
  1228. vllm/worker/worker.py +577 -0
  1229. vllm/worker/worker_base.py +646 -0
  1230. vllm/worker/xpu_model_runner.py +606 -0
  1231. vllm/worker/xpu_worker.py +186 -0
  1232. vllm_cpu-0.9.2.post2.dist-info/METADATA +339 -0
  1233. vllm_cpu-0.9.2.post2.dist-info/RECORD +1236 -0
  1234. vllm_cpu-0.9.2.post2.dist-info/WHEEL +5 -0
  1235. vllm_cpu-0.9.2.post2.dist-info/entry_points.txt +5 -0
  1236. vllm_cpu-0.9.2.post2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1849 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ import bisect
4
+ import gc
5
+ import time
6
+ from typing import TYPE_CHECKING, Optional, cast
7
+ from unittest.mock import patch
8
+
9
+ import numpy as np
10
+ import torch
11
+ import torch.nn as nn
12
+ # TPU XLA related
13
+ import torch_xla.core.xla_model as xm
14
+ import torch_xla.distributed.spmd as xs
15
+ import torch_xla.runtime as xr
16
+
17
+ import vllm.envs as envs
18
+ from vllm.attention.backends.abstract import AttentionType
19
+ from vllm.attention.layer import Attention
20
+ from vllm.compilation.wrapper import TorchCompileWrapperWithCustomDispatcher
21
+ from vllm.config import ParallelConfig, VllmConfig, get_layers_from_vllm_config
22
+ from vllm.forward_context import set_forward_context
23
+ from vllm.logger import init_logger
24
+ from vllm.lora.layers import BaseLayerWithLoRA
25
+ from vllm.model_executor.model_loader import get_model_loader
26
+ from vllm.model_executor.model_loader.tpu import TPUModelLoader
27
+ from vllm.multimodal import MULTIMODAL_REGISTRY
28
+ from vllm.multimodal.inputs import (BatchedTensorInputs, MultiModalKwargs,
29
+ PlaceholderRange)
30
+ from vllm.multimodal.utils import group_mm_inputs_by_modality
31
+ from vllm.sequence import IntermediateTensors
32
+ from vllm.utils import (STR_DTYPE_TO_TORCH_DTYPE, LayerBlockType, cdiv,
33
+ is_pin_memory_available)
34
+ from vllm.v1.attention.backends.pallas import (PallasAttentionBackend,
35
+ PallasMetadata)
36
+ from vllm.v1.core.encoder_cache_manager import compute_encoder_budget
37
+ from vllm.v1.kv_cache_interface import (AttentionSpec, FullAttentionSpec,
38
+ KVCacheConfig, KVCacheSpec,
39
+ SlidingWindowSpec)
40
+ from vllm.v1.outputs import (EMPTY_MODEL_RUNNER_OUTPUT, LogprobsLists,
41
+ LogprobsTensors, ModelRunnerOutput)
42
+ from vllm.v1.sample.tpu.metadata import TPUSupportedSamplingMetadata
43
+ from vllm.v1.sample.tpu.sampler import Sampler as TPUSampler
44
+ from vllm.v1.utils import bind_kv_cache
45
+ from vllm.v1.worker.lora_model_runner_mixin import LoRAModelRunnerMixin
46
+ from vllm.v1.worker.tpu_input_batch import CachedRequestState, InputBatch
47
+
48
+ from .utils import (initialize_kv_cache_for_kv_sharing,
49
+ sanity_check_mm_encoder_outputs)
50
+
51
+ if TYPE_CHECKING:
52
+ from vllm.v1.core.sched.output import SchedulerOutput
53
+
54
+ logger = init_logger(__name__)
55
+
56
+ INVALID_TOKEN_ID = -1
57
+ # Smallest output size
58
+ MIN_NUM_SEQS = 8
59
+ # Block size used for kv cache updating kernel
60
+ NUM_SLICES_PER_KV_CACHE_UPDATE_BLOCK = 8
61
+
62
+
63
+ #########################################################
64
+ # Ways to avoid recompilation
65
+ #########################################################
66
+ #
67
+ # The model executor has two primary components:
68
+ # 1. preparing the model and sampler inputs
69
+ # 2. executing the model and sampler.
70
+ # The core idea is to avoid any TPU computation during input preparation. For
71
+ # better compilation tracking and increased flexibility, the model execution and
72
+ # sampler are divided into several distinct components.
73
+ #
74
+ # Below are the detailed steps:
75
+ #
76
+ # Step 1
77
+ # It is recommended to avoid TPU operations when preparing the model and sampler
78
+ # inputs. CPU tensors can be prepared and transferred to the XLA device using
79
+ # cpu_tensor.to(xla_device), which only triggers CPU to TPU transfers and avoids
80
+ # compilation.
81
+ #
82
+ # Step 2
83
+ # The TPU execution should be decomposed into subgraphs (4 at the moment):
84
+ # 1. the main model
85
+ # 2. selecting hidden states for each request
86
+ # 3. sampler
87
+ # 4. encoder.
88
+ # Each subgraph should be decorated in a torch.compile. This is used to make
89
+ # sure that we have the same subgraph topology in both dummy_run and
90
+ # xecute_model. The results from these subgraphs should either be passed to
91
+ # other subgraphs, or transferred from TPU to CPU using xla_tensor.cpu() for
92
+ # subsequent processing on the CPU.
93
+ #
94
+ # Step 3
95
+ # The dummy_run should be comprehensive, ensuring all potential input shapes and
96
+ # branch predictions are included as subgraph inputs to facilitate
97
+ # pre-compilation.
98
+ class TPUModelRunner(LoRAModelRunnerMixin):
99
+
100
+ def __init__(
101
+ self,
102
+ vllm_config: VllmConfig,
103
+ device: torch.device,
104
+ original_parallel_config: Optional[ParallelConfig] = None,
105
+ ):
106
+ self.vllm_config = vllm_config
107
+ self.model_config = vllm_config.model_config
108
+ self.cache_config = vllm_config.cache_config
109
+ self.lora_config = vllm_config.lora_config
110
+ self.load_config = vllm_config.load_config
111
+ self.parallel_config = vllm_config.parallel_config
112
+ self.original_parallel_config = original_parallel_config
113
+ self.scheduler_config = vllm_config.scheduler_config
114
+ self.speculative_config = vllm_config.speculative_config
115
+ self.prompt_adapter_config = vllm_config.prompt_adapter_config
116
+ self.observability_config = vllm_config.observability_config
117
+ self.device_config = vllm_config.device_config
118
+
119
+ model_config = self.model_config
120
+ cache_config = self.cache_config
121
+ scheduler_config = self.scheduler_config
122
+ parallel_config = self.parallel_config
123
+ self.device = device
124
+ self.check_recompilation = envs.VLLM_XLA_CHECK_RECOMPILATION
125
+
126
+ # SPMD Related
127
+ self.use_spmd = envs.VLLM_XLA_USE_SPMD
128
+ if self.use_spmd:
129
+ num_devices = xr.global_runtime_device_count()
130
+ mesh_shape = (num_devices, 1)
131
+ device_ids = np.array(range(num_devices))
132
+ self.mesh = xs.Mesh(device_ids, mesh_shape, ('x', 'y'))
133
+
134
+ self.enforce_eager = model_config.enforce_eager
135
+
136
+ self.num_xla_graphs = 0
137
+ self._update_num_xla_graphs("init")
138
+
139
+ self.pin_memory = is_pin_memory_available()
140
+ self.dtype = self.model_config.dtype
141
+ if cache_config.cache_dtype == "auto":
142
+ self.kv_cache_dtype = self.dtype
143
+ else:
144
+ self.kv_cache_dtype = STR_DTYPE_TO_TORCH_DTYPE[
145
+ cache_config.cache_dtype]
146
+ self._hidden_states_dtype = self.dtype
147
+
148
+ self.is_multimodal_model = model_config.is_multimodal_model
149
+ self.sliding_window = model_config.get_sliding_window()
150
+ self.block_size = cache_config.block_size
151
+ self.max_model_len = model_config.max_model_len
152
+ self.most_model_len = envs.VLLM_TPU_MOST_MODEL_LEN
153
+ self.max_num_blocks_per_req = cdiv(self.max_model_len, self.block_size)
154
+ self.num_blocks_per_most_len_req = cdiv(
155
+ self.most_model_len,
156
+ self.block_size) if self.most_model_len is not None else None
157
+ # InputBatch needs to work with sampling tensors greater than padding
158
+ # to avoid dynamic shapes. Also, avoid suboptimal alignment.
159
+ self.max_num_reqs = max(scheduler_config.max_num_seqs, MIN_NUM_SEQS)
160
+ self.num_tokens_paddings = _get_token_paddings(
161
+ min_token_size=16,
162
+ max_token_size=scheduler_config.max_num_batched_tokens,
163
+ padding_gap=envs.VLLM_TPU_BUCKET_PADDING_GAP)
164
+ # In case `max_num_tokens < max(num_tokens_paddings)` use the actual
165
+ # padded max value to pre-allocate data structures and pre-compile.
166
+ self.max_num_tokens = self.num_tokens_paddings[-1]
167
+
168
+ # Model-related.
169
+ self.num_attn_layers = model_config.get_num_layers_by_block_type(
170
+ parallel_config, LayerBlockType.attention)
171
+ self.num_query_heads = model_config.get_num_attention_heads(
172
+ parallel_config)
173
+ self.num_kv_heads = model_config.get_num_kv_heads(parallel_config)
174
+ self.head_size = model_config.get_head_size()
175
+ self.hidden_size = model_config.get_hidden_size()
176
+ self.vocab_size = model_config.get_vocab_size()
177
+
178
+ if self.lora_config is not None:
179
+ self.vocab_size += self.lora_config.lora_extra_vocab_size
180
+
181
+ # Multi-modal data support
182
+ self.mm_registry = MULTIMODAL_REGISTRY
183
+ self.uses_mrope = model_config.uses_mrope
184
+ # TODO: Support M-RoPE (e.g, Qwen2-VL)
185
+ assert not self.uses_mrope, "TPU does not support M-RoPE yet."
186
+
187
+ encoder_compute_budget, encoder_cache_size = compute_encoder_budget(
188
+ model_config=model_config,
189
+ scheduler_config=scheduler_config,
190
+ mm_registry=self.mm_registry,
191
+ )
192
+ self.max_num_encoder_input_tokens = encoder_compute_budget
193
+ self.encoder_cache_size = encoder_cache_size
194
+
195
+ # Lazy initialization
196
+ self.model: nn.Module # Set after load_model
197
+ self.kv_caches: list[torch.Tensor] = []
198
+ # req_id -> (input_id -> encoder_output)
199
+ self.encoder_cache: dict[str, dict[int, torch.Tensor]] = {}
200
+
201
+ # Request states.
202
+ self.requests: dict[str, CachedRequestState] = {}
203
+
204
+ # Initialize input batch early to avoid AttributeError in _update_states
205
+ self.input_batch = InputBatch(
206
+ max_num_reqs=self.max_num_reqs,
207
+ max_model_len=self.max_model_len,
208
+ max_num_batched_tokens=self.max_num_tokens,
209
+ device=self.device,
210
+ pin_memory=self.pin_memory,
211
+ vocab_size=self.model_config.get_vocab_size(),
212
+ block_sizes=[self.block_size],
213
+ )
214
+
215
+ # Cached torch/numpy tensor
216
+ # The pytorch tensor and numpy array share the same buffer.
217
+ # Sometimes the numpy op is faster so we create both.
218
+ self.input_ids_cpu = torch.zeros(self.max_num_tokens,
219
+ dtype=torch.int32,
220
+ device="cpu")
221
+
222
+ self.positions_cpu = torch.zeros(self.max_num_tokens,
223
+ dtype=torch.int32,
224
+ device="cpu")
225
+ self.positions_np = self.positions_cpu.numpy()
226
+ self.block_table_cpu = torch.zeros(
227
+ (self.max_num_reqs, self.max_num_blocks_per_req),
228
+ dtype=torch.int32,
229
+ device="cpu")
230
+ # adjust num_reqs to avoid SMEM OOM.
231
+ self.num_reqs_most_model_len = min(
232
+ PallasAttentionBackend.get_max_num_seqs(self.most_model_len,
233
+ self.block_size),
234
+ self.max_num_reqs) if self.most_model_len is not None else None
235
+ self.num_reqs_max_model_len = min(
236
+ PallasAttentionBackend.get_max_num_seqs(self.max_model_len,
237
+ self.block_size),
238
+ self.max_num_reqs)
239
+ self.query_start_loc_cpu = torch.zeros(self.max_num_tokens + 1,
240
+ dtype=torch.int32,
241
+ device="cpu",
242
+ pin_memory=self.pin_memory)
243
+ self.query_start_loc_np = self.query_start_loc_cpu.numpy()
244
+
245
+ self.seq_lens_cpu = torch.zeros(self.max_num_tokens,
246
+ dtype=torch.int32,
247
+ device="cpu",
248
+ pin_memory=self.pin_memory)
249
+ self.seq_lens_np = self.seq_lens_cpu.numpy()
250
+
251
+ # Range tensor with values [0 .. self.max_num_tokens - 1].
252
+ # Used to initialize positions / context_lens / seq_lens
253
+ # Keep in int64 to avoid overflow with long context
254
+ self.arange_np = np.arange(self.max_num_tokens, dtype=np.int64)
255
+ self.num_reqs_paddings = _get_req_paddings(
256
+ min_req_size=MIN_NUM_SEQS, max_req_size=self.max_num_reqs)
257
+
258
+ # Layer pairings for cross-layer KV sharing.
259
+ # If an Attention layer `layer_name` is in the keys of this dict, it
260
+ # means this layer will perform attention using the keys and values
261
+ # from the KV cache of `shared_kv_cache_layers[layer_name]`.
262
+ self.shared_kv_cache_layers: dict[str, str] = {}
263
+
264
+ # tensors for structured decoding
265
+ self.grammar_bitmask_cpu = torch.zeros(
266
+ (self.max_num_reqs, cdiv(self.vocab_size, 32)),
267
+ dtype=torch.int32,
268
+ device="cpu",
269
+ pin_memory=self.pin_memory)
270
+ self.require_structured_out_cpu = torch.zeros(
271
+ (self.max_num_reqs, 1),
272
+ dtype=torch.bool,
273
+ device="cpu",
274
+ pin_memory=self.pin_memory)
275
+ self.structured_decode_arange = torch.arange(
276
+ 0, 32, device="cpu", pin_memory=self.pin_memory)
277
+
278
+ # Get maximum number of mm items per modality (batch size).
279
+ self.max_num_mm_items_by_modality = dict()
280
+ if (self.is_multimodal_model and self.max_num_encoder_input_tokens > 0
281
+ and self.encoder_cache_size > 0):
282
+ max_tokens_by_modality_dict = (
283
+ MULTIMODAL_REGISTRY.
284
+ get_max_tokens_per_item_by_nonzero_modality(self.model_config))
285
+ for modality, max_tokens in max_tokens_by_modality_dict.items():
286
+ # Check how many items of this modality can be supported by
287
+ # the encoder budget.
288
+ encoder_budget = min(self.max_num_encoder_input_tokens,
289
+ self.encoder_cache_size)
290
+
291
+ max_num_mm_items_encoder_budget = cdiv(encoder_budget,
292
+ max_tokens)
293
+
294
+ # Check how many items of this modality can be supported by
295
+ # the decoder budget.
296
+ max_mm_items_per_req = self.mm_registry.\
297
+ get_mm_limits_per_prompt(self.model_config)[modality]
298
+
299
+ # NOTE: We do not consider max_num_batched_tokens on purpose
300
+ # because the multimodal embeddings can be generated in advance
301
+ # and chunked prefilled.
302
+ max_num_mm_items_decoder_budget = self.max_num_reqs * \
303
+ max_mm_items_per_req
304
+
305
+ max_num_mm_items = min(max_num_mm_items_encoder_budget,
306
+ max_num_mm_items_decoder_budget)
307
+ self.max_num_mm_items_by_modality[modality] = max_num_mm_items
308
+
309
+ if not self.use_spmd:
310
+ self.sample_from_logits_func = torch.compile(
311
+ self.sample_from_logits,
312
+ backend="openxla",
313
+ fullgraph=True,
314
+ dynamic=False)
315
+ else:
316
+ self.sample_from_logits_func = self.sample_from_logits
317
+
+     def _update_num_xla_graphs(self, case_str):
+         check_comp = self.check_recompilation and not self.enforce_eager
+         if not check_comp:
+             return
+
+         total_cached_graphs = xr.get_num_cached_compilation_graph()
+         new_compiled_graphs = total_cached_graphs - self.num_xla_graphs
+         if new_compiled_graphs == 0:
+             return
+
+         logger.info("Add new %d compiled XLA graphs due to %s",
+                     new_compiled_graphs, case_str)
+         self.num_xla_graphs += new_compiled_graphs
+
+     def _verify_num_xla_graphs(self, case_str):
+         check_comp = self.check_recompilation and not self.enforce_eager
+         if not check_comp:
+             return
+
+         curr_cached_graph = xr.get_num_cached_compilation_graph()
+         assert self.num_xla_graphs == curr_cached_graph, (
+             "Recompilation after warm up is detected during {}."
+             " num_xla_graphs = {} curr_cached_graph = {}".format(
+                 case_str, self.num_xla_graphs, curr_cached_graph))
+
+     def _update_states(self, scheduler_output: "SchedulerOutput") -> bool:
+         """Update the cached states and the persistent batch with the scheduler
+         output.
+
+         The updated states are used by the `_prepare_inputs` function to create
+         the input GPU tensors for the model.
+
+         Returns:
+             True if there is a new/resumed/paused/finished request.
+             If False, we can skip copying SamplingMetadata to the GPU.
+         """
+ # Remove finished requests from the cached states.
355
+ for req_id in scheduler_output.finished_req_ids:
356
+ self.requests.pop(req_id, None)
357
+ self.encoder_cache.pop(req_id, None)
358
+
359
+ # Remove the finished requests from the persistent batch.
360
+ # NOTE(woosuk): There could be an edge case where finished_req_ids and
361
+ # scheduled_req_ids overlap. This happens when a request is aborted and
362
+ # then resubmitted with the same ID. In this case, we treat them as two
363
+ # distinct requests - clearing the cached states for the first request
364
+ # and handling the second as a new request.
365
+ removed_req_indices: list[int] = []
366
+ for req_id in scheduler_output.finished_req_ids:
367
+ req_index = self.input_batch.remove_request(req_id)
368
+ if req_index is not None:
369
+ removed_req_indices.append(req_index)
370
+
371
+ # Free the cached encoder outputs.
372
+ for req_id, input_id in scheduler_output.free_encoder_input_ids:
373
+ encoder_outputs = self.encoder_cache.get(req_id)
374
+ if encoder_outputs is not None:
375
+ encoder_outputs.pop(input_id, None)
376
+ if not encoder_outputs:
377
+ self.encoder_cache.pop(req_id, None)
378
+
379
+ # Remove the unscheduled requests from the persistent batch.
380
+ # NOTE(woosuk): The unscheduled requests are either preempted requests
381
+ # or running requests that are not scheduled in this step. We remove
382
+ # them from the persistent batch but keep their cached states since
383
+ # they will be scheduled again sometime in the future.
384
+ scheduled_req_ids = scheduler_output.num_scheduled_tokens.keys()
385
+ cached_req_ids = self.input_batch.req_id_to_index.keys()
386
+ unscheduled_req_ids = cached_req_ids - scheduled_req_ids
387
+ # NOTE(woosuk): The persistent batch optimization assumes that
388
+ # consecutive batches contain mostly the same requests. If batches
389
+ # have low request overlap (e.g., alternating between two distinct
390
+ # sets of requests), this optimization becomes very inefficient.
391
+ for req_id in unscheduled_req_ids:
392
+ req_index = self.input_batch.remove_request(req_id)
393
+ assert req_index is not None
394
+ removed_req_indices.append(req_index)
395
+
396
+ req_ids_to_add: list[str] = []
397
+ # Add new requests to the cached states.
398
+ for new_req_data in scheduler_output.scheduled_new_reqs:
399
+ assert new_req_data.sampling_params is not None,\
400
+ "Pooling is not supported in TPU yet"
401
+ req_id = new_req_data.req_id
402
+ sampling_params = new_req_data.sampling_params
403
+
404
+ self.requests[req_id] = CachedRequestState(
405
+ req_id=req_id,
406
+ prompt_token_ids=new_req_data.prompt_token_ids,
407
+ mm_inputs=new_req_data.mm_inputs,
408
+ mm_positions=new_req_data.mm_positions,
409
+ sampling_params=sampling_params,
410
+ pooling_params=None,
411
+ generator=None,
412
+ block_ids=new_req_data.block_ids,
413
+ num_computed_tokens=new_req_data.num_computed_tokens,
414
+ output_token_ids=[],
415
+ lora_request=new_req_data.lora_request,
416
+ )
417
+
418
+ req_ids_to_add.append(req_id)
419
+
420
+ # Update the states of the running/resumed requests.
421
+ req_data = scheduler_output.scheduled_cached_reqs
422
+ for i, req_id in enumerate(req_data.req_ids):
423
+ req_state = self.requests[req_id]
424
+ num_computed_tokens = req_data.num_computed_tokens[i]
425
+ new_block_ids = req_data.new_block_ids[i]
426
+ resumed_from_preemption = req_data.resumed_from_preemption[i]
427
+
428
+ # Update the cached states.
429
+ req_state.num_computed_tokens = num_computed_tokens
430
+ if not resumed_from_preemption:
431
+ # Append the new blocks to the existing block IDs.
432
+ for block_ids, new_ids in zip(req_state.block_ids,
433
+ new_block_ids):
434
+ block_ids.extend(new_ids)
435
+ else:
436
+ # The request is resumed from preemption.
437
+ # Replace the existing block IDs with the new ones.
438
+ req_state.block_ids = new_block_ids
439
+
440
+ req_index = self.input_batch.req_id_to_index.get(req_id)
441
+ if req_index is None:
442
+ # The request is not in the persistent batch.
443
+ # The request was either preempted and resumed later, or was not
444
+ # scheduled in the previous step and needs to be added again.
445
+ req_ids_to_add.append(req_id)
446
+ continue
447
+
448
+ # Update the persistent batch.
449
+ self.input_batch.num_computed_tokens_cpu[req_index] = (
450
+ num_computed_tokens)
451
+ self.input_batch.block_table.append_row(new_block_ids, req_index)
452
+
453
+ # Add the new or resumed requests to the persistent batch.
454
+ # The smaller empty indices are filled first.
455
+ removed_req_indices = sorted(removed_req_indices, reverse=True)
456
+ for req_id in req_ids_to_add:
457
+ req_state = self.requests[req_id]
458
+ if removed_req_indices:
459
+ # Fill the empty index.
460
+ req_index = removed_req_indices.pop()
461
+ else:
462
+ # Append to the end.
463
+ req_index = None
464
+ self.input_batch.add_request(req_state, req_index)
465
+
466
+ # Condense the batched states if there are empty indices.
467
+ if removed_req_indices:
468
+ self.input_batch.condense(removed_req_indices)
469
+
470
+ return len(unscheduled_req_ids) > 0 or len(req_ids_to_add) > 0
471
+
472
+ def get_model(self) -> nn.Module:
473
+ return self.model
474
+
475
+ def get_kv_cache_spec(self) -> dict[str, KVCacheSpec]:
476
+ """
477
+ Generates the KVCacheSpec by parsing the kv cache format from each
478
+ Attention module in the static forward context.
479
+ Returns:
480
+ KVCacheSpec: A dictionary mapping layer names to their KV cache
481
+ format. Layers that do not need KV cache are not included.
482
+ """
483
+
484
+ layers = get_layers_from_vllm_config(self.vllm_config, Attention)
485
+ block_size = self.vllm_config.cache_config.block_size
486
+ kv_cache_spec: dict[str, KVCacheSpec] = {}
487
+ for layer_name, attn_module in layers.items():
488
+ if (kv_tgt_layer :=
489
+ attn_module.kv_sharing_target_layer_name) is not None:
490
+ # The layer doesn't need its own KV cache and will use that of
491
+ # the target layer. We skip creating a KVCacheSpec for it, so
492
+ # that KV cache management logic will act as this layer does
493
+ # not exist, and doesn't allocate KV cache for the layer. This
494
+ # enables the memory saving of cross-layer kv sharing, allowing
495
+ # a given amount of memory to accommodate longer context lengths
496
+ # or enable more requests to be processed simultaneously.
497
+ self.shared_kv_cache_layers[layer_name] = kv_tgt_layer
498
+ continue
499
+
500
+ if attn_module.attn_type == AttentionType.DECODER:
501
+ if attn_module.sliding_window is not None:
502
+ kv_cache_spec[layer_name] = SlidingWindowSpec(
503
+ block_size=block_size,
504
+ num_kv_heads=attn_module.num_kv_heads,
505
+ head_size=attn_module.head_size,
506
+ dtype=self.kv_cache_dtype,
507
+ sliding_window=attn_module.sliding_window,
508
+ use_mla=False,
509
+ )
510
+ else:
511
+ kv_cache_spec[layer_name] = FullAttentionSpec(
512
+ block_size=block_size,
513
+ num_kv_heads=attn_module.num_kv_heads,
514
+ head_size=attn_module.head_size,
515
+ dtype=self.kv_cache_dtype,
516
+ use_mla=False,
517
+ )
518
+ elif attn_module.attn_type in (AttentionType.ENCODER,
519
+ AttentionType.ENCODER_ONLY):
520
+ # encoder-only attention does not need KV cache.
521
+ continue
522
+ elif attn_module.attn_type == AttentionType.ENCODER_DECODER:
523
+ raise NotImplementedError
524
+ else:
525
+ raise ValueError(
526
+ f"Unknown attention type: {attn_module.attn_type}")
527
+
528
+ return kv_cache_spec
529
+
530
+     def _get_slot_mapping_metadata(self, num_reqs,
+                                    num_scheduled_tokens_per_req):
+         """
+         Computes metadata for mapping slots to blocks in the key-value (KV)
+         cache for a batch of requests.
+
+         This function determines, for each request in the batch, how the
+         scheduled tokens are distributed across memory blocks, and generates
+         metadata needed to map slices of tokens to their corresponding
+         positions in the KV cache.
+
+         Args:
+             num_reqs (int): Number of requests in the current batch.
+             num_scheduled_tokens_per_req (int or np.ndarray): Number of tokens
+                 to be scheduled for each request.
+
+         Returns:
+             np.ndarray: A 2D array of shape (total_block_len, 3), where each
+             row contains:
+             - kv_cache_start_index (int): The starting index in the KV cache
+               for the corresponding slice.
+             - new_kv_start_index (int): The starting index in the new KV
+               cache for the corresponding slice.
+             - slice_len (int): The length of the slice.
+         """
555
+ slices_start = self.input_batch.num_computed_tokens_cpu[:num_reqs]
556
+ slices_end = self.input_batch.num_computed_tokens_cpu[:num_reqs] + \
557
+ num_scheduled_tokens_per_req
558
+ local_block_start_idx = slices_start // self.block_size
559
+ local_block_end_idx = (slices_end - 1) // self.block_size
560
+ no_repeat_req_indices = self.arange_np[:num_reqs]
561
+ global_block_start_idx = (
562
+ no_repeat_req_indices * self.max_num_blocks_per_req +
563
+ local_block_start_idx)
564
+ block_lens = local_block_end_idx - local_block_start_idx + 1
565
+ global_block_start_idx = np.repeat(global_block_start_idx, block_lens)
566
+ slice_arange = np.concatenate([self.arange_np[:n] for n in block_lens])
567
+ global_block_indices = global_block_start_idx + slice_arange
568
+ block_table_cpu = self.input_batch.block_table[0].get_cpu_tensor()
569
+ block_numbers = block_table_cpu.flatten()[global_block_indices].numpy()
570
+ total_block_len = np.sum(block_lens)
571
+ slot_mapping_slices = np.repeat(np.array([[0, self.block_size]],
572
+ dtype=np.int32),
573
+ total_block_len,
574
+ axis=0)
575
+ cu_block_lens = np.zeros(len(block_lens) + 1, dtype=np.int32)
576
+ np.cumsum(block_lens, out=cu_block_lens[1:])
577
+ for req_idx in range(num_reqs):
578
+ slot_mapping_slices[cu_block_lens[req_idx]][
579
+ 0] = slices_start[req_idx] % self.block_size
580
+ slot_mapping_slices[
581
+ cu_block_lens[req_idx + 1] -
582
+ 1][1] = (slices_end[req_idx] - 1) % self.block_size + 1
583
+ slice_lens = slot_mapping_slices[:, 1] - slot_mapping_slices[:, 0]
584
+ cu_slices_lens = np.zeros(len(slice_lens) + 1, dtype=np.int32)
585
+ np.cumsum(slice_lens, out=cu_slices_lens[1:])
586
+ kv_cache_start_indices = slot_mapping_slices[:, 0] + \
587
+ (block_numbers * self.block_size)
588
+ new_kv_start_indices = cu_slices_lens[:-1]
589
+ slot_mapping_metadata = np.stack(
590
+ [kv_cache_start_indices, new_kv_start_indices, slice_lens], axis=1)
591
+ return slot_mapping_metadata
592
+
593
+ def _prepare_inputs(self, scheduler_output: "SchedulerOutput",
594
+ start_index: int):
595
+ assert scheduler_output.total_num_scheduled_tokens > 0
596
+ num_reqs = self.input_batch.num_reqs
597
+ assert num_reqs > 0
598
+ assert start_index < num_reqs
599
+
600
+ # Get the number of scheduled tokens for each request.
601
+ use_max_model_len = self.most_model_len is None
602
+ num_scheduled_tokens_per_req = []
603
+ max_num_scheduled_tokens_all_reqs = 0
604
+ end_index = start_index
605
+
606
+ # Use either most_model_len or max_model_len depending on request size.
607
+ for i in range(start_index, num_reqs):
608
+ req_id = self.input_batch.req_ids[i]
609
+ assert req_id is not None
610
+ num_tokens = scheduler_output.num_scheduled_tokens[req_id]
611
+ if not use_max_model_len and num_tokens > self.most_model_len:
612
+ use_max_model_len = True
613
+ num_scheduled_tokens_per_req.append(num_tokens)
614
+ if use_max_model_len:
615
+ if len(num_scheduled_tokens_per_req) > self.num_reqs_max_model_len:
616
+ num_scheduled_tokens_per_req = \
617
+ num_scheduled_tokens_per_req[:self.num_reqs_max_model_len]
618
+ end_index = start_index + self.num_reqs_max_model_len
619
+ else:
620
+ end_index = num_reqs
621
+ else:
622
+ if len(num_scheduled_tokens_per_req
623
+ ) > self.num_reqs_most_model_len:
624
+ num_scheduled_tokens_per_req = \
625
+ num_scheduled_tokens_per_req[:self.num_reqs_most_model_len]
626
+ end_index = start_index + self.num_reqs_most_model_len
627
+ else:
628
+ end_index = num_reqs
629
+ max_num_scheduled_tokens_all_reqs = max(num_scheduled_tokens_per_req)
630
+ num_scheduled_tokens_per_req = np.array(num_scheduled_tokens_per_req,
631
+ dtype=np.int32)
632
+ total_num_scheduled_tokens = sum(num_scheduled_tokens_per_req)
633
+ assert max_num_scheduled_tokens_all_reqs > 0
634
+
635
+ num_reqs = len(num_scheduled_tokens_per_req)
636
+
637
+         # Get request indices.
+         # E.g., [2, 5, 3] -> [0, 0, 1, 1, 1, 1, 1, 2, 2, 2]
+         # For each scheduled token, this gives the index of its request.
+         req_indices = np.repeat(self.arange_np[:num_reqs],
+                                 num_scheduled_tokens_per_req)
+
+         # Get batched arange.
+         # E.g., [2, 5, 3] -> [0, 1, 0, 1, 2, 3, 4, 0, 1, 2]
+         # For each scheduled token, this gives its position within its
+         # own request.
+         arange = np.concatenate(
+             [self.arange_np[:n] for n in num_scheduled_tokens_per_req])
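The two comments above can be checked directly with NumPy. This small standalone snippet, using a literal [2, 5, 3] schedule as in the examples, reproduces both arrays:

    import numpy as np

    num_scheduled_tokens_per_req = np.array([2, 5, 3], dtype=np.int32)
    num_reqs = len(num_scheduled_tokens_per_req)
    arange_np = np.arange(16, dtype=np.int64)   # stand-in for self.arange_np

    req_indices = np.repeat(arange_np[:num_reqs], num_scheduled_tokens_per_req)
    arange = np.concatenate(
        [arange_np[:n] for n in num_scheduled_tokens_per_req])

    print(req_indices.tolist())   # [0, 0, 1, 1, 1, 1, 1, 2, 2, 2]
    print(arange.tolist())        # [0, 1, 0, 1, 2, 3, 4, 0, 1, 2]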
648
+
649
+ # Get positions.
650
+ positions_np = self.positions_np[:total_num_scheduled_tokens]
651
+ np.add(self.input_batch.num_computed_tokens_cpu[req_indices],
652
+ arange,
653
+ out=positions_np)
654
+
655
+ # Get token indices.
656
+ # E.g., [0, 1, 0, 1, 2, 3, 4, 0, 1, 2]
657
+ # -> [0, 1, M, M + 1, M + 2, M + 3, M + 4, 2 * M, 2 * M + 1, 2 * M + 2]
658
+ # where M is the max_model_len.
659
+ token_indices = (positions_np +
660
+ req_indices * self.input_batch.token_ids_cpu.shape[1])
661
+
662
+ # NOTE(woosuk): We use torch.index_select instead of np.take here
663
+ # because torch.index_select is much faster than np.take for large
664
+ # tensors.
665
+ torch.index_select(self.input_batch.token_ids_cpu_tensor.flatten(),
666
+ 0,
667
+ torch.from_numpy(token_indices),
668
+ out=self.input_ids_cpu[:total_num_scheduled_tokens])
669
+
670
+ # Prepare the attention metadata.
671
+ self.query_start_loc_np[0] = 0
672
+ np.cumsum(num_scheduled_tokens_per_req,
673
+ out=self.query_start_loc_np[1:num_reqs + 1])
674
+ self.query_start_loc_np[num_reqs + 1:] = 1
675
+
676
+ self.seq_lens_np[:num_reqs] = (
677
+ self.input_batch.num_computed_tokens_cpu[:num_reqs] +
678
+ num_scheduled_tokens_per_req)
679
+
680
+ # Do the padding and copy the tensors to the TPU.
681
+ padded_total_num_scheduled_tokens = _get_padded_token_len(
682
+ self.num_tokens_paddings, total_num_scheduled_tokens)
683
+ # Zero out to avoid spurious values from prev iteration (last cp chunk)
684
+ self.input_ids_cpu[
685
+ total_num_scheduled_tokens:padded_total_num_scheduled_tokens] = 0
686
+ self.input_ids = self.input_ids_cpu[:
687
+ padded_total_num_scheduled_tokens].to(
688
+ self.device)
689
+ self.position_ids = self.positions_cpu[:
690
+ padded_total_num_scheduled_tokens].to(
691
+ self.device)
692
+ if use_max_model_len:
693
+ block_tables = self.block_table_cpu[:self.num_reqs_max_model_len, :
694
+ self.max_num_blocks_per_req]
695
+ block_tables[:num_reqs, :self.max_num_blocks_per_req] = (
696
+ self.input_batch.block_table[0].get_cpu_tensor()[:num_reqs])
697
+ query_start_loc = self.query_start_loc_cpu[:self.
698
+ num_reqs_max_model_len +
699
+ 1].to(self.device)
700
+ seq_lens = self.seq_lens_cpu[:self.num_reqs_max_model_len].to(
701
+ self.device)
702
+ else:
703
+ block_tables = self.block_table_cpu[:self.
704
+ num_reqs_most_model_len, :self.
705
+ num_blocks_per_most_len_req]
706
+ block_tables[:num_reqs, :self.num_blocks_per_most_len_req] = (
707
+ self.input_batch.block_table[0].get_cpu_tensor()
708
+ [:num_reqs, :self.num_blocks_per_most_len_req])
709
+ query_start_loc = self.query_start_loc_cpu[:self.
710
+ num_reqs_most_model_len +
711
+ 1].to(self.device)
712
+ seq_lens = self.seq_lens_cpu[:self.num_reqs_most_model_len].to(
713
+ self.device)
714
+ block_tables = block_tables.to(self.device)
715
+
716
+ # Calculate the slot mapping
717
+ slot_mapping_metadata = self._get_slot_mapping_metadata(
718
+ num_reqs, num_scheduled_tokens_per_req)
719
+ num_kv_update_slices = slot_mapping_metadata.shape[0]
720
+ padded_num_slices = _get_padded_num_kv_cache_update_slices(
721
+ padded_total_num_scheduled_tokens, self.max_num_reqs,
722
+ self.block_size)
723
+ slot_mapping_metadata = np.pad(
724
+ slot_mapping_metadata,
725
+ [[0, padded_num_slices - len(slot_mapping_metadata)], [0, 0]],
726
+ constant_values=0)
727
+ slot_mapping_metadata = np.transpose(slot_mapping_metadata)
728
+ slot_mapping_metadata = torch.tensor(slot_mapping_metadata,
729
+ device=self.device)
730
+
731
+ if self.lora_config is not None:
732
+ # We need to respect padding when activating LoRA adapters
733
+ padded_num_scheduled_tokens_per_req = np.copy(
734
+ num_scheduled_tokens_per_req
735
+ ) # Copying to avoid accidental state corruption bugs
736
+ padded_num_scheduled_tokens_per_req[-1] += \
737
+ padded_total_num_scheduled_tokens - total_num_scheduled_tokens
738
+
739
+ self.set_active_loras(self.input_batch,
740
+ padded_num_scheduled_tokens_per_req)
741
+
742
+ attn_metadata = PallasMetadata(
743
+ slot_mapping=slot_mapping_metadata,
744
+ block_tables=block_tables,
745
+ context_lens=seq_lens,
746
+ query_start_loc=query_start_loc,
747
+ num_seqs=torch.tensor([num_reqs],
748
+ dtype=torch.int32,
749
+ device=self.device),
750
+ num_kv_update_slices=torch.tensor([num_kv_update_slices],
751
+ dtype=torch.int32,
752
+ device=self.device),
753
+ num_slices_per_kv_cache_update_block=
754
+ NUM_SLICES_PER_KV_CACHE_UPDATE_BLOCK,
755
+ )
756
+ # NOTE(woosuk): Due to chunked prefills, there can be at most 1 partial
757
+ # request in the batch. While we should not sample any token from this
758
+ # partial request, we do so for simplicity. We will ignore the sampled
759
+ # token from the partial request.
760
+ # TODO: Support prompt logprobs.
761
+ padded_num_reqs = _get_padded_num_reqs_with_upper_limit(
762
+ num_reqs, self.max_num_reqs)
763
+ # Indices at which we sample (positions of last token in the sequence).
764
+ # Padded to avoid recompiling when `num_reqs` varies.
765
+ logits_indices = self.query_start_loc_cpu[1:padded_num_reqs + 1] - 1
766
+ logits_indices = logits_indices.to(self.device)
767
+
768
+ if self.lora_config is not None:
769
+ # We need to respect padding when activating LoRA adapters
770
+ padded_num_scheduled_tokens_per_req = np.copy(
771
+ num_scheduled_tokens_per_req
772
+ ) # Copying to avoid accidental state corruption bugs
773
+ padded_num_scheduled_tokens_per_req[-1] += \
774
+ padded_total_num_scheduled_tokens - total_num_scheduled_tokens
775
+
776
+ self.set_active_loras(self.input_batch,
777
+ padded_num_scheduled_tokens_per_req)
778
+
779
+ layer_names = get_layers_from_vllm_config(self.vllm_config,
780
+ Attention).keys()
781
+ per_layer_attn_metadata = {
782
+ layer_name: attn_metadata
783
+ for layer_name in layer_names
784
+ }
785
+ return per_layer_attn_metadata, logits_indices, padded_num_reqs,\
786
+ num_reqs, end_index
787
+
788
+ def _scatter_placeholders(
789
+ self,
790
+ embeds: torch.Tensor,
791
+ is_embed: Optional[torch.Tensor],
792
+ ) -> torch.Tensor:
793
+ if is_embed is None:
794
+ return embeds
795
+
796
+ placeholders = embeds.new_full(
797
+ (is_embed.shape[0], embeds.shape[-1]),
798
+ fill_value=torch.nan,
799
+ )
800
+ placeholders[is_embed] = embeds
801
+ return placeholders
802
+
803
+ def _gather_placeholders(
804
+ self,
805
+ placeholders: torch.Tensor,
806
+ is_embed: Optional[torch.Tensor],
807
+ ) -> torch.Tensor:
808
+ if is_embed is None:
809
+ return placeholders
810
+
811
+ return placeholders[is_embed]
812
+
813
+ def _execute_mm_encoder(self, scheduler_output: "SchedulerOutput"):
814
+ scheduled_encoder_inputs = scheduler_output.scheduled_encoder_inputs
815
+ if not scheduled_encoder_inputs:
816
+ return
817
+
818
+ # Batch the multi-modal inputs.
819
+ mm_inputs = list[MultiModalKwargs]()
820
+ req_ids_pos = list[tuple[str, int, PlaceholderRange]]()
821
+ for req_id, encoder_input_ids in scheduled_encoder_inputs.items():
822
+ req_state = self.requests[req_id]
823
+
824
+ for mm_input_id in encoder_input_ids:
825
+ mm_inputs.append(req_state.mm_inputs[mm_input_id])
826
+ req_ids_pos.append(
827
+ (req_id, mm_input_id, req_state.mm_positions[mm_input_id]))
828
+
829
+ # Batch mm inputs as much as we can: if a request in the batch has
830
+ # multiple modalities or a different modality than the previous one,
831
+ # we process it separately to preserve item order.
832
+ # FIXME(ywang96): This is a hacky way to deal with multiple modalities
833
+ # in the same batch while still being able to benefit from batching
834
+ # multimodal inputs. The proper solution should be reordering the
835
+ # encoder outputs.
836
+ grouped_mm_inputs_list = group_mm_inputs_by_modality(mm_inputs)
837
+
838
+ encoder_outputs = []
839
+ for grouped_mm_inputs in grouped_mm_inputs_list:
840
+ batched_mm_inputs = MultiModalKwargs.batch(grouped_mm_inputs)
841
+ batched_mm_inputs = MultiModalKwargs.as_kwargs(
842
+ batched_mm_inputs,
843
+ device=self.device,
844
+ )
845
+
846
+ # Run the encoder.
847
+ # `curr_group_outputs` is either of the following:
848
+ # 1. A tensor of shape (num_items, feature_size, hidden_size)
849
+ # in case feature_size is fixed across all multimodal items.
850
+ # 2. A list or tuple (length: num_items) of tensors, each of shape
851
+ # (feature_size, hidden_size) in case the feature size is dynamic
852
+ # depending on the input multimodal items.
853
+ xm.mark_step()
854
+ curr_group_outputs = self.model.get_multimodal_embeddings(
855
+ **batched_mm_inputs)
856
+ xm.mark_step()
857
+
858
+ sanity_check_mm_encoder_outputs(
859
+ curr_group_outputs,
860
+ expected_num_items=len(grouped_mm_inputs),
861
+ )
862
+
863
+ if isinstance(curr_group_outputs, torch.Tensor):
864
+ encoder_outputs.append(curr_group_outputs)
865
+ else:
866
+ assert isinstance(curr_group_outputs, (list, tuple))
867
+ for output in curr_group_outputs:
868
+ encoder_outputs.append(output)
869
+
870
+ # Cache the encoder outputs.
871
+ # NOTE (NickLucche) here we diverge from logic in other runners, as we
872
+ # assume to only have whole mm items to process. Hence we avoid the
873
+ # intrinsic dynamism that `scatter_mm_placeholders` introduces.
874
+ for (req_id, input_id, pos_info), output in zip(
875
+ req_ids_pos,
876
+ encoder_outputs,
877
+ ):
878
+ if req_id not in self.encoder_cache:
879
+ self.encoder_cache[req_id] = {}
880
+ assert pos_info.is_embed is None, "Expected all positions to be"\
881
+ " contiguous and embeddings."
882
+ self.encoder_cache[req_id][input_id] = output
883
+
884
+ def _gather_mm_embeddings(
885
+ self,
886
+ scheduler_output: "SchedulerOutput",
887
+ ) -> list[torch.Tensor]:
888
+ mm_embeds: list[torch.Tensor] = []
889
+ for req_id in self.input_batch.req_ids:
890
+ num_scheduled_tokens = scheduler_output.num_scheduled_tokens[
891
+ req_id]
892
+ req_state = self.requests[req_id]
893
+ num_computed_tokens = req_state.num_computed_tokens
894
+ mm_positions = req_state.mm_positions
895
+ # TODO unroll loop and assume/enforce --disable_chunked_mm_input
896
+ # NOTE (NickLucche) here we diverge from logic in other runners, as
897
+ # we assume to only have whole mm items to process. Hence we avoid
898
+ # the intrinsic dynamism that `gather_mm_placeholders` introduces.
899
+ for i, pos_info in enumerate(mm_positions):
900
+ start_pos = pos_info.offset
901
+ num_encoder_tokens = pos_info.length
902
+
903
+ # The encoder output is needed if the two ranges overlap:
904
+ # [num_computed_tokens,
905
+ # num_computed_tokens + num_scheduled_tokens) and
906
+ # [start_pos, start_pos + num_encoder_tokens)
907
+ if start_pos >= num_computed_tokens + num_scheduled_tokens:
908
+ # The encoder output is not needed in this step.
909
+ break
910
+ if start_pos + num_encoder_tokens <= num_computed_tokens:
911
+ # The encoder output is already processed and stored
912
+ # in the decoder's KV cache.
913
+ continue
914
+
915
+ assert req_id in self.encoder_cache
916
+ assert i in self.encoder_cache[req_id]
917
+ assert pos_info.is_embed is None, "Expected all positions to"\
918
+ " be contiguous and embeddings."
919
+ encoder_output = self.encoder_cache[req_id][i]
920
+ mm_embeds.append(encoder_output)
921
+ return mm_embeds
922
+
923
+ def _get_model_inputs(self, input_ids: torch.Tensor,
924
+ mm_embeds: list[torch.Tensor]):
925
+ if self.is_multimodal_model:
926
+ # NOTE(woosuk): To unify token ids and soft tokens (vision
927
+ # embeddings), we always use embeddings (rather than token ids)
928
+ # as input to the multimodal model, even when the input is text.
929
+ if mm_embeds:
930
+ inputs_embeds = self.model.get_input_embeddings(
931
+ input_ids, mm_embeds)
932
+ else:
933
+ inputs_embeds = self.model.get_input_embeddings(input_ids)
934
+ return None, inputs_embeds
935
+ else:
936
+ # For text-only models, we use token ids as input.
937
+ # While it is possible to use embeddings as input just like the
938
+ # multimodal models, it is not desirable for performance since
939
+ # then the embedding layer is not included in the CUDA graph.
940
+ return input_ids, None
941
+
942
+ @torch.no_grad()
943
+ def execute_model(
944
+ self,
945
+ scheduler_output: "SchedulerOutput",
946
+ intermediate_tensors: Optional[IntermediateTensors] = None,
947
+ ) -> ModelRunnerOutput:
948
+ # Update cached state
949
+ self._update_states(scheduler_output)
950
+ if not scheduler_output.total_num_scheduled_tokens:
951
+ # Return empty ModelRunnerOutput if there's no work to do.
952
+ return EMPTY_MODEL_RUNNER_OUTPUT
953
+
954
+ if self.is_multimodal_model:
955
+ # Run the multimodal encoder if any.
956
+ self._execute_mm_encoder(scheduler_output)
957
+ mm_embeds = self._gather_mm_embeddings(scheduler_output)
958
+ else:
959
+ mm_embeds = []
960
+         xm.mark_step()
+         # Prepare inputs. The requests may be split into multiple
+         # executions; combine the results of each execution.
+         start_index = 0
964
+ combined_selected_tokens: list[torch.Tensor] = []
965
+ combined_logprobs: list[LogprobsLists] = []
966
+ while start_index < self.input_batch.num_reqs:
967
+ attn_metadata, logits_indices, padded_num_reqs, num_reqs,\
968
+ end_index = self._prepare_inputs(scheduler_output, start_index)
969
+ input_ids, inputs_embeds = self._get_model_inputs(
970
+ self.input_ids, mm_embeds)
971
+ xm.mark_step()
972
+ # Run the decoder
973
+ with set_forward_context(
974
+ attn_metadata,
975
+ self.vllm_config,
976
+ num_tokens=scheduler_output.total_num_scheduled_tokens):
977
+ hidden_states = self.model(
978
+ input_ids=input_ids,
979
+ positions=self.position_ids,
980
+ inputs_embeds=inputs_embeds,
981
+ )
982
+ hidden_states = self.select_hidden_states(hidden_states,
983
+ logits_indices)
984
+ logits = self.compute_logits(hidden_states)
985
+ tpu_sampling_metadata = TPUSupportedSamplingMetadata.\
986
+ from_input_batch(self.input_batch, padded_num_reqs, self.device)
987
+ if scheduler_output.grammar_bitmask is not None:
988
+ require_struct_decoding, grammar_bitmask_padded, arange = \
989
+ self.prepare_structured_decoding_input(logits,
990
+ scheduler_output)
991
+ logits = self.structured_decode(require_struct_decoding,
992
+ grammar_bitmask_padded, logits,
993
+ arange)
994
+ selected_token_ids = self.sample_from_logits_func(
995
+ logits, tpu_sampling_metadata)
996
+ # NOTE (NickLucche) Use the original logits (before any penalties or
997
+ # temperature scaling) for the top-k logprobs. We can't enforce it
998
+ # due to recompilations outside torch.compiled code, so just make
999
+ # sure `sample_from_logits` does not modify the logits in-place.
1000
+ logprobs = self.gather_logprobs(logits, selected_token_ids) \
1001
+ if tpu_sampling_metadata.logprobs else None
1002
+
1003
+ # Remove padding on cpu and keep dynamic op outside of xla graph.
1004
+ selected_token_ids = selected_token_ids.cpu()[:num_reqs]
1005
+
1006
+ combined_selected_tokens.append(selected_token_ids)
1007
+ if tpu_sampling_metadata.logprobs:
1008
+ combined_logprobs.append(logprobs.tolists())
1009
+
1010
+ start_index = end_index
1011
+
1012
+ selected_token_ids = torch.cat(combined_selected_tokens, dim=0)
1013
+ if tpu_sampling_metadata.logprobs:
1014
+
1015
+ def concat_lists(input_lists):
1016
+ result = []
1017
+ for input_list in input_lists:
1018
+ result.extend(input_list)
1019
+ return result
1020
+
1021
+ logprobs_lists = LogprobsLists(logprob_token_ids=concat_lists(
1022
+ [lp.logprob_token_ids for lp in combined_logprobs]),
1023
+ logprobs=concat_lists([
1024
+ lp.logprobs
1025
+ for lp in combined_logprobs
1026
+ ]),
1027
+ sampled_token_ranks=concat_lists([
1028
+ lp.sampled_token_ranks
1029
+ for lp in combined_logprobs
1030
+ ]))
1031
+ else:
1032
+ logprobs_lists = None
1033
+
1034
+ # Update the cache state concurrently. Code above will not block until
1035
+ # we use `selected_token_ids`. Add mark_step if post-processing changes
1036
+ request_seq_lens: list[tuple[int, CachedRequestState, int]] = []
1037
+ discard_sampled_tokens_req_indices = []
1038
+ num_reqs = self.input_batch.num_reqs
1039
+ for i, req_id in zip(range(num_reqs), self.input_batch.req_ids):
1040
+ assert req_id is not None
1041
+ req_state = self.requests[req_id]
1042
+ seq_len = (req_state.num_computed_tokens +
1043
+ scheduler_output.num_scheduled_tokens[req_id])
1044
+ if seq_len >= req_state.num_tokens:
1045
+ request_seq_lens.append((i, req_state, seq_len))
1046
+ else:
1047
+ # Ignore the sampled token from the partial request.
1048
+ # Rewind the generator state as if the token was not sampled.
1049
+ generator = self.input_batch.generators.get(i)
1050
+ if generator is not None:
1051
+ # This relies on cuda-specific torch-internal impl details
1052
+ generator.set_offset(generator.get_offset() - 4)
1053
+
1054
+ # Record the index of the request that should not be sampled,
1055
+ # so that we could clear the sampled tokens before returning.
1056
+ discard_sampled_tokens_req_indices.append(i)
1057
+
1058
+ assert all(
1059
+ req_id is not None for req_id in
1060
+ self.input_batch.req_ids[:num_reqs]), "req_ids contains None"
1061
+ req_ids = cast(list[str], self.input_batch.req_ids[:num_reqs])
1062
+
1063
+ prompt_logprobs_dict: dict[str, Optional[LogprobsTensors]] = {}
1064
+ for req_id in self.input_batch.req_ids[:num_reqs]:
1065
+ prompt_logprobs_dict[req_id] = None
1066
+
1067
+ max_gen_len = selected_token_ids.shape[-1]
1068
+ if max_gen_len == 1:
1069
+ valid_sampled_token_ids = selected_token_ids.tolist()
1070
+
1071
+ # Mask out the sampled tokens that should not be sampled.
1072
+ # TODO: Keep in sync with gpu_model_runner.py, in particular
1073
+ # the "else" case here
1074
+ for i in discard_sampled_tokens_req_indices:
1075
+ valid_sampled_token_ids[i].clear()
1076
+
1077
+ # Append sampled tokens
1078
+ for i, req_state, seq_len in request_seq_lens:
1079
+ token_id = valid_sampled_token_ids[i][0]
1080
+ self.input_batch.token_ids_cpu[i, seq_len] = token_id
1081
+ req_state.output_token_ids.append(token_id)
1082
+ self.input_batch.num_tokens[i] += 1
1083
+
1084
+ else:
1085
+ valid_mask = selected_token_ids != INVALID_TOKEN_ID
1086
+ gen_lens = valid_mask.sum(dim=1).tolist()
1087
+ valid_sampled_token_ids = [
1088
+ seq.tolist()
1089
+ for seq in selected_token_ids[valid_mask].split(gen_lens)
1090
+ ]
1091
+ self.input_batch.num_tokens[:num_reqs] += gen_lens
1092
+ for i, req_state, seq_len in request_seq_lens:
1093
+ target_slice = slice(seq_len - gen_lens[i] + 1, seq_len + 1)
1094
+ self.input_batch.token_ids_cpu[
1095
+ i, target_slice] = valid_sampled_token_ids[i]
1096
+ req_state.output_token_ids.extend(valid_sampled_token_ids[i])
1097
+
1098
+ model_runner_output = ModelRunnerOutput(
1099
+ req_ids=req_ids,
1100
+ req_id_to_index=self.input_batch.req_id_to_index,
1101
+ sampled_token_ids=valid_sampled_token_ids,
1102
+ spec_token_ids=None,
1103
+ logprobs=logprobs_lists,
1104
+ prompt_logprobs_dict=prompt_logprobs_dict,
1105
+ pooler_output=[],
1106
+ )
1107
+
1108
+ # Check there are no new graphs compiled - all the graphs should be
1109
+ # captured and compiled during warm up.
1110
+ self._verify_num_xla_graphs("execute_model")
1111
+
1112
+ return model_runner_output
1113
+
1114
+ def load_model(self) -> None:
1115
+ self.device = self.device_config.device
1116
+
1117
+ # NOTE(woosuk): While the executor assigns the TP ranks to the worker
1118
+ # process, the ranks can be different from the ranks internally assigned
1119
+ # by the xm runtime. Therefore, there is a mismatch in the rank
1120
+ # assignment between the gloo (cpu) runtime and the xm (tpu) runtime.
1121
+ # This is not a problem in linear layers because all-reduce is
1122
+ # rank-agnostic. However, it matters for all-gather as the ranks
1123
+ # determine the order of concatenating the output tensors.
1124
+ # As a workaround, we use the xm's rank assignment only when loading
1125
+ # the embedding weights.
1126
+ xm_tp_rank = xr.global_ordinal()
1127
+ with patch(
1128
+ "vllm.model_executor.layers.vocab_parallel_embedding."
1129
+ "get_tensor_model_parallel_rank",
1130
+ return_value=xm_tp_rank):
1131
+ if self.use_spmd:
1132
+ tpu_loader = TPUModelLoader(
1133
+ load_config=self.vllm_config.load_config)
1134
+ model = tpu_loader.load_model(
1135
+ vllm_config=self.vllm_config,
1136
+ model_config=self.vllm_config.model_config,
1137
+ mesh=self.mesh)
1138
+ else:
1139
+ # model = get_model(vllm_config=self.vllm_config)
1140
+ model_loader = get_model_loader(self.load_config)
1141
+ if not hasattr(self, "model"):
1142
+ logger.info("Loading model from scratch...")
1143
+ model = model_loader.load_model(
1144
+ vllm_config=self.vllm_config,
1145
+ model_config=self.model_config)
1146
+ else:
1147
+ logger.info("Model was already initialized. \
1148
+ Loading weights inplace...")
1149
+ model_loader.load_weights(self.model,
1150
+ model_config=self.model_config)
1151
+ if self.lora_config is not None:
1152
+ model = self.load_lora_model(model, self.model_config,
1153
+ self.scheduler_config,
1154
+ self.lora_config, self.device)
1155
+ replace_set_lora(model)
1156
+
1157
+ # Sync all pending XLA execution during model initialization and weight
1158
+ # loading.
1159
+ xm.mark_step()
1160
+ xm.wait_device_ops()
1161
+ if not hasattr(self, "model"):
1162
+ self.model = model
1163
+ self.sampler = TPUSampler()
1164
+
1165
+ @torch.no_grad()
1166
+ def _dummy_run(self, num_tokens: int, num_reqs: int,
1167
+ num_blocks: int) -> None:
1168
+ if self.is_multimodal_model:
1169
+ input_ids = None
1170
+ inputs_embeds = torch.zeros((num_tokens, self.hidden_size),
1171
+ dtype=self.dtype,
1172
+ device=self.device)
1173
+ else:
1174
+ input_ids = torch.zeros((num_tokens),
1175
+ dtype=torch.int32).to(self.device)
1176
+ inputs_embeds = None
1177
+ actual_num_reqs = min(num_tokens, num_reqs)
1178
+ position_ids = torch.zeros(num_tokens,
1179
+ dtype=torch.int32).to(self.device)
1180
+ padded_num_slices = _get_padded_num_kv_cache_update_slices(
1181
+ num_tokens, self.max_num_reqs, self.block_size)
1182
+ num_kv_update_slices = torch.tensor([padded_num_slices],
1183
+ dtype=torch.int32).to(self.device)
1184
+ slot_mapping = torch.zeros((3, padded_num_slices),
1185
+ dtype=torch.int32).to(self.device)
1186
+ block_tables = torch.zeros((num_reqs, num_blocks),
1187
+ dtype=torch.int32).to(self.device)
1188
+ query_lens = [1] * num_reqs
1189
+ query_start_loc = torch.cumsum(torch.tensor([0] + query_lens,
1190
+ dtype=torch.int32),
1191
+ dim=0,
1192
+ dtype=torch.int32).to(self.device)
1193
+ context_lens = torch.ones((num_reqs, ),
1194
+ dtype=torch.int32).to(self.device)
1195
+ num_seqs = torch.tensor([actual_num_reqs],
1196
+ dtype=torch.int32).to(self.device)
1197
+ attn_metadata = PallasMetadata(
1198
+ slot_mapping=slot_mapping,
1199
+ block_tables=block_tables,
1200
+ context_lens=context_lens,
1201
+ query_start_loc=query_start_loc,
1202
+ num_seqs=num_seqs,
1203
+ num_kv_update_slices=num_kv_update_slices,
1204
+ num_slices_per_kv_cache_update_block=
1205
+ NUM_SLICES_PER_KV_CACHE_UPDATE_BLOCK,
1206
+ )
1207
+
1208
+ if self.is_multimodal_model:
1209
+ torch._dynamo.mark_dynamic(inputs_embeds, 0)
1210
+ else:
1211
+ torch._dynamo.mark_dynamic(input_ids, 0)
1212
+ torch._dynamo.mark_dynamic(position_ids, 0)
1213
+ torch._dynamo.mark_dynamic(attn_metadata.slot_mapping, 0)
1214
+ torch._dynamo.mark_dynamic(attn_metadata.block_tables, (0, 1))
1215
+ torch._dynamo.mark_dynamic(attn_metadata.context_lens, 0)
1216
+ torch._dynamo.mark_dynamic(attn_metadata.query_start_loc, 0)
1217
+
1218
+ layer_names = get_layers_from_vllm_config(self.vllm_config,
1219
+ Attention).keys()
1220
+ per_layer_attn_metadata = {
1221
+ layer_name: attn_metadata
1222
+ for layer_name in layer_names
1223
+ }
1224
+
1225
+ with self.maybe_select_dummy_loras(
1226
+ self.lora_config,
1227
+ np.array([num_tokens], dtype=np.int32)), set_forward_context(
1228
+ per_layer_attn_metadata, self.vllm_config, 0):
1229
+ out = self.model(input_ids=input_ids,
1230
+ positions=position_ids,
1231
+ inputs_embeds=inputs_embeds)
1232
+ self._hidden_states_dtype = out.dtype
1233
+
1234
+ def _set_active_loras(self, prompt_lora_mapping, token_lora_mapping,
1235
+ lora_requests) -> None:
1236
+ xm.mark_step() # Captures input updates
1237
+ super()._set_active_loras(prompt_lora_mapping, token_lora_mapping,
1238
+ lora_requests)
1239
+ xm.mark_step() # Captures metadata updates
1240
+
1241
+ def _precompile_mm_encoder(self) -> None:
1242
+ # Pre-compile MM encoder for all supported data modalities.
1243
+ hf_config = self.vllm_config.model_config.hf_config
1244
+ for mode, max_items_by_mode in \
1245
+ self.max_num_mm_items_by_modality.items():
1246
+ logger.info(
1247
+ "Compiling Multimodal %s Encoder with different input"
1248
+ " shapes.", mode)
1249
+ start = time.perf_counter()
1250
+ # No padding for MM encoder just yet.
1251
+ for num_items in range(1, max_items_by_mode + 1):
1252
+ logger.info(" -- mode: %s items: %d", mode, num_items)
1253
+ batched_dummy_mm_inputs = self._get_mm_dummy_batch(
1254
+ mode, num_items)
1255
+ # Run multimodal encoder.
1256
+ xm.mark_step()
1257
+ mm_embeds = self.model.\
1258
+ get_multimodal_embeddings(**batched_dummy_mm_inputs)
1259
+ xm.mark_step()
1260
+ num_patches = mm_embeds[0].shape[0]
1261
+ items_size = num_patches * num_items
1262
+
1263
+ # NOTE (NickLucche) pre-compile `get_input_embeddings` when mm
1264
+ # embeddings are present. We assume `--disable-mm-chunked`,
1265
+ # hence only whole items can be scheduled. This implies we just
1266
+ # need to compile when `num_items` fit the (padded) `input_ids`
1267
+ for num_tokens in self.num_tokens_paddings:
1268
+ if num_tokens >= items_size:
1269
+ # XLA Workaround: if torch.zeros(..device) is used, XLA
1270
+ # compiles a scalar+expansion op, which won't match
1271
+ # the graph generated at runtime. CPU->TPU must be used
1272
+ placeholders_ids = torch.zeros(num_tokens,
1273
+ dtype=torch.int32,
1274
+ device="cpu")
1275
+ # Align placeholders and actual num mm_embeddings.
1276
+ placeholders_ids[:items_size] = \
1277
+ hf_config.image_token_index
1278
+
1279
+ placeholders_ids = placeholders_ids.to(self.device)
1280
+ # Assign outputs or the graph will be cut short.
1281
+ a, b = self._get_model_inputs(placeholders_ids,
1282
+ [mm_embeds])
1283
+ assert a is None
1284
+ xm.mark_step()
1285
+
1286
+ # Pre-compile `get_input_embeddings` when mm_embeddings are not
1287
+ # present. Chunk is only made of text, no mm_placeholders.
1288
+ for num_tokens in self.num_tokens_paddings:
1289
+ placeholders_ids = torch.zeros(num_tokens,
1290
+ dtype=torch.int32,
1291
+ device="cpu")
1292
+ placeholders_ids = placeholders_ids.to(self.device)
1293
+ a, b = self._get_model_inputs(placeholders_ids, [])
1294
+ assert a is None
1295
+ xm.mark_step()
1296
+
1297
+ xm.wait_device_ops()
1298
+ end = time.perf_counter()
1299
+ logger.info(
1300
+ "Multimodal %s Encoder compilation finished in in %.2f "
1301
+ "[secs].", mode, end - start)
1302
+
1303
+ def _precompile_backbone(self) -> None:
1304
+ logger.info("Compiling the model with different input shapes.")
1305
+ start = time.perf_counter()
1306
+ for num_tokens in self.num_tokens_paddings:
1307
+ logger.info(" -- num_tokens: %d", num_tokens)
1308
+ self._dummy_run(num_tokens, self.num_reqs_max_model_len,
1309
+ self.max_num_blocks_per_req)
1310
+ if self.most_model_len is not None:
1311
+ self._dummy_run(num_tokens, self.num_reqs_most_model_len,
1312
+ self.num_blocks_per_most_len_req)
1313
+ xm.wait_device_ops()
1314
+ end = time.perf_counter()
1315
+ logger.info("Compilation finished in %.2f [secs].", end - start)
1316
+ self._update_num_xla_graphs("model backbone")
1317
+
1318
+ def _precompile_select_hidden_states(self) -> None:
1319
+ # Compile hidden state selection function for bucketed
1320
+ # n_tokens x max_num_reqs. Graph is really small so this is fine.
1321
+ logger.info(
1322
+ "Compiling select_hidden_states with different input shapes.")
1323
+ start = time.perf_counter()
1324
+ hsize = self.model_config.get_hidden_size()
1325
+ for num_tokens in self.num_tokens_paddings:
1326
+ dummy_hidden = torch.zeros((num_tokens, hsize),
1327
+ device=self.device,
1328
+ dtype=self._hidden_states_dtype)
1329
+ torch._dynamo.mark_dynamic(dummy_hidden, 0)
1330
+ for num_reqs in self.num_reqs_paddings:
1331
+ indices = torch.zeros(num_reqs,
1332
+ dtype=torch.int32,
1333
+ device=self.device)
1334
+ torch._dynamo.mark_dynamic(indices, 0)
1335
+ self.select_hidden_states(dummy_hidden, indices)
1336
+ logger.info(" -- num_tokens: %d, num_seqs: %d", num_tokens,
1337
+ num_reqs)
1338
+ # Requests can't be more than tokens. But do compile for the
1339
+ # next bigger value in case num_tokens uses bucketed padding.
1340
+ if num_reqs >= min(num_tokens, self.max_num_reqs):
1341
+ break
1342
+ xm.wait_device_ops()
1343
+ end = time.perf_counter()
1344
+ logger.info("Compilation finished in %.2f [secs].", end - start)
1345
+ self._update_num_xla_graphs("select_hidden_states")
1346
+
1347
+ def _precompile_compute_logits(self) -> None:
1348
+ logger.info("Compiling compute_logits with different input shapes.")
1349
+ start = time.perf_counter()
1350
+ hsize = self.model_config.get_hidden_size()
1351
+ for num_reqs in self.num_reqs_paddings:
1352
+ dummy_hidden = torch.zeros((num_reqs, hsize),
1353
+ device=self.device,
1354
+ dtype=self._hidden_states_dtype)
1355
+ torch._dynamo.mark_dynamic(dummy_hidden, 0)
1356
+ self.compute_logits(dummy_hidden)
1357
+ logger.info(" -- num_seqs: %d", num_reqs)
1358
+ xm.wait_device_ops()
1359
+ end = time.perf_counter()
1360
+ logger.info("Compilation finished in %.2f [secs].", end - start)
1361
+ self._update_num_xla_graphs("compute_logits")
1362
+
1363
+ def _precompile_structured_decoding(self) -> None:
1364
+ logger.info(
1365
+ "Compiling structured_decoding with different input shapes.")
1366
+ start = time.perf_counter()
1367
+ for num_reqs in self.num_reqs_paddings:
1368
+ dummy_logits = torch.zeros((num_reqs, self.vocab_size),
1369
+ device=self.device,
1370
+ dtype=self._hidden_states_dtype)
1371
+ dummy_require_struct_decoding = \
1372
+ self.require_structured_out_cpu[:num_reqs].to(self.device)
1373
+ dummy_grammar_bitmask = \
1374
+ self.grammar_bitmask_cpu[:num_reqs].to(self.device)
1375
+ # The first dimension of the above 3 dummy tensors cannot be
1376
+ # mark_dynamic because some operations in structured_decode require
1377
+ # them to be static.
1378
+ arange = self.structured_decode_arange.to(self.device)
1379
+ self.structured_decode(dummy_require_struct_decoding,
1380
+ dummy_grammar_bitmask, dummy_logits, arange)
1381
+ logger.info(" -- num_seqs: %d", num_reqs)
1382
+ xm.wait_device_ops()
1383
+ end = time.perf_counter()
1384
+ logger.info("Compilation finished in %.2f [secs].", end - start)
1385
+ self._update_num_xla_graphs("structured_decoding")
1386
+
1387
+ def _precompile_sample_from_logits(self) -> None:
1388
+ logger.info(
1389
+ "Compiling sample_from_logits with different input shapes.")
1390
+ start = time.perf_counter()
1391
+ for num_reqs in self.num_reqs_paddings:
1392
+ dummy_logits = torch.zeros((num_reqs, self.vocab_size),
1393
+ device=self.device,
1394
+ dtype=self._hidden_states_dtype)
1395
+ # The first dimension of dummy_logits cannot be mark_dynamic
1396
+ # because some operations in the sampler require it to be static.
1397
+ for all_greedy in [False, True]:
1398
+ generate_params_if_all_greedy = not all_greedy
1399
+ sampling_metadata = (
1400
+ TPUSupportedSamplingMetadata.from_input_batch(
1401
+ self.input_batch,
1402
+ num_reqs,
1403
+ self.device,
1404
+ generate_params_if_all_greedy,
1405
+ ))
1406
+ sampling_metadata.all_greedy = all_greedy
1407
+ with self.maybe_select_dummy_loras(
1408
+ self.lora_config, np.array([num_reqs],
1409
+ dtype=np.int32)):
1410
+ self.sample_from_logits_func(dummy_logits,
1411
+ sampling_metadata)
1412
+ logger.info(" -- num_seqs: %d", num_reqs)
1413
+ xm.wait_device_ops()
1414
+ end = time.perf_counter()
1415
+ logger.info("Compilation finished in %.2f [secs].", end - start)
1416
+ self._update_num_xla_graphs("sample_from_logits")
1417
+
1418
+ def _precompile_gather_logprobs(self) -> None:
1419
+ logger.info("Compiling gather_logprobs with different input shapes.")
1420
+ start = time.perf_counter()
1421
+ for num_reqs in self.num_reqs_paddings:
1422
+ dummy_logits = torch.zeros((num_reqs, self.vocab_size),
1423
+ device=self.device,
1424
+ dtype=self._hidden_states_dtype)
1425
+ dummy_tokens = torch.zeros((num_reqs, 1),
1426
+ dtype=torch.int64).to(self.device)
1427
+ with self.maybe_select_dummy_loras(
1428
+ self.lora_config, np.array([num_reqs], dtype=np.int32)):
1429
+ self.gather_logprobs(dummy_logits, dummy_tokens)
1430
+ logger.info(" -- num_seqs: %d", num_reqs)
1431
+ xm.wait_device_ops()
1432
+ end = time.perf_counter()
1433
+ logger.info("Compilation finished in %.2f [secs].", end - start)
1434
+ self._update_num_xla_graphs("gather_logprobs")
1435
+
1436
+ def capture_model(self) -> None:
1437
+ """
1438
+ Precompile all the subgraphs with possible input shapes.
1439
+ """
1440
+ with self.maybe_setup_dummy_loras(self.lora_config):
1441
+ self._precompile_mm_encoder()
1442
+ self._precompile_backbone()
1443
+ self._precompile_select_hidden_states()
1444
+ self._precompile_compute_logits()
1445
+ self._precompile_structured_decoding()
1446
+ self._precompile_sample_from_logits()
1447
+ self._precompile_gather_logprobs()
1448
+
1449
+ def profile_run(
1450
+ self,
1451
+ num_tokens: int,
1452
+ ) -> None:
1453
+ # Profile with multimodal encoder & encoder cache.
1454
+ # TODO: handle encoder-decoder models once we support them.
1455
+ if (self.is_multimodal_model and self.max_num_encoder_input_tokens > 0
1456
+ and self.encoder_cache_size > 0):
1457
+
1458
+ # NOTE: Currently model is profiled with a single non-text
1459
+ # modality with the max possible input tokens even when
1460
+ # it supports multiple.
1461
+ dummy_data_modality, max_num_mm_items = max(
1462
+ self.max_num_mm_items_by_modality.items(), key=lambda t: t[1])
1463
+
1464
+ encoder_budget = min(self.max_num_encoder_input_tokens,
1465
+ self.encoder_cache_size)
1466
+
1467
+ logger.info(
1468
+ "Encoder cache will be initialized with a budget of %d tokens,"
1469
+ " and profiled with %s %s items of the maximum feature size.",
1470
+ encoder_budget, max_num_mm_items, dummy_data_modality)
1471
+
1472
+ # Create dummy batch of multimodal inputs.
1473
+ batched_dummy_mm_inputs = self._get_mm_dummy_batch(
1474
+ dummy_data_modality, max_num_mm_items)
1475
+
1476
+ # Run multimodal encoder.
1477
+ # Isolate encoder graph from post-processing to minimize
1478
+ # impact of recompilation until it's fixed.
1479
+ start = time.perf_counter()
1480
+ xm.mark_step()
1481
+ dummy_encoder_outputs = self.model.get_multimodal_embeddings(
1482
+ **batched_dummy_mm_inputs)
1483
+ xm.mark_step()
1484
+ xm.wait_device_ops()
1485
+ end = time.perf_counter()
1486
+ logger.info(
1487
+ "Multimodal Encoder profiling finished in in %.2f [secs].",
1488
+ end - start)
1489
+
1490
+ assert len(dummy_encoder_outputs) == max_num_mm_items, (
1491
+ "Expected dimension 0 of encoder outputs to match the number "
1492
+ f"of multimodal data items: {max_num_mm_items}, got "
1493
+ f"{len(dummy_encoder_outputs)=} instead. This is most likely "
1494
+ "due to the 'get_multimodal_embeddings' method of the model "
1495
+ "not implemented correctly.")
1496
+
1497
+ # Cache the dummy encoder outputs.
1498
+ self.encoder_cache["tmp"] = dict(enumerate(dummy_encoder_outputs))
1499
+
1500
+ # Trigger compilation for general shape.
1501
+ self._dummy_run(num_tokens, self.num_reqs_max_model_len,
1502
+ self.max_num_blocks_per_req)
1503
+ if self.most_model_len is not None:
1504
+ self._dummy_run(num_tokens, self.num_reqs_most_model_len,
1505
+ self.num_blocks_per_most_len_req)
1506
+
1507
+ xm.mark_step()
1508
+ xm.wait_device_ops()
1509
+ self.encoder_cache.clear()
1510
+ gc.collect()
1511
+
1512
+ def initialize_kv_cache(self, kv_cache_config: KVCacheConfig) -> None:
1513
+ """
1514
+ Initialize KV cache based on `kv_cache_config`.
1515
+ Args:
1516
+ kv_cache_config: Configuration for the KV cache, including the KV
1517
+ cache size of each layer
1518
+ """
1519
+ if len(kv_cache_config.kv_cache_groups) > 1:
1520
+ raise NotImplementedError(
1521
+ "Hybrid models with more than one KV cache type are not "
1522
+ "supported yet.")
1523
+
1524
+ if kv_cache_config.kv_cache_groups[
1525
+ 0].kv_cache_spec.block_size != self.block_size:
1526
+ self.input_batch = InputBatch(
1527
+ max_num_reqs=self.max_num_reqs,
1528
+ max_model_len=self.max_model_len,
1529
+ max_num_batched_tokens=self.max_num_tokens,
1530
+ device=self.device,
1531
+ pin_memory=self.pin_memory,
1532
+ vocab_size=self.model_config.get_vocab_size(),
1533
+ block_sizes=[
1534
+ kv_cache_config.kv_cache_groups[0].kv_cache_spec.block_size
1535
+ ],
1536
+ )
1537
+ # Verify dtype compatibility between block_table_cpu and input_batch
1538
+ assert self.block_table_cpu.dtype == self.input_batch.block_table[
1539
+ 0].get_cpu_tensor().dtype
1540
+
1541
+ kv_cache_sizes = {}
1542
+ for kv_cache_tensor in kv_cache_config.kv_cache_tensors:
1543
+ assert len(kv_cache_tensor.shared_by) == 1, (
1544
+ "KV cache tensor shared by multiple layers is not supported in "
1545
+ "TPU.")
1546
+ kv_cache_sizes[kv_cache_tensor.shared_by[0]] = kv_cache_tensor.size
1547
+
1548
+ kv_caches: dict[str, torch.Tensor] = {}
1549
+ for kv_cache_group in kv_cache_config.kv_cache_groups:
1550
+ kv_cache_spec = kv_cache_group.kv_cache_spec
1551
+ for layer_name in kv_cache_group.layer_names:
1552
+ tensor_size = kv_cache_sizes[layer_name]
1553
+ assert tensor_size % kv_cache_spec.page_size_bytes == 0
1554
+ num_blocks = tensor_size // kv_cache_spec.page_size_bytes # noqa
1555
+ if isinstance(kv_cache_spec, AttentionSpec):
1556
+ if self.use_spmd:
1557
+ num_kv_heads = kv_cache_spec.num_kv_heads
1558
+ assert self.original_parallel_config is not None
1559
+ tp_size = \
1560
+ self.original_parallel_config.tensor_parallel_size
1561
+ # TODO: Handle kv cache duplication under SPMD mode.
1562
+ assert num_kv_heads % tp_size == 0, (
1563
+ f"num_kv_heads {num_kv_heads} must be divisible by "
1564
+ f"tp_size {tp_size} under SPMD mode")
1565
+ kv_cache_shape = PallasAttentionBackend.get_kv_cache_shape(
1566
+ num_blocks, kv_cache_spec.block_size,
1567
+ kv_cache_spec.num_kv_heads, kv_cache_spec.head_size)
1568
+ dtype = kv_cache_spec.dtype
1569
+
1570
+ tpu_kv_cache = torch.zeros(kv_cache_shape,
1571
+ dtype=dtype).to(self.device)
1572
+
1573
+ kv_caches[layer_name] = tpu_kv_cache
1574
+ else:
1575
+ raise NotImplementedError
1576
+
1577
+ # Setup `kv_cache_config` and `kv_caches` for models
1578
+ # with cross-layer KV sharing
1579
+ if self.shared_kv_cache_layers:
1580
+ initialize_kv_cache_for_kv_sharing(
1581
+ self.shared_kv_cache_layers,
1582
+ kv_cache_config.kv_cache_groups,
1583
+ kv_caches,
1584
+ )
1585
+
1586
+ bind_kv_cache(
1587
+ kv_caches,
1588
+ self.vllm_config.compilation_config.static_forward_context,
1589
+ self.kv_caches)
1590
+
1591
+ if self.use_spmd:
1592
+ # Shard KV Cache
1593
+ for cache in self.kv_caches:
1594
+ xs.mark_sharding(cache, self.mesh, (None, 'x', None, None))
1595
+
1596
+ def reset_dynamo_cache(self):
1597
+ if self.is_multimodal_model:
1598
+ compiled_model = self.model.get_language_model().model
1599
+ else:
1600
+ compiled_model = self.model.model
1601
+ if isinstance(compiled_model, TorchCompileWrapperWithCustomDispatcher):
1602
+ logger.info("Clear dynamo cache and cached dynamo bytecode.")
1603
+ torch._dynamo.eval_frame.remove_from_cache(
1604
+ compiled_model.original_code_object)
1605
+ compiled_model.compiled_codes.clear()
1606
+
1607
+ @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
1608
+ def select_hidden_states(self, hidden_states, indices_do_sample):
1609
+ return hidden_states[indices_do_sample]
1610
+
1611
+ @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
1612
+ def compute_logits(self,
1613
+ sample_hidden_states: torch.Tensor) -> torch.Tensor:
1614
+ return self.model.compute_logits(sample_hidden_states, None)
1615
+
1616
+ # TODO: Under SPMD mode, sample_from_logits has correctness issue.
1617
+ # Re-enable the torch.compile once the issue is fixed in torchxla.
1618
+ # @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
1619
+ def sample_from_logits(
1620
+ self, logits: torch.Tensor,
1621
+ sampling_metadata: TPUSupportedSamplingMetadata) -> torch.Tensor:
1622
+ """
1623
+ Sample with xla-friendly function. This function is to be traced
1624
+ separately from `forward` for lighter compilation overhead.
1625
+ """
1626
+ if sampling_metadata.all_greedy:
1627
+ out_tokens = torch.argmax(logits, dim=-1, keepdim=True)
1628
+ else:
1629
+ out_tokens = self.sampler(logits,
1630
+ sampling_metadata).sampled_token_ids
1631
+ return out_tokens
1632
+
1633
+ @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
1634
+ def gather_logprobs(self, logits: torch.Tensor,
1635
+ sampled_tokens: torch.Tensor) -> LogprobsTensors:
1636
+ """
1637
+ Gather the top_logprobs with corresponding tokens. Use a fixed number
1638
+ of logprobs as an alternative to having multiple pre-compiled graphs.
1639
+ Select the number of logprobs actually demanded by each request on CPU.
1640
+ """
1641
+ logprobs = self.sampler.compute_logprobs(logits)
1642
+ return self.sampler.gather_logprobs(
1643
+ logprobs,
1644
+ self.model_config.max_logprobs,
1645
+ token_ids=sampled_tokens.squeeze(-1))
1646
+
1647
+ @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
1648
+ def structured_decode(self, require_struct_decoding: torch.Tensor,
1649
+ grammar_bitmask: torch.Tensor, logits: torch.Tensor,
1650
+ arange: torch.Tensor) -> torch.Tensor:
1651
+ return torch.where(
1652
+ require_struct_decoding,
1653
+ self.apply_grammar_bitmask(logits, grammar_bitmask, arange),
1654
+ logits)
1655
+
1656
+     def apply_grammar_bitmask(self, logits: torch.Tensor,
+                               grammar_bitmask: torch.Tensor,
+                               arange: torch.Tensor):
+         assert (logits.shape[0] == grammar_bitmask.shape[0])
+         logits_cloned = logits.clone()
+         for i in range(logits.shape[0]):
+             unpacked_bitmask = (torch.bitwise_right_shift(
+                 grammar_bitmask[i][:, None], arange[None, :]) & 1) == 0
+             unpacked_bitmask = unpacked_bitmask.reshape(-1)[:self.vocab_size]
+             logits_cloned[i] = logits_cloned[i].masked_fill(
+                 unpacked_bitmask, -float("inf"))
+         return logits_cloned
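The unpacking convention above (a set bit keeps a token allowed, a cleared bit masks its logit to -inf) can be sanity-checked on a single packed word. A minimal sketch with a made-up bitmask value:

    import torch

    word = torch.tensor([0b101], dtype=torch.int32)   # bits 0 and 2 set
    arange = torch.arange(0, 32)

    disallowed = (torch.bitwise_right_shift(word[:, None],
                                            arange[None, :]) & 1) == 0
    print(disallowed[0, :4].tolist())   # [False, True, False, True]

Here only token ids 0 and 2 of this 32-token slice would keep their logits; every other position is filled with -inf.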
1668
+
1669
+ def get_multimodal_embeddings(self, *args, **kwargs):
+ return self.model.get_multimodal_embeddings(*args, **kwargs)
+
+ def get_input_embeddings(self, *args, **kwargs):
+ return self.model.get_input_embeddings(*args, **kwargs)
+
+ def prepare_structured_decoding_input(
+ self, logits: torch.Tensor, scheduler_output: "SchedulerOutput"
+ ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ grammar_bitmask = scheduler_output.grammar_bitmask
+ assert grammar_bitmask is not None
+ num_reqs, _ = logits.shape
+
+ # Reset pre-allocated tensors
+ self.grammar_bitmask_cpu.zero_()
+ self.require_structured_out_cpu.zero_()
+
+ # We receive the structured output bitmask from the scheduler, but the
+ # indices of the requests in the batch may not match the indices of
+ # the bitmask, since the scheduler doesn't know how the TPU runner is
+ # ordering the requests in the batch. We need to match the order of
+ # the bitmask with the order of the requests.
+ struct_out_indices: list[int] = []
+ mask_indices: list[int] = []
+ for req_id in self.input_batch.req_ids:
+ mask_index = scheduler_output.structured_output_request_ids.get(
+ req_id)
+ if mask_index is None:
+ continue
+ batch_index = self.input_batch.req_id_to_index[req_id]
+ struct_out_indices.append(batch_index)
+ mask_indices.append(mask_index)
+ self.grammar_bitmask_cpu[struct_out_indices] = torch.from_numpy(
+ grammar_bitmask[mask_indices])
+ # It's not guaranteed that all requests in this batch require
+ # structured output, so create a bool tensor to represent
+ # the requests that need structured output.
+ struct_out_indices = torch.tensor(struct_out_indices, dtype=torch.long)
+ self.require_structured_out_cpu[struct_out_indices] = True
+ return self.require_structured_out_cpu[:num_reqs].to(logits.device), \
+ self.grammar_bitmask_cpu[:num_reqs].to(logits.device), \
+ self.structured_decode_arange.to(logits.device)
+
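A small self-contained sketch of the reordering described in the comments above; the request ids, dictionaries and values are invented for illustration and are not the scheduler's actual types:

# Rows of the scheduler's bitmask are scattered into the runner's batch order
# via index assignment; requests without structured output are skipped.
import numpy as np
import torch

scheduler_rows = np.array([[1, 1], [2, 2]], dtype=np.int32)   # one row per structured request
structured_request_ids = {"req_b": 0, "req_a": 1}             # req_id -> bitmask row
batch_order = {"req_a": 0, "req_b": 1, "req_c": 2}            # req_id -> batch index

bitmask_cpu = torch.zeros(3, 2, dtype=torch.int32)
struct_out_indices, mask_indices = [], []
for req_id, batch_index in batch_order.items():
    row = structured_request_ids.get(req_id)
    if row is None:
        continue                       # req_c needs no structured output
    struct_out_indices.append(batch_index)
    mask_indices.append(row)

bitmask_cpu[struct_out_indices] = torch.from_numpy(scheduler_rows[mask_indices])
# req_a (batch row 0) now holds bitmask row 1, req_b (batch row 1) holds row 0.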
+ def _get_mm_dummy_batch(self, modality: str,
+ batch_size: int) -> BatchedTensorInputs:
+ # Dummy data for pre-compiling multimodal models.
+ dummy_request_data = self.mm_registry.get_decoder_dummy_data(
+ model_config=self.model_config,
+ seq_len=self.max_num_tokens,
+ )
+ dummy_mm_data = dummy_request_data.multi_modal_data
+
+ # The dummy data definition in V0 may contain multiple multimodal items
+ # (e.g., multiple images) for a single request, so here we always
+ # replicate the first item batch_size times, since in V1 the items are
+ # scheduled to be processed separately.
+ assert isinstance(dummy_mm_data, MultiModalKwargs), (
+ "Expected dummy multimodal data to be of type "
+ f"MultiModalKwargs, got {type(dummy_mm_data)=} instead. "
+ "This is most likely due to the model not having a merged "
+ "processor.")
+
+ # When models have a merged processor, their dummy data is
+ # already batched `MultiModalKwargs`, therefore we take the first
+ # `MultiModalKwargsItem` from the desired modality to profile on.
+ dummy_mm_item = dummy_mm_data.get_item(modality=modality, item_index=0)
+ dummy_mm_kwargs = MultiModalKwargs.from_items([dummy_mm_item])
+
+ batched_dummy_mm_inputs = MultiModalKwargs.batch([dummy_mm_kwargs] *
+ batch_size)
+ return MultiModalKwargs.as_kwargs(
+ batched_dummy_mm_inputs,
+ device=self.device,
+ )
+
+
+ def _get_req_paddings(min_req_size: int, max_req_size: int) -> list[int]:
+ logger.info("Preparing request paddings:")
+ # assert min_req_size is a power of 2
+ assert (min_req_size & (min_req_size - 1) == 0) and min_req_size > 0
+ paddings: list = []
+ num = max(MIN_NUM_SEQS, min_req_size)
+ while num <= max_req_size and (len(paddings) == 0 or paddings[-1] != num):
+ paddings.append(num)
+ logger.info(" %d", num)
+ num = _get_padded_num_reqs_with_upper_limit(num + 1, max_req_size)
+ return paddings
+
+
+ def _get_padded_num_reqs_with_upper_limit(x: int, upper_limit: int) -> int:
+ res = MIN_NUM_SEQS if x <= MIN_NUM_SEQS else 1 << (x - 1).bit_length()
+ return min(res, upper_limit)
+
+
+ def _get_token_paddings(min_token_size: int, max_token_size: int,
+ padding_gap: int) -> list[int]:
+ """Generate a list of padding sizes, starting from min_token_size and
+ ending with a number that can cover max_token_size.
+
+ If padding_gap == 0:
+ double the size each time (exponential).
+ Otherwise:
+ first double the size up to padding_gap,
+ then increase the padding size by padding_gap each step.
+ """
+ # assert min_token_size is a power of 2
+ assert (min_token_size & (min_token_size - 1) == 0) and min_token_size > 0
+ paddings = []
+ num = min_token_size
+
+ if padding_gap == 0:
+ logger.info("Using exponential token paddings:")
+ while True:
+ logger.info(" %d", num)
+ paddings.append(num)
+ if num >= max_token_size:
+ break
+ num *= 2
+ else:
+ logger.info("Using incremental token paddings:")
+ while num <= padding_gap:
+ logger.info(" %d", num)
+ paddings.append(num)
+ num *= 2
+ num //= 2
+ while num < max_token_size:
+ num += padding_gap
+ logger.info(" %d", num)
+ paddings.append(num)
+
+ return paddings
+
+
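Worked examples of the two schedules described in the docstring above, with illustrative inputs:

# Exponential (padding_gap == 0):
#   _get_token_paddings(16, 512, 0)  -> [16, 32, 64, 128, 256, 512]
# Incremental (padding_gap == 64): doubling phase up to the gap, then fixed steps:
#   _get_token_paddings(16, 512, 64) -> [16, 32, 64, 128, 192, 256, 320, 384, 448, 512]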
+ def _get_padded_token_len(paddings: list[int], x: int) -> int:
+ """Return the first element in the paddings list that is greater than or equal to x.
+ """
+ index = bisect.bisect_left(paddings, x)
+ assert index < len(paddings)
+ return paddings[index]
+
+
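A small usage sketch of the lookup above, with an illustrative paddings list:

# bisect_left finds the smallest padded length that still fits the actual count.
import bisect

paddings = [16, 32, 64, 128, 256, 512]
assert paddings[bisect.bisect_left(paddings, 16)] == 16    # exact match reused
assert paddings[bisect.bisect_left(paddings, 17)] == 32    # rounded up
assert paddings[bisect.bisect_left(paddings, 300)] == 512  # next bucket that fits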
+ def _get_padded_num_kv_cache_update_slices(num_tokens: int, max_num_reqs: int,
+ page_size: int) -> int:
+ """Calculates the padded number of KV cache update slices to avoid
+ recompilation."""
+ padded_num_slices = 2 * max_num_reqs + num_tokens // page_size
+ padded_num_slices = min(padded_num_slices, num_tokens)
+ padded_num_slices = (
+ padded_num_slices + NUM_SLICES_PER_KV_CACHE_UPDATE_BLOCK - 1
+ ) // NUM_SLICES_PER_KV_CACHE_UPDATE_BLOCK * \
+ NUM_SLICES_PER_KV_CACHE_UPDATE_BLOCK
+ return padded_num_slices
+
+
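A worked example of the rounding above, assuming an illustrative block size of 8 for NUM_SLICES_PER_KV_CACHE_UPDATE_BLOCK:

# The slice count is first bounded, then rounded up to a multiple of the block
# size so the update kernel always sees a stable shape.
NUM_SLICES_PER_KV_CACHE_UPDATE_BLOCK = 8

num_tokens, max_num_reqs, page_size = 100, 4, 16
padded = 2 * max_num_reqs + num_tokens // page_size        # 8 + 6 = 14
padded = min(padded, num_tokens)                           # still 14
padded = ((padded + NUM_SLICES_PER_KV_CACHE_UPDATE_BLOCK - 1)
          // NUM_SLICES_PER_KV_CACHE_UPDATE_BLOCK
          * NUM_SLICES_PER_KV_CACHE_UPDATE_BLOCK)          # rounded up to 16
assert padded == 16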
+ def replace_set_lora(model):
+
+ def _tpu_set_lora(
+ self,
+ index: int,
+ lora_a: torch.Tensor,
+ lora_b: torch.Tensor,
+ embeddings_tensor: Optional[torch.Tensor],
+ bias: Optional[torch.Tensor] = None,
+ ):
+ # TODO: The integer index leads to a recompilation, but converting it
+ # to a tensor doesn't seem to work anymore. This might be fixed with a
+ # later release of torch_xla.
+ self._original_set_lora(index, lora_a, lora_b, embeddings_tensor, bias)
+ xm.mark_step()
+
+ def _tpu_reset_lora(self, index: int):
+ self._original_reset_lora(index)
+ xm.mark_step()
+
+ for _, module in model.named_modules():
+ if isinstance(module, BaseLayerWithLoRA):
+ module._original_set_lora = module.set_lora
+ module._original_reset_lora = module.reset_lora
+ module.set_lora = _tpu_set_lora.__get__(module, module.__class__)
+ module.reset_lora = _tpu_reset_lora.__get__(
+ module, module.__class__)
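A standalone illustration of the monkey-patching pattern used above: binding a plain function to an instance via __get__ so the wrapper can call the saved original and then add the extra step (a print stands in for xm.mark_step()):

# The saved original stays reachable on the instance; the patched function,
# once bound with __get__, behaves like a regular method of that instance.
class Layer:
    def set_lora(self, index):
        print(f"original set_lora({index})")

def _patched_set_lora(self, index):
    self._original_set_lora(index)
    print("mark_step()")  # placeholder for xm.mark_step()

layer = Layer()
layer._original_set_lora = layer.set_lora
layer.set_lora = _patched_set_lora.__get__(layer, Layer)
layer.set_lora(0)   # prints both lines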