vllm_cpu-0.8.5.post2-cp310-cp310-manylinux_2_17_x86_64.whl

This diff shows the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
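
Such a file-level comparison can be reproduced locally, because a wheel is a plain zip archive. The following is a minimal sketch, assuming both wheel files have already been downloaded; the previous-release filename used here is a placeholder for illustration, not taken from this page.

    # Minimal sketch: diff the file lists of two locally downloaded wheels.
    # Both filenames are assumptions for illustration; your paths may differ.
    import zipfile

    def wheel_files(path):
        # A wheel is a standard zip archive, so its member list is the file list.
        with zipfile.ZipFile(path) as wf:
            return {info.filename: info.file_size for info in wf.infolist()}

    old = wheel_files("vllm_cpu-0.8.5.post1-cp310-cp310-manylinux_2_17_x86_64.whl")
    new = wheel_files("vllm_cpu-0.8.5.post2-cp310-cp310-manylinux_2_17_x86_64.whl")

    added = sorted(set(new) - set(old))
    removed = sorted(set(old) - set(new))
    print(f"Files added: {len(added)}, removed: {len(removed)}")
    for name in added:
        print(f"  {name} ({new[name]} bytes)")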

Potentially problematic release.


This version of vllm-cpu might be problematic.

Files changed (1103)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +170 -0
  3. vllm/_custom_ops.py +1536 -0
  4. vllm/_ipex_ops.py +241 -0
  5. vllm/_version.py +34 -0
  6. vllm/adapter_commons/__init__.py +0 -0
  7. vllm/adapter_commons/layers.py +16 -0
  8. vllm/adapter_commons/models.py +105 -0
  9. vllm/adapter_commons/request.py +25 -0
  10. vllm/adapter_commons/utils.py +92 -0
  11. vllm/adapter_commons/worker_manager.py +38 -0
  12. vllm/assets/__init__.py +0 -0
  13. vllm/assets/audio.py +38 -0
  14. vllm/assets/base.py +40 -0
  15. vllm/assets/image.py +31 -0
  16. vllm/assets/video.py +103 -0
  17. vllm/attention/__init__.py +19 -0
  18. vllm/attention/backends/__init__.py +0 -0
  19. vllm/attention/backends/abstract.py +306 -0
  20. vllm/attention/backends/blocksparse_attn.py +457 -0
  21. vllm/attention/backends/cpu_mla.py +303 -0
  22. vllm/attention/backends/flash_attn.py +999 -0
  23. vllm/attention/backends/flashinfer.py +1092 -0
  24. vllm/attention/backends/flashmla.py +242 -0
  25. vllm/attention/backends/hpu_attn.py +301 -0
  26. vllm/attention/backends/ipex_attn.py +396 -0
  27. vllm/attention/backends/mla/__init__.py +0 -0
  28. vllm/attention/backends/mla/common.py +1444 -0
  29. vllm/attention/backends/pallas.py +346 -0
  30. vllm/attention/backends/placeholder_attn.py +399 -0
  31. vllm/attention/backends/rocm_aiter_mla.py +412 -0
  32. vllm/attention/backends/rocm_flash_attn.py +969 -0
  33. vllm/attention/backends/torch_sdpa.py +691 -0
  34. vllm/attention/backends/triton_mla.py +113 -0
  35. vllm/attention/backends/utils.py +609 -0
  36. vllm/attention/backends/xformers.py +798 -0
  37. vllm/attention/layer.py +443 -0
  38. vllm/attention/ops/__init__.py +0 -0
  39. vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
  40. vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +432 -0
  41. vllm/attention/ops/blocksparse_attention/interface.py +238 -0
  42. vllm/attention/ops/blocksparse_attention/utils.py +244 -0
  43. vllm/attention/ops/chunked_prefill_paged_decode.py +366 -0
  44. vllm/attention/ops/flashmla.py +115 -0
  45. vllm/attention/ops/hpu_paged_attn.py +105 -0
  46. vllm/attention/ops/ipex_attn.py +193 -0
  47. vllm/attention/ops/merge_attn_states.py +42 -0
  48. vllm/attention/ops/nki_flash_attn.py +905 -0
  49. vllm/attention/ops/paged_attn.py +255 -0
  50. vllm/attention/ops/prefix_prefill.py +902 -0
  51. vllm/attention/ops/rocm_aiter_mla.py +42 -0
  52. vllm/attention/ops/rocm_aiter_paged_attn.py +101 -0
  53. vllm/attention/ops/triton_decode_attention.py +675 -0
  54. vllm/attention/ops/triton_flash_attention.py +1375 -0
  55. vllm/attention/ops/triton_merge_attn_states.py +96 -0
  56. vllm/attention/selector.py +186 -0
  57. vllm/attention/utils/fa_utils.py +54 -0
  58. vllm/beam_search.py +82 -0
  59. vllm/benchmarks/__init__.py +0 -0
  60. vllm/benchmarks/datasets.py +831 -0
  61. vllm/benchmarks/endpoint_request_func.py +160 -0
  62. vllm/benchmarks/latency.py +181 -0
  63. vllm/benchmarks/serve.py +925 -0
  64. vllm/benchmarks/throughput.py +608 -0
  65. vllm/benchmarks/utils.py +69 -0
  66. vllm/collect_env.py +795 -0
  67. vllm/compilation/__init__.py +0 -0
  68. vllm/compilation/backends.py +715 -0
  69. vllm/compilation/compiler_interface.py +437 -0
  70. vllm/compilation/counter.py +33 -0
  71. vllm/compilation/decorators.py +249 -0
  72. vllm/compilation/fix_functionalization.py +182 -0
  73. vllm/compilation/fusion.py +617 -0
  74. vllm/compilation/fx_utils.py +60 -0
  75. vllm/compilation/inductor_pass.py +114 -0
  76. vllm/compilation/monitor.py +38 -0
  77. vllm/compilation/multi_output_match.py +108 -0
  78. vllm/compilation/noop_elimination.py +135 -0
  79. vllm/compilation/pass_manager.py +74 -0
  80. vllm/compilation/sequence_parallelism.py +266 -0
  81. vllm/compilation/torch25_custom_graph_pass.py +41 -0
  82. vllm/compilation/vllm_inductor_pass.py +68 -0
  83. vllm/compilation/wrapper.py +129 -0
  84. vllm/config.py +4179 -0
  85. vllm/connections.py +170 -0
  86. vllm/core/__init__.py +0 -0
  87. vllm/core/block/__init__.py +0 -0
  88. vllm/core/block/block_table.py +398 -0
  89. vllm/core/block/common.py +370 -0
  90. vllm/core/block/cpu_gpu_block_allocator.py +440 -0
  91. vllm/core/block/interfaces.py +318 -0
  92. vllm/core/block/naive_block.py +465 -0
  93. vllm/core/block/prefix_caching_block.py +1134 -0
  94. vllm/core/block/utils.py +27 -0
  95. vllm/core/block_manager.py +520 -0
  96. vllm/core/evictor.py +156 -0
  97. vllm/core/interfaces.py +134 -0
  98. vllm/core/placeholder_block_space_manager.py +99 -0
  99. vllm/core/scheduler.py +2060 -0
  100. vllm/device_allocator/__init__.py +0 -0
  101. vllm/device_allocator/cumem.py +280 -0
  102. vllm/distributed/__init__.py +5 -0
  103. vllm/distributed/communication_op.py +40 -0
  104. vllm/distributed/device_communicators/__init__.py +0 -0
  105. vllm/distributed/device_communicators/base_device_communicator.py +151 -0
  106. vllm/distributed/device_communicators/cpu_communicator.py +139 -0
  107. vllm/distributed/device_communicators/cuda_communicator.py +131 -0
  108. vllm/distributed/device_communicators/cuda_wrapper.py +179 -0
  109. vllm/distributed/device_communicators/custom_all_reduce.py +301 -0
  110. vllm/distributed/device_communicators/custom_all_reduce_utils.py +257 -0
  111. vllm/distributed/device_communicators/hpu_communicator.py +45 -0
  112. vllm/distributed/device_communicators/neuron_communicator.py +19 -0
  113. vllm/distributed/device_communicators/pynccl.py +217 -0
  114. vllm/distributed/device_communicators/pynccl_wrapper.py +340 -0
  115. vllm/distributed/device_communicators/shm_broadcast.py +557 -0
  116. vllm/distributed/device_communicators/tpu_communicator.py +93 -0
  117. vllm/distributed/device_communicators/xpu_communicator.py +54 -0
  118. vllm/distributed/kv_transfer/README.md +29 -0
  119. vllm/distributed/kv_transfer/__init__.py +11 -0
  120. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  121. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  122. vllm/distributed/kv_transfer/kv_connector/base.py +127 -0
  123. vllm/distributed/kv_transfer/kv_connector/factory.py +107 -0
  124. vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +98 -0
  125. vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +201 -0
  126. vllm/distributed/kv_transfer/kv_connector/simple_connector.py +328 -0
  127. vllm/distributed/kv_transfer/kv_connector/utils.py +90 -0
  128. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +8 -0
  129. vllm/distributed/kv_transfer/kv_connector/v1/base.py +209 -0
  130. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +131 -0
  131. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +383 -0
  132. vllm/distributed/kv_transfer/kv_connector_agent.py +76 -0
  133. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  134. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +174 -0
  135. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +160 -0
  136. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +236 -0
  137. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  138. vllm/distributed/kv_transfer/kv_pipe/base.py +66 -0
  139. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +279 -0
  140. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +279 -0
  141. vllm/distributed/kv_transfer/kv_transfer_state.py +70 -0
  142. vllm/distributed/parallel_state.py +1209 -0
  143. vllm/distributed/utils.py +366 -0
  144. vllm/engine/__init__.py +0 -0
  145. vllm/engine/arg_utils.py +1724 -0
  146. vllm/engine/async_llm_engine.py +1261 -0
  147. vllm/engine/async_timeout.py +191 -0
  148. vllm/engine/llm_engine.py +2150 -0
  149. vllm/engine/metrics.py +717 -0
  150. vllm/engine/metrics_types.py +96 -0
  151. vllm/engine/multiprocessing/__init__.py +183 -0
  152. vllm/engine/multiprocessing/client.py +745 -0
  153. vllm/engine/multiprocessing/engine.py +450 -0
  154. vllm/engine/output_processor/__init__.py +0 -0
  155. vllm/engine/output_processor/interfaces.py +74 -0
  156. vllm/engine/output_processor/multi_step.py +210 -0
  157. vllm/engine/output_processor/single_step.py +136 -0
  158. vllm/engine/output_processor/stop_checker.py +130 -0
  159. vllm/engine/output_processor/util.py +27 -0
  160. vllm/engine/protocol.py +302 -0
  161. vllm/entrypoints/__init__.py +0 -0
  162. vllm/entrypoints/api_server.py +177 -0
  163. vllm/entrypoints/chat_utils.py +1259 -0
  164. vllm/entrypoints/cli/__init__.py +0 -0
  165. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  166. vllm/entrypoints/cli/benchmark/base.py +38 -0
  167. vllm/entrypoints/cli/benchmark/latency.py +29 -0
  168. vllm/entrypoints/cli/benchmark/main.py +53 -0
  169. vllm/entrypoints/cli/benchmark/serve.py +29 -0
  170. vllm/entrypoints/cli/benchmark/throughput.py +29 -0
  171. vllm/entrypoints/cli/collect_env.py +35 -0
  172. vllm/entrypoints/cli/main.py +59 -0
  173. vllm/entrypoints/cli/openai.py +175 -0
  174. vllm/entrypoints/cli/serve.py +59 -0
  175. vllm/entrypoints/cli/types.py +24 -0
  176. vllm/entrypoints/launcher.py +146 -0
  177. vllm/entrypoints/llm.py +1450 -0
  178. vllm/entrypoints/logger.py +44 -0
  179. vllm/entrypoints/openai/__init__.py +0 -0
  180. vllm/entrypoints/openai/api_server.py +1130 -0
  181. vllm/entrypoints/openai/cli_args.py +296 -0
  182. vllm/entrypoints/openai/logits_processors.py +89 -0
  183. vllm/entrypoints/openai/protocol.py +1806 -0
  184. vllm/entrypoints/openai/run_batch.py +439 -0
  185. vllm/entrypoints/openai/serving_chat.py +1210 -0
  186. vllm/entrypoints/openai/serving_completion.py +557 -0
  187. vllm/entrypoints/openai/serving_embedding.py +245 -0
  188. vllm/entrypoints/openai/serving_engine.py +569 -0
  189. vllm/entrypoints/openai/serving_models.py +314 -0
  190. vllm/entrypoints/openai/serving_pooling.py +237 -0
  191. vllm/entrypoints/openai/serving_score.py +439 -0
  192. vllm/entrypoints/openai/serving_tokenization.py +147 -0
  193. vllm/entrypoints/openai/serving_transcription.py +421 -0
  194. vllm/entrypoints/openai/tool_parsers/__init__.py +19 -0
  195. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +163 -0
  196. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +254 -0
  197. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +232 -0
  198. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +370 -0
  199. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +211 -0
  200. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +303 -0
  201. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +262 -0
  202. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +342 -0
  203. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +110 -0
  204. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +292 -0
  205. vllm/entrypoints/openai/tool_parsers/utils.py +123 -0
  206. vllm/entrypoints/score_utils.py +49 -0
  207. vllm/entrypoints/ssl.py +74 -0
  208. vllm/entrypoints/utils.py +136 -0
  209. vllm/env_override.py +34 -0
  210. vllm/envs.py +800 -0
  211. vllm/executor/__init__.py +0 -0
  212. vllm/executor/executor_base.py +400 -0
  213. vllm/executor/mp_distributed_executor.py +243 -0
  214. vllm/executor/msgspec_utils.py +29 -0
  215. vllm/executor/multiproc_worker_utils.py +312 -0
  216. vllm/executor/ray_distributed_executor.py +700 -0
  217. vllm/executor/ray_utils.py +400 -0
  218. vllm/executor/uniproc_executor.py +141 -0
  219. vllm/forward_context.py +159 -0
  220. vllm/inputs/__init__.py +37 -0
  221. vllm/inputs/data.py +248 -0
  222. vllm/inputs/parse.py +121 -0
  223. vllm/inputs/preprocess.py +745 -0
  224. vllm/inputs/registry.py +212 -0
  225. vllm/jsontree.py +79 -0
  226. vllm/logger.py +210 -0
  227. vllm/logging_utils/__init__.py +7 -0
  228. vllm/logging_utils/formatter.py +17 -0
  229. vllm/logits_process.py +121 -0
  230. vllm/lora/__init__.py +0 -0
  231. vllm/lora/fully_sharded_layers.py +335 -0
  232. vllm/lora/layers.py +1263 -0
  233. vllm/lora/lora.py +198 -0
  234. vllm/lora/models.py +802 -0
  235. vllm/lora/ops/__init__.py +0 -0
  236. vllm/lora/ops/torch_ops/__init__.py +15 -0
  237. vllm/lora/ops/torch_ops/lora_ops.py +115 -0
  238. vllm/lora/ops/triton_ops/__init__.py +11 -0
  239. vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
  240. vllm/lora/ops/triton_ops/lora_expand.py +293 -0
  241. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +147 -0
  242. vllm/lora/ops/triton_ops/lora_shrink.py +247 -0
  243. vllm/lora/ops/triton_ops/utils.py +121 -0
  244. vllm/lora/peft_helper.py +115 -0
  245. vllm/lora/punica_wrapper/__init__.py +9 -0
  246. vllm/lora/punica_wrapper/punica_base.py +483 -0
  247. vllm/lora/punica_wrapper/punica_cpu.py +348 -0
  248. vllm/lora/punica_wrapper/punica_gpu.py +289 -0
  249. vllm/lora/punica_wrapper/punica_hpu.py +144 -0
  250. vllm/lora/punica_wrapper/punica_selector.py +20 -0
  251. vllm/lora/punica_wrapper/utils.py +161 -0
  252. vllm/lora/request.py +97 -0
  253. vllm/lora/resolver.py +83 -0
  254. vllm/lora/utils.py +237 -0
  255. vllm/lora/worker_manager.py +251 -0
  256. vllm/model_executor/__init__.py +15 -0
  257. vllm/model_executor/custom_op.py +153 -0
  258. vllm/model_executor/guided_decoding/__init__.py +180 -0
  259. vllm/model_executor/guided_decoding/guidance_decoding.py +63 -0
  260. vllm/model_executor/guided_decoding/guidance_logits_processors.py +85 -0
  261. vllm/model_executor/guided_decoding/guided_fields.py +42 -0
  262. vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +66 -0
  263. vllm/model_executor/guided_decoding/outlines_decoding.py +154 -0
  264. vllm/model_executor/guided_decoding/outlines_logits_processors.py +271 -0
  265. vllm/model_executor/guided_decoding/reasoner/__init__.py +35 -0
  266. vllm/model_executor/guided_decoding/utils.py +241 -0
  267. vllm/model_executor/guided_decoding/xgrammar_decoding.py +425 -0
  268. vllm/model_executor/layers/__init__.py +0 -0
  269. vllm/model_executor/layers/activation.py +368 -0
  270. vllm/model_executor/layers/fused_moe/__init__.py +51 -0
  271. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  272. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  273. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  274. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  275. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  276. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  277. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  278. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  279. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  280. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  281. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  282. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  283. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  284. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  285. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  286. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  287. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  288. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  289. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  290. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  291. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  292. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  293. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  294. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  295. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  296. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  297. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  298. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  299. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  300. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  301. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  302. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  303. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  304. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  305. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  306. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  307. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  308. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  309. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  310. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  311. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  312. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  313. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  314. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  315. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  316. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  317. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  318. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  319. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  320. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  321. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  322. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  323. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  324. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  325. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  326. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  327. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  328. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  329. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  330. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  331. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  332. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  333. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  334. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  335. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  336. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  337. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  338. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  339. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  425. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  426. vllm/model_executor/layers/fused_moe/cutlass_moe.py +180 -0
  427. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +294 -0
  428. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +374 -0
  429. vllm/model_executor/layers/fused_moe/fused_moe.py +1539 -0
  430. vllm/model_executor/layers/fused_moe/layer.py +949 -0
  431. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +243 -0
  432. vllm/model_executor/layers/fused_moe/moe_pallas.py +64 -0
  433. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +59 -0
  434. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +416 -0
  435. vllm/model_executor/layers/fused_moe/utils.py +48 -0
  436. vllm/model_executor/layers/layernorm.py +277 -0
  437. vllm/model_executor/layers/lightning_attn.py +651 -0
  438. vllm/model_executor/layers/linear.py +1518 -0
  439. vllm/model_executor/layers/logits_processor.py +196 -0
  440. vllm/model_executor/layers/mamba/__init__.py +0 -0
  441. vllm/model_executor/layers/mamba/mamba2_metadata.py +109 -0
  442. vllm/model_executor/layers/mamba/mamba_mixer.py +244 -0
  443. vllm/model_executor/layers/mamba/mamba_mixer2.py +538 -0
  444. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  445. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +104 -0
  446. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +415 -0
  447. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +261 -0
  448. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +588 -0
  449. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +750 -0
  450. vllm/model_executor/layers/mamba/ops/ssd_combined.py +231 -0
  451. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +205 -0
  452. vllm/model_executor/layers/pooler.py +336 -0
  453. vllm/model_executor/layers/quantization/__init__.py +153 -0
  454. vllm/model_executor/layers/quantization/aqlm.py +374 -0
  455. vllm/model_executor/layers/quantization/awq.py +184 -0
  456. vllm/model_executor/layers/quantization/awq_marlin.py +518 -0
  457. vllm/model_executor/layers/quantization/awq_triton.py +319 -0
  458. vllm/model_executor/layers/quantization/base_config.py +145 -0
  459. vllm/model_executor/layers/quantization/bitblas.py +459 -0
  460. vllm/model_executor/layers/quantization/bitsandbytes.py +396 -0
  461. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  462. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +624 -0
  463. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1100 -0
  464. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +20 -0
  465. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +357 -0
  466. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +54 -0
  467. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +159 -0
  468. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +119 -0
  469. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +149 -0
  470. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +110 -0
  471. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +200 -0
  472. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +205 -0
  473. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +213 -0
  474. vllm/model_executor/layers/quantization/deepspeedfp.py +193 -0
  475. vllm/model_executor/layers/quantization/experts_int8.py +194 -0
  476. vllm/model_executor/layers/quantization/fbgemm_fp8.py +168 -0
  477. vllm/model_executor/layers/quantization/fp8.py +832 -0
  478. vllm/model_executor/layers/quantization/gguf.py +408 -0
  479. vllm/model_executor/layers/quantization/gptq.py +276 -0
  480. vllm/model_executor/layers/quantization/gptq_bitblas.py +438 -0
  481. vllm/model_executor/layers/quantization/gptq_marlin.py +643 -0
  482. vllm/model_executor/layers/quantization/gptq_marlin_24.py +295 -0
  483. vllm/model_executor/layers/quantization/hqq_marlin.py +328 -0
  484. vllm/model_executor/layers/quantization/ipex_quant.py +250 -0
  485. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  486. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +89 -0
  487. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +82 -0
  488. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
  489. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +299 -0
  490. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +142 -0
  491. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +119 -0
  492. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +132 -0
  493. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +66 -0
  494. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +86 -0
  495. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +119 -0
  496. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +136 -0
  497. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +40 -0
  498. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +104 -0
  499. vllm/model_executor/layers/quantization/kv_cache.py +137 -0
  500. vllm/model_executor/layers/quantization/marlin.py +259 -0
  501. vllm/model_executor/layers/quantization/modelopt.py +410 -0
  502. vllm/model_executor/layers/quantization/moe_wna16.py +447 -0
  503. vllm/model_executor/layers/quantization/neuron_quant.py +67 -0
  504. vllm/model_executor/layers/quantization/ptpc_fp8.py +125 -0
  505. vllm/model_executor/layers/quantization/qqq.py +273 -0
  506. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  507. vllm/model_executor/layers/quantization/quark/quark.py +385 -0
  508. vllm/model_executor/layers/quantization/quark/quark_moe.py +236 -0
  509. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +7 -0
  510. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +54 -0
  511. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +142 -0
  512. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +121 -0
  513. vllm/model_executor/layers/quantization/quark/utils.py +102 -0
  514. vllm/model_executor/layers/quantization/schema.py +85 -0
  515. vllm/model_executor/layers/quantization/torchao.py +127 -0
  516. vllm/model_executor/layers/quantization/tpu_int8.py +119 -0
  517. vllm/model_executor/layers/quantization/utils/__init__.py +5 -0
  518. vllm/model_executor/layers/quantization/utils/allspark_utils.py +51 -0
  519. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +198 -0
  520. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  521. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  522. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  523. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  524. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  525. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  526. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  527. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  528. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  529. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  530. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  531. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  532. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  533. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  534. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  535. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  536. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  537. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  538. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  539. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  540. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  541. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  542. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  543. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  544. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  545. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  546. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  547. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  548. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  549. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  550. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  551. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  552. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  553. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  554. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  555. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  556. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  557. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  558. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  559. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  560. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  561. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  562. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  563. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  564. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  565. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  566. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  567. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  568. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  569. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  570. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  571. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  572. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  573. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  574. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  575. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  576. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  577. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  578. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  579. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  580. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  581. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  582. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  583. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  584. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  585. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  586. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  587. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  588. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  589. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  590. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  591. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  592. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  593. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  594. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  595. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  596. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  597. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  598. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  599. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  600. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  601. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  602. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  603. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  604. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  605. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  606. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  607. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  608. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  609. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  610. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  611. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  612. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  613. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  614. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  615. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  616. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  617. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  618. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  619. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  620. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  621. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  622. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  623. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  624. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  625. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  626. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  627. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  628. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  629. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  630. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  631. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  632. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  633. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  634. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  635. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  636. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  637. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  638. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  639. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  640. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  641. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  642. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  643. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  644. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  645. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  646. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  647. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  648. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  649. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  650. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  651. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  652. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  653. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  654. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  655. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  656. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  657. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  658. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  659. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  660. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  661. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  662. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  663. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  664. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  665. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  666. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  667. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  668. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  669. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  670. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  671. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  672. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  673. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  674. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  675. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  676. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  677. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  678. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  679. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  680. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  681. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  682. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  683. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  684. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  685. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  686. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  687. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  688. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  689. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  690. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  691. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  722. vllm/model_executor/layers/quantization/utils/fp8_utils.py +523 -0
  723. vllm/model_executor/layers/quantization/utils/gptq_utils.py +94 -0
  724. vllm/model_executor/layers/quantization/utils/int8_utils.py +459 -0
  725. vllm/model_executor/layers/quantization/utils/layer_utils.py +39 -0
  726. vllm/model_executor/layers/quantization/utils/machete_utils.py +32 -0
  727. vllm/model_executor/layers/quantization/utils/marlin_utils.py +413 -0
  728. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +110 -0
  729. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +164 -0
  730. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
  731. vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +127 -0
  732. vllm/model_executor/layers/quantization/utils/quant_utils.py +571 -0
  733. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +404 -0
  734. vllm/model_executor/layers/rejection_sampler.py +400 -0
  735. vllm/model_executor/layers/resampler.py +269 -0
  736. vllm/model_executor/layers/rotary_embedding.py +1598 -0
  737. vllm/model_executor/layers/sampler.py +1221 -0
  738. vllm/model_executor/layers/spec_decode_base_sampler.py +258 -0
  739. vllm/model_executor/layers/typical_acceptance_sampler.py +172 -0
  740. vllm/model_executor/layers/utils.py +99 -0
  741. vllm/model_executor/layers/vocab_parallel_embedding.py +485 -0
  742. vllm/model_executor/model_loader/__init__.py +20 -0
  743. vllm/model_executor/model_loader/loader.py +1542 -0
  744. vllm/model_executor/model_loader/neuron.py +243 -0
  745. vllm/model_executor/model_loader/tensorizer.py +468 -0
  746. vllm/model_executor/model_loader/utils.py +171 -0
  747. vllm/model_executor/model_loader/weight_utils.py +749 -0
  748. vllm/model_executor/models/__init__.py +27 -0
  749. vllm/model_executor/models/adapters.py +247 -0
  750. vllm/model_executor/models/arctic.py +559 -0
  751. vllm/model_executor/models/aria.py +656 -0
  752. vllm/model_executor/models/aya_vision.py +461 -0
  753. vllm/model_executor/models/baichuan.py +469 -0
  754. vllm/model_executor/models/bamba.py +542 -0
  755. vllm/model_executor/models/bart.py +936 -0
  756. vllm/model_executor/models/bert.py +725 -0
  757. vllm/model_executor/models/blip.py +337 -0
  758. vllm/model_executor/models/blip2.py +717 -0
  759. vllm/model_executor/models/bloom.py +358 -0
  760. vllm/model_executor/models/chameleon.py +1135 -0
  761. vllm/model_executor/models/chatglm.py +476 -0
  762. vllm/model_executor/models/clip.py +410 -0
  763. vllm/model_executor/models/commandr.py +466 -0
  764. vllm/model_executor/models/constant_size_cache.py +136 -0
  765. vllm/model_executor/models/dbrx.py +469 -0
  766. vllm/model_executor/models/deepseek.py +484 -0
  767. vllm/model_executor/models/deepseek_mtp.py +266 -0
  768. vllm/model_executor/models/deepseek_v2.py +830 -0
  769. vllm/model_executor/models/deepseek_vl2.py +647 -0
  770. vllm/model_executor/models/eagle.py +247 -0
  771. vllm/model_executor/models/exaone.py +548 -0
  772. vllm/model_executor/models/fairseq2_llama.py +153 -0
  773. vllm/model_executor/models/falcon.py +508 -0
  774. vllm/model_executor/models/florence2.py +1102 -0
  775. vllm/model_executor/models/fuyu.py +388 -0
  776. vllm/model_executor/models/gemma.py +423 -0
  777. vllm/model_executor/models/gemma2.py +423 -0
  778. vllm/model_executor/models/gemma3.py +531 -0
  779. vllm/model_executor/models/gemma3_mm.py +716 -0
  780. vllm/model_executor/models/glm.py +22 -0
  781. vllm/model_executor/models/glm4.py +303 -0
  782. vllm/model_executor/models/glm4v.py +647 -0
  783. vllm/model_executor/models/gpt2.py +313 -0
  784. vllm/model_executor/models/gpt_bigcode.py +336 -0
  785. vllm/model_executor/models/gpt_j.py +337 -0
  786. vllm/model_executor/models/gpt_neox.py +330 -0
  787. vllm/model_executor/models/granite.py +494 -0
  788. vllm/model_executor/models/granite_speech.py +777 -0
  789. vllm/model_executor/models/granitemoe.py +435 -0
  790. vllm/model_executor/models/granitemoeshared.py +339 -0
  791. vllm/model_executor/models/gritlm.py +245 -0
  792. vllm/model_executor/models/grok1.py +560 -0
  793. vllm/model_executor/models/h2ovl.py +542 -0
  794. vllm/model_executor/models/idefics2_vision_model.py +387 -0
  795. vllm/model_executor/models/idefics3.py +767 -0
  796. vllm/model_executor/models/interfaces.py +569 -0
  797. vllm/model_executor/models/interfaces_base.py +163 -0
  798. vllm/model_executor/models/intern_vit.py +476 -0
  799. vllm/model_executor/models/internlm2.py +453 -0
  800. vllm/model_executor/models/internlm2_ve.py +146 -0
  801. vllm/model_executor/models/internvl.py +945 -0
  802. vllm/model_executor/models/jais.py +371 -0
  803. vllm/model_executor/models/jamba.py +590 -0
  804. vllm/model_executor/models/kimi_vl.py +577 -0
  805. vllm/model_executor/models/llama.py +619 -0
  806. vllm/model_executor/models/llama4.py +530 -0
  807. vllm/model_executor/models/llama_eagle.py +152 -0
  808. vllm/model_executor/models/llama_eagle3.py +232 -0
  809. vllm/model_executor/models/llava.py +869 -0
  810. vllm/model_executor/models/llava_next.py +582 -0
  811. vllm/model_executor/models/llava_next_video.py +470 -0
  812. vllm/model_executor/models/llava_onevision.py +954 -0
  813. vllm/model_executor/models/mamba.py +271 -0
  814. vllm/model_executor/models/mamba2.py +302 -0
  815. vllm/model_executor/models/mamba_cache.py +76 -0
  816. vllm/model_executor/models/medusa.py +210 -0
  817. vllm/model_executor/models/minicpm.py +592 -0
  818. vllm/model_executor/models/minicpm3.py +229 -0
  819. vllm/model_executor/models/minicpmo.py +725 -0
  820. vllm/model_executor/models/minicpmv.py +1287 -0
  821. vllm/model_executor/models/minimax_cache.py +35 -0
  822. vllm/model_executor/models/minimax_text_01.py +1261 -0
  823. vllm/model_executor/models/mistral3.py +598 -0
  824. vllm/model_executor/models/mixtral.py +485 -0
  825. vllm/model_executor/models/mixtral_quant.py +447 -0
  826. vllm/model_executor/models/mllama.py +1623 -0
  827. vllm/model_executor/models/mllama4.py +838 -0
  828. vllm/model_executor/models/mlp_speculator.py +205 -0
  829. vllm/model_executor/models/modernbert.py +325 -0
  830. vllm/model_executor/models/module_mapping.py +71 -0
  831. vllm/model_executor/models/molmo.py +1567 -0
  832. vllm/model_executor/models/moonvit.py +628 -0
  833. vllm/model_executor/models/mpt.py +329 -0
  834. vllm/model_executor/models/nemotron.py +506 -0
  835. vllm/model_executor/models/nemotron_nas.py +446 -0
  836. vllm/model_executor/models/nvlm_d.py +212 -0
  837. vllm/model_executor/models/olmo.py +390 -0
  838. vllm/model_executor/models/olmo2.py +412 -0
  839. vllm/model_executor/models/olmoe.py +449 -0
  840. vllm/model_executor/models/opt.py +410 -0
  841. vllm/model_executor/models/orion.py +356 -0
  842. vllm/model_executor/models/paligemma.py +397 -0
  843. vllm/model_executor/models/persimmon.py +342 -0
  844. vllm/model_executor/models/phi.py +354 -0
  845. vllm/model_executor/models/phi3.py +18 -0
  846. vllm/model_executor/models/phi3_small.py +463 -0
  847. vllm/model_executor/models/phi3v.py +722 -0
  848. vllm/model_executor/models/phi4mm.py +1263 -0
  849. vllm/model_executor/models/phi4mm_audio.py +1232 -0
  850. vllm/model_executor/models/phi4mm_utils.py +1883 -0
  851. vllm/model_executor/models/phimoe.py +666 -0
  852. vllm/model_executor/models/pixtral.py +1281 -0
  853. vllm/model_executor/models/plamo2.py +736 -0
  854. vllm/model_executor/models/prithvi_geospatial_mae.py +231 -0
  855. vllm/model_executor/models/qwen.py +360 -0
  856. vllm/model_executor/models/qwen2.py +552 -0
  857. vllm/model_executor/models/qwen2_5_omni_thinker.py +901 -0
  858. vllm/model_executor/models/qwen2_5_vl.py +1136 -0
  859. vllm/model_executor/models/qwen2_audio.py +402 -0
  860. vllm/model_executor/models/qwen2_moe.py +531 -0
  861. vllm/model_executor/models/qwen2_rm.py +130 -0
  862. vllm/model_executor/models/qwen2_vl.py +1409 -0
  863. vllm/model_executor/models/qwen3.py +319 -0
  864. vllm/model_executor/models/qwen3_moe.py +528 -0
  865. vllm/model_executor/models/qwen_vl.py +784 -0
  866. vllm/model_executor/models/registry.py +611 -0
  867. vllm/model_executor/models/roberta.py +332 -0
  868. vllm/model_executor/models/siglip.py +522 -0
  869. vllm/model_executor/models/skyworkr1v.py +949 -0
  870. vllm/model_executor/models/smolvlm.py +51 -0
  871. vllm/model_executor/models/solar.py +504 -0
  872. vllm/model_executor/models/stablelm.py +349 -0
  873. vllm/model_executor/models/starcoder2.py +355 -0
  874. vllm/model_executor/models/telechat2.py +139 -0
  875. vllm/model_executor/models/teleflm.py +78 -0
  876. vllm/model_executor/models/transformers.py +442 -0
  877. vllm/model_executor/models/ultravox.py +655 -0
  878. vllm/model_executor/models/utils.py +714 -0
  879. vllm/model_executor/models/vision.py +149 -0
  880. vllm/model_executor/models/whisper.py +746 -0
  881. vllm/model_executor/models/zamba2.py +1008 -0
  882. vllm/model_executor/parameter.py +458 -0
  883. vllm/model_executor/pooling_metadata.py +71 -0
  884. vllm/model_executor/sampling_metadata.py +596 -0
  885. vllm/model_executor/utils.py +53 -0
  886. vllm/multimodal/__init__.py +31 -0
  887. vllm/multimodal/audio.py +105 -0
  888. vllm/multimodal/base.py +218 -0
  889. vllm/multimodal/hasher.py +103 -0
  890. vllm/multimodal/image.py +77 -0
  891. vllm/multimodal/inputs.py +843 -0
  892. vllm/multimodal/parse.py +454 -0
  893. vllm/multimodal/processing.py +1760 -0
  894. vllm/multimodal/profiling.py +274 -0
  895. vllm/multimodal/registry.py +321 -0
  896. vllm/multimodal/utils.py +386 -0
  897. vllm/multimodal/video.py +166 -0
  898. vllm/outputs.py +521 -0
  899. vllm/platforms/__init__.py +286 -0
  900. vllm/platforms/cpu.py +182 -0
  901. vllm/platforms/cuda.py +463 -0
  902. vllm/platforms/hpu.py +94 -0
  903. vllm/platforms/interface.py +427 -0
  904. vllm/platforms/neuron.py +69 -0
  905. vllm/platforms/rocm.py +346 -0
  906. vllm/platforms/tpu.py +174 -0
  907. vllm/platforms/xpu.py +142 -0
  908. vllm/plugins/__init__.py +82 -0
  909. vllm/pooling_params.py +53 -0
  910. vllm/profiler/__init__.py +7 -0
  911. vllm/profiler/layerwise_profile.py +374 -0
  912. vllm/profiler/utils.py +147 -0
  913. vllm/prompt_adapter/__init__.py +0 -0
  914. vllm/prompt_adapter/layers.py +82 -0
  915. vllm/prompt_adapter/models.py +357 -0
  916. vllm/prompt_adapter/request.py +36 -0
  917. vllm/prompt_adapter/utils.py +97 -0
  918. vllm/prompt_adapter/worker_manager.py +178 -0
  919. vllm/py.typed +2 -0
  920. vllm/reasoning/__init__.py +12 -0
  921. vllm/reasoning/abs_reasoning_parsers.py +189 -0
  922. vllm/reasoning/deepseek_r1_reasoning_parser.py +172 -0
  923. vllm/reasoning/granite_reasoning_parser.py +362 -0
  924. vllm/sampling_params.py +598 -0
  925. vllm/scalar_type.py +335 -0
  926. vllm/scripts.py +14 -0
  927. vllm/sequence.py +1486 -0
  928. vllm/spec_decode/__init__.py +0 -0
  929. vllm/spec_decode/batch_expansion.py +505 -0
  930. vllm/spec_decode/draft_model_runner.py +335 -0
  931. vllm/spec_decode/interfaces.py +98 -0
  932. vllm/spec_decode/medusa_worker.py +137 -0
  933. vllm/spec_decode/metrics.py +212 -0
  934. vllm/spec_decode/mlp_speculator_worker.py +93 -0
  935. vllm/spec_decode/mqa_scorer.py +159 -0
  936. vllm/spec_decode/multi_step_worker.py +416 -0
  937. vllm/spec_decode/ngram_worker.py +195 -0
  938. vllm/spec_decode/proposer_worker_base.py +58 -0
  939. vllm/spec_decode/smaller_tp_proposer_worker.py +194 -0
  940. vllm/spec_decode/spec_decode_worker.py +1324 -0
  941. vllm/spec_decode/target_model_runner.py +44 -0
  942. vllm/spec_decode/top1_proposer.py +274 -0
  943. vllm/spec_decode/util.py +276 -0
  944. vllm/test_utils.py +129 -0
  945. vllm/third_party/__init__.py +0 -0
  946. vllm/third_party/pynvml.py +6139 -0
  947. vllm/tracing.py +130 -0
  948. vllm/transformers_utils/__init__.py +19 -0
  949. vllm/transformers_utils/config.py +813 -0
  950. vllm/transformers_utils/configs/__init__.py +52 -0
  951. vllm/transformers_utils/configs/arctic.py +206 -0
  952. vllm/transformers_utils/configs/chatglm.py +71 -0
  953. vllm/transformers_utils/configs/cohere2.py +194 -0
  954. vllm/transformers_utils/configs/dbrx.py +280 -0
  955. vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
  956. vllm/transformers_utils/configs/eagle.py +65 -0
  957. vllm/transformers_utils/configs/exaone.py +191 -0
  958. vllm/transformers_utils/configs/falcon.py +89 -0
  959. vllm/transformers_utils/configs/h2ovl.py +15 -0
  960. vllm/transformers_utils/configs/internvl.py +53 -0
  961. vllm/transformers_utils/configs/jais.py +237 -0
  962. vllm/transformers_utils/configs/kimi_vl.py +36 -0
  963. vllm/transformers_utils/configs/medusa.py +62 -0
  964. vllm/transformers_utils/configs/mllama.py +30 -0
  965. vllm/transformers_utils/configs/mlp_speculator.py +67 -0
  966. vllm/transformers_utils/configs/moonvit.py +32 -0
  967. vllm/transformers_utils/configs/mpt.py +179 -0
  968. vllm/transformers_utils/configs/nemotron.py +204 -0
  969. vllm/transformers_utils/configs/nvlm_d.py +14 -0
  970. vllm/transformers_utils/configs/skyworkr1v.py +53 -0
  971. vllm/transformers_utils/configs/solar.py +246 -0
  972. vllm/transformers_utils/configs/telechat2.py +63 -0
  973. vllm/transformers_utils/configs/ultravox.py +107 -0
  974. vllm/transformers_utils/detokenizer.py +167 -0
  975. vllm/transformers_utils/detokenizer_utils.py +188 -0
  976. vllm/transformers_utils/processor.py +210 -0
  977. vllm/transformers_utils/processors/__init__.py +6 -0
  978. vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
  979. vllm/transformers_utils/s3_utils.py +161 -0
  980. vllm/transformers_utils/tokenizer.py +291 -0
  981. vllm/transformers_utils/tokenizer_base.py +146 -0
  982. vllm/transformers_utils/tokenizer_group.py +110 -0
  983. vllm/transformers_utils/tokenizers/__init__.py +9 -0
  984. vllm/transformers_utils/tokenizers/mistral.py +483 -0
  985. vllm/transformers_utils/utils.py +98 -0
  986. vllm/triton_utils/__init__.py +5 -0
  987. vllm/triton_utils/importing.py +53 -0
  988. vllm/usage/__init__.py +0 -0
  989. vllm/usage/usage_lib.py +255 -0
  990. vllm/utils.py +2692 -0
  991. vllm/v1/__init__.py +0 -0
  992. vllm/v1/attention/__init__.py +0 -0
  993. vllm/v1/attention/backends/__init__.py +0 -0
  994. vllm/v1/attention/backends/flash_attn.py +783 -0
  995. vllm/v1/attention/backends/flashinfer.py +638 -0
  996. vllm/v1/attention/backends/mla/__init__.py +0 -0
  997. vllm/v1/attention/backends/mla/common.py +974 -0
  998. vllm/v1/attention/backends/mla/flashmla.py +149 -0
  999. vllm/v1/attention/backends/mla/triton_mla.py +118 -0
  1000. vllm/v1/attention/backends/pallas.py +221 -0
  1001. vllm/v1/attention/backends/triton_attn.py +198 -0
  1002. vllm/v1/core/__init__.py +0 -0
  1003. vllm/v1/core/block_pool.py +281 -0
  1004. vllm/v1/core/encoder_cache_manager.py +149 -0
  1005. vllm/v1/core/kv_cache_manager.py +385 -0
  1006. vllm/v1/core/kv_cache_utils.py +744 -0
  1007. vllm/v1/core/sched/__init__.py +0 -0
  1008. vllm/v1/core/sched/interface.py +134 -0
  1009. vllm/v1/core/sched/output.py +126 -0
  1010. vllm/v1/core/sched/scheduler.py +838 -0
  1011. vllm/v1/core/sched/utils.py +22 -0
  1012. vllm/v1/core/specialized_manager.py +161 -0
  1013. vllm/v1/engine/__init__.py +166 -0
  1014. vllm/v1/engine/async_llm.py +532 -0
  1015. vllm/v1/engine/core.py +701 -0
  1016. vllm/v1/engine/core_client.py +942 -0
  1017. vllm/v1/engine/detokenizer.py +260 -0
  1018. vllm/v1/engine/exceptions.py +16 -0
  1019. vllm/v1/engine/llm_engine.py +285 -0
  1020. vllm/v1/engine/logprobs.py +198 -0
  1021. vllm/v1/engine/mm_input_cache.py +82 -0
  1022. vllm/v1/engine/output_processor.py +420 -0
  1023. vllm/v1/engine/parallel_sampling.py +132 -0
  1024. vllm/v1/engine/processor.py +387 -0
  1025. vllm/v1/executor/__init__.py +0 -0
  1026. vllm/v1/executor/abstract.py +112 -0
  1027. vllm/v1/executor/multiproc_executor.py +480 -0
  1028. vllm/v1/executor/ray_distributed_executor.py +61 -0
  1029. vllm/v1/kv_cache_interface.py +166 -0
  1030. vllm/v1/metrics/__init__.py +0 -0
  1031. vllm/v1/metrics/loggers.py +498 -0
  1032. vllm/v1/metrics/stats.py +238 -0
  1033. vllm/v1/outputs.py +111 -0
  1034. vllm/v1/request.py +178 -0
  1035. vllm/v1/sample/__init__.py +0 -0
  1036. vllm/v1/sample/metadata.py +43 -0
  1037. vllm/v1/sample/ops/__init__.py +0 -0
  1038. vllm/v1/sample/ops/bad_words.py +38 -0
  1039. vllm/v1/sample/ops/penalties.py +58 -0
  1040. vllm/v1/sample/ops/topk_topp_sampler.py +315 -0
  1041. vllm/v1/sample/rejection_sampler.py +631 -0
  1042. vllm/v1/sample/sampler.py +270 -0
  1043. vllm/v1/sample/tpu/__init__.py +0 -0
  1044. vllm/v1/sample/tpu/metadata.py +118 -0
  1045. vllm/v1/sample/tpu/sampler.py +154 -0
  1046. vllm/v1/serial_utils.py +274 -0
  1047. vllm/v1/spec_decode/__init__.py +0 -0
  1048. vllm/v1/spec_decode/eagle.py +318 -0
  1049. vllm/v1/spec_decode/metadata.py +61 -0
  1050. vllm/v1/spec_decode/metrics.py +164 -0
  1051. vllm/v1/spec_decode/ngram_proposer.py +131 -0
  1052. vllm/v1/spec_decode/utils.py +18 -0
  1053. vllm/v1/stats/__init__.py +0 -0
  1054. vllm/v1/stats/common.py +453 -0
  1055. vllm/v1/structured_output/__init__.py +113 -0
  1056. vllm/v1/structured_output/backend_guidance.py +215 -0
  1057. vllm/v1/structured_output/backend_types.py +96 -0
  1058. vllm/v1/structured_output/backend_xgrammar.py +299 -0
  1059. vllm/v1/structured_output/request.py +84 -0
  1060. vllm/v1/structured_output/utils.py +174 -0
  1061. vllm/v1/utils.py +249 -0
  1062. vllm/v1/worker/__init__.py +0 -0
  1063. vllm/v1/worker/block_table.py +87 -0
  1064. vllm/v1/worker/gpu_input_batch.py +677 -0
  1065. vllm/v1/worker/gpu_model_runner.py +1776 -0
  1066. vllm/v1/worker/gpu_worker.py +349 -0
  1067. vllm/v1/worker/lora_model_runner_mixin.py +145 -0
  1068. vllm/v1/worker/tpu_model_runner.py +1419 -0
  1069. vllm/v1/worker/tpu_worker.py +260 -0
  1070. vllm/v1/worker/utils.py +74 -0
  1071. vllm/v1/worker/worker_base.py +64 -0
  1072. vllm/version.py +40 -0
  1073. vllm/vllm_flash_attn/.gitkeep +0 -0
  1074. vllm/worker/__init__.py +0 -0
  1075. vllm/worker/cache_engine.py +144 -0
  1076. vllm/worker/cpu_enc_dec_model_runner.py +323 -0
  1077. vllm/worker/cpu_model_runner.py +668 -0
  1078. vllm/worker/cpu_pooling_model_runner.py +122 -0
  1079. vllm/worker/cpu_worker.py +400 -0
  1080. vllm/worker/enc_dec_model_runner.py +542 -0
  1081. vllm/worker/hpu_model_runner.py +2221 -0
  1082. vllm/worker/hpu_worker.py +483 -0
  1083. vllm/worker/model_runner.py +2056 -0
  1084. vllm/worker/model_runner_base.py +281 -0
  1085. vllm/worker/multi_step_hpu_worker.py +122 -0
  1086. vllm/worker/multi_step_model_runner.py +908 -0
  1087. vllm/worker/multi_step_tpu_worker.py +107 -0
  1088. vllm/worker/multi_step_worker.py +196 -0
  1089. vllm/worker/neuron_model_runner.py +336 -0
  1090. vllm/worker/neuron_worker.py +138 -0
  1091. vllm/worker/pooling_model_runner.py +200 -0
  1092. vllm/worker/tpu_model_runner.py +908 -0
  1093. vllm/worker/tpu_worker.py +332 -0
  1094. vllm/worker/utils.py +52 -0
  1095. vllm/worker/worker.py +570 -0
  1096. vllm/worker/worker_base.py +644 -0
  1097. vllm/worker/xpu_model_runner.py +603 -0
  1098. vllm/worker/xpu_worker.py +185 -0
  1099. vllm_cpu-0.8.5.post2.dist-info/METADATA +309 -0
  1100. vllm_cpu-0.8.5.post2.dist-info/RECORD +1103 -0
  1101. vllm_cpu-0.8.5.post2.dist-info/WHEEL +5 -0
  1102. vllm_cpu-0.8.5.post2.dist-info/entry_points.txt +2 -0
  1103. vllm_cpu-0.8.5.post2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1419 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ import bisect
3
+ import gc
4
+ import time
5
+ from typing import TYPE_CHECKING, Optional, cast
6
+ from unittest.mock import patch
7
+
8
+ import numpy as np
9
+ import torch
10
+ import torch.distributed
11
+ import torch.nn as nn
12
+ # TPU XLA related
13
+ import torch_xla.core.xla_model as xm
14
+ import torch_xla.runtime as xr
15
+
16
+ import vllm.envs as envs
17
+ from vllm.attention.backends.abstract import AttentionType
18
+ from vllm.attention.layer import Attention
19
+ from vllm.compilation.wrapper import TorchCompileWrapperWithCustomDispatcher
20
+ from vllm.config import VllmConfig, get_layers_from_vllm_config
21
+ from vllm.forward_context import set_forward_context
22
+ from vllm.logger import init_logger
23
+ from vllm.model_executor.model_loader import get_model
24
+ from vllm.multimodal import MULTIMODAL_REGISTRY
25
+ from vllm.multimodal.inputs import (BatchedTensorInputs, MultiModalKwargs,
26
+ PlaceholderRange)
27
+ from vllm.multimodal.utils import group_mm_inputs_by_modality
28
+ from vllm.sequence import IntermediateTensors
29
+ from vllm.utils import LayerBlockType, cdiv, is_pin_memory_available
30
+ from vllm.v1.attention.backends.pallas import (PallasAttentionBackend,
31
+ PallasMetadata)
32
+ from vllm.v1.core.encoder_cache_manager import compute_encoder_budget
33
+ from vllm.v1.kv_cache_interface import (AttentionSpec, FullAttentionSpec,
34
+ KVCacheConfig, KVCacheSpec,
35
+ SlidingWindowSpec)
36
+ from vllm.v1.outputs import (EMPTY_MODEL_RUNNER_OUTPUT, LogprobsTensors,
37
+ ModelRunnerOutput)
38
+ from vllm.v1.sample.tpu.metadata import TPUSupportedSamplingMetadata
39
+ from vllm.v1.sample.tpu.sampler import Sampler as TPUSampler
40
+ from vllm.v1.utils import bind_kv_cache
41
+ from vllm.v1.worker.gpu_input_batch import CachedRequestState, InputBatch
42
+
43
+ from .utils import sanity_check_mm_encoder_outputs
44
+
45
+ if TYPE_CHECKING:
46
+ from vllm.v1.core.sched.output import SchedulerOutput
47
+
48
+ logger = init_logger(__name__)
49
+
50
+ # Here we utilize the behavior that out-of-bounds indices are ignored.
51
+ # FIXME(woosuk): Find a more reliable way to prevent possible bugs.
52
+ _PAD_SLOT_ID = 1_000_000_000
53
+ INVALID_TOKEN_ID = -1
54
+ # Smallest output size
55
+ MIN_NUM_SEQS = 8
56
+
57
+
58
+ #########################################################
59
+ # Ways to avoid recompilation
60
+ #########################################################
61
+ #
62
+ # The model executor has two primary components:
63
+ # 1. preparing the model and sampler inputs
64
+ # 2. executing the model and sampler.
65
+ # The core idea is to avoid any TPU computation during input preparation. For
66
+ # better compilation tracking and increased flexibility, the model execution and
67
+ # sampler are divided into several distinct components.
68
+ #
69
+ # Below are the detailed steps:
70
+ #
71
+ # Step 1
72
+ # It is recommended to avoid TPU operations when preparing the model and sampler
73
+ # inputs. CPU tensors can be prepared and transferred to the XLA device using
74
+ # cpu_tensor.to(xla_device), which only triggers CPU to TPU transfers and avoids
75
+ # compilation.
76
+ #
77
+ # Step 2
78
+ # The TPU execution should be decomposed into subgraphs (4 at the moment):
79
+ # 1. the main model
80
+ # 2. selecting hidden states for each request
81
+ # 3. sampler
82
+ # 4. encoder.
83
+ # Each subgraph should be decorated in a torch.compile. This is used to make
84
+ # sure that we have the same subgraph topology in both dummy_run and
85
+ # execute_model. The results from these subgraphs should either be passed to
86
+ # other subgraphs, or transferred from TPU to CPU using xla_tensor.cpu() for
87
+ # subsequent processing on the CPU.
88
+ #
89
+ # Step 3
90
+ # The dummy_run should be comprehensive, ensuring all potential input shapes and
91
+ # branch predictions are included as subgraph inputs to facilitate
92
+ # pre-compilation.
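The subgraph decomposition described in Step 2 can be illustrated with a minimal sketch (not part of the diff; the module and variable names are illustrative): a small subgraph such as last-token hidden-state selection is wrapped in torch.compile with the XLA backend so that dummy_run and execute_model trace the same graph.

import torch

class SelectHiddenStates(torch.nn.Module):
    """Illustrative subgraph: gather the last-token hidden state per request."""

    def forward(self, hidden_states: torch.Tensor,
                logits_indices: torch.Tensor) -> torch.Tensor:
        return hidden_states[logits_indices]

# Compiling with fullgraph=True and dynamic=False on padded input shapes keeps
# the traced XLA graph identical between warm-up (dummy_run) and serving
# (execute_model), so no recompilation happens at runtime.
select_hidden_states = torch.compile(SelectHiddenStates(),
                                     backend="openxla",
                                     fullgraph=True,
                                     dynamic=False)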
93
+ class TPUModelRunner:
94
+
95
+ def __init__(
96
+ self,
97
+ vllm_config: VllmConfig,
98
+ device: torch.device,
99
+ ):
100
+ self.vllm_config = vllm_config
101
+ self.model_config = vllm_config.model_config
102
+ self.cache_config = vllm_config.cache_config
103
+ self.lora_config = vllm_config.lora_config
104
+ self.load_config = vllm_config.load_config
105
+ self.parallel_config = vllm_config.parallel_config
106
+ self.scheduler_config = vllm_config.scheduler_config
107
+ self.speculative_config = vllm_config.speculative_config
108
+ self.prompt_adapter_config = vllm_config.prompt_adapter_config
109
+ self.observability_config = vllm_config.observability_config
110
+ self.device_config = vllm_config.device_config
111
+
112
+ model_config = self.model_config
113
+ cache_config = self.cache_config
114
+ scheduler_config = self.scheduler_config
115
+ parallel_config = self.parallel_config
116
+ self.device = device
117
+ self.check_recompilation = envs.VLLM_XLA_CHECK_RECOMPILATION
118
+
119
+ self.enforce_eager = model_config.enforce_eager
120
+
121
+ self.num_xla_graphs = 0
122
+ self._update_num_xla_graphs("init")
123
+
124
+ self.pin_memory = is_pin_memory_available()
125
+ self.dtype = self.model_config.dtype
126
+ self._hidden_states_dtype = self.dtype
127
+
128
+ self.is_multimodal_model = model_config.is_multimodal_model
129
+ self.sliding_window = model_config.get_sliding_window()
130
+ self.block_size = cache_config.block_size
131
+ self.max_model_len = model_config.max_model_len
132
+ self.max_num_blocks_per_req = cdiv(self.max_model_len, self.block_size)
133
+ # InputBatch needs to work with sampling tensors greater than padding
134
+ # to avoid dynamic shapes. Also, avoid suboptimal alignment.
135
+ self.max_num_reqs = max(scheduler_config.max_num_seqs, MIN_NUM_SEQS)
136
+ self.num_tokens_paddings = _get_token_paddings(
137
+ min_token_size=16,
138
+ max_token_size=scheduler_config.max_num_batched_tokens,
139
+ padding_gap=envs.VLLM_TPU_BUCKET_PADDING_GAP)
140
+ # In case `max_num_tokens < max(num_tokens_paddings)` use the actual
141
+ # padded max value to pre-allocate data structures and pre-compile.
142
+ self.max_num_tokens = self.num_tokens_paddings[-1]
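_get_token_paddings is defined further down in this file and is not shown in this excerpt. As a rough sketch of the bucketing idea only (an assumption; the actual helper may differ): padded token counts grow exponentially from min_token_size and, once padding_gap is reached, linearly in steps of padding_gap, so every possible batch size maps to one of a small, fixed set of pre-compiled shapes.

def example_token_paddings(min_token_size: int, max_token_size: int,
                           padding_gap: int) -> list[int]:
    # Hypothetical stand-in for _get_token_paddings, for illustration only.
    paddings, size = [], min_token_size
    if padding_gap <= 0:
        # Purely exponential buckets.
        while size < max_token_size:
            paddings.append(size)
            size *= 2
    else:
        while size < padding_gap and size < max_token_size:
            paddings.append(size)
            size *= 2
        while size < max_token_size:
            paddings.append(size)
            size += padding_gap
    paddings.append(max_token_size)
    return paddings

# e.g. example_token_paddings(16, 1024, 256)
#   -> [16, 32, 64, 128, 256, 512, 768, 1024]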
143
+
144
+ # Model-related.
145
+ self.num_attn_layers = model_config.get_num_layers_by_block_type(
146
+ parallel_config, LayerBlockType.attention)
147
+ self.num_query_heads = model_config.get_num_attention_heads(
148
+ parallel_config)
149
+ self.num_kv_heads = model_config.get_num_kv_heads(parallel_config)
150
+ self.head_size = model_config.get_head_size()
151
+ self.hidden_size = model_config.get_hidden_size()
152
+ self.vocab_size = model_config.get_vocab_size()
153
+
154
+ # Multi-modal data support
155
+ self.mm_registry = MULTIMODAL_REGISTRY
156
+ self.uses_mrope = model_config.uses_mrope
157
+ # TODO: Support M-RoPE (e.g, Qwen2-VL)
158
+ assert not self.uses_mrope, "TPU does not support M-RoPE yet."
159
+
160
+ encoder_compute_budget, encoder_cache_size = compute_encoder_budget(
161
+ model_config=model_config,
162
+ scheduler_config=scheduler_config,
163
+ mm_registry=self.mm_registry,
164
+ )
165
+ self.max_num_encoder_input_tokens = encoder_compute_budget
166
+ self.encoder_cache_size = encoder_cache_size
167
+
168
+ # Lazy initialization
169
+ # self.model: nn.Module # Set after load_model
170
+ self.kv_caches: list[torch.Tensor] = []
171
+ # req_id -> (input_id -> encoder_output)
172
+ self.encoder_cache: dict[str, dict[int, torch.Tensor]] = {}
173
+
174
+ # Request states.
175
+ self.requests: dict[str, CachedRequestState] = {}
176
+ # Persistent batch.
177
+ self.input_batch = InputBatch(
178
+ max_num_reqs=self.max_num_reqs,
179
+ max_model_len=self.max_model_len,
180
+ max_num_blocks_per_req=self.max_num_blocks_per_req,
181
+ device=self.device,
182
+ pin_memory=self.pin_memory,
183
+ vocab_size=self.vocab_size,
184
+ )
185
+
186
+ # Cached torch/numpy tensor
187
+ # The pytorch tensor and numpy array share the same buffer.
188
+ # Sometimes the numpy op is faster so we create both.
189
+ self.input_ids_cpu = torch.zeros(self.max_num_tokens,
190
+ dtype=torch.int32,
191
+ device="cpu")
192
+ self.input_ids_np = self.input_ids_cpu.numpy()
193
+
194
+ self.positions_cpu = torch.zeros(self.max_num_tokens,
195
+ dtype=torch.int32,
196
+ device="cpu")
197
+ self.positions_np = self.positions_cpu.numpy()
198
+
199
+ self.slot_mapping_cpu = torch.zeros(self.max_num_tokens,
200
+ dtype=torch.int64,
201
+ device="cpu")
202
+ self.slot_mapping_np = self.slot_mapping_cpu.numpy()
203
+ self.block_table_cpu = torch.zeros(
204
+ (self.max_num_reqs, self.max_num_blocks_per_req),
205
+ dtype=self.input_batch.block_table.get_cpu_tensor().dtype,
206
+ device="cpu")
207
+
208
+ self.query_start_loc_cpu = torch.zeros(self.max_num_tokens + 1,
209
+ dtype=torch.int32,
210
+ device="cpu",
211
+ pin_memory=self.pin_memory)
212
+ self.query_start_loc_np = self.query_start_loc_cpu.numpy()
213
+
214
+ self.seq_lens_cpu = torch.zeros(self.max_num_tokens,
215
+ dtype=torch.int32,
216
+ device="cpu",
217
+ pin_memory=self.pin_memory)
218
+ self.seq_lens_np = self.seq_lens_cpu.numpy()
219
+
220
+ # Range tensor with values [0 .. self.max_num_tokens - 1].
221
+ # Used to initialize positions / context_lens / seq_lens
222
+ # Keep in int64 to avoid overflow with long context
223
+ self.arange_np = np.arange(self.max_num_tokens, dtype=np.int64)
224
+ self.num_reqs_paddings = _get_req_paddings(
225
+ min_req_size=MIN_NUM_SEQS, max_req_size=self.max_num_reqs)
226
+
227
+ # tensors for structured decoding
228
+ self.grammar_bitmask_cpu = torch.zeros(
229
+ (self.max_num_reqs, cdiv(self.vocab_size, 32)),
230
+ dtype=torch.int32,
231
+ device="cpu",
232
+ pin_memory=self.pin_memory)
233
+ self.require_structured_out_cpu = torch.zeros(
234
+ (self.max_num_reqs, 1),
235
+ dtype=torch.bool,
236
+ device="cpu",
237
+ pin_memory=self.pin_memory)
238
+ self.structured_decode_arange = torch.arange(
239
+ 0, 32, device="cpu", pin_memory=self.pin_memory)
240
+
241
+ # Get maximum number of mm items per modality (batch size).
242
+ self.max_num_mm_items_by_modality = dict()
243
+ if (self.is_multimodal_model and self.max_num_encoder_input_tokens > 0
244
+ and self.encoder_cache_size > 0):
245
+ max_tokens_by_modality_dict = (
246
+ MULTIMODAL_REGISTRY.
247
+ get_max_tokens_per_item_by_nonzero_modality(self.model_config))
248
+ for modality, max_tokens in max_tokens_by_modality_dict.items():
249
+ # Check how many items of this modality can be supported by
250
+ # the encoder budget.
251
+ encoder_budget = min(self.max_num_encoder_input_tokens,
252
+ self.encoder_cache_size)
253
+
254
+ max_num_mm_items_encoder_budget = cdiv(encoder_budget,
255
+ max_tokens)
256
+
257
+ # Check how many items of this modality can be supported by
258
+ # the decoder budget.
259
+ max_mm_items_per_req = self.mm_registry.\
260
+ get_mm_limits_per_prompt(self.model_config)[modality]
261
+
262
+ # NOTE: We do not consider max_num_batched_tokens on purpose
263
+ # because the multimodal embeddings can be generated in advance
264
+ # and chunked prefilled.
265
+ max_num_mm_items_decoder_budget = self.max_num_reqs * \
266
+ max_mm_items_per_req
267
+
268
+ max_num_mm_items = min(max_num_mm_items_encoder_budget,
269
+ max_num_mm_items_decoder_budget)
270
+ self.max_num_mm_items_by_modality[modality] = max_num_mm_items
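As a worked example of the budget arithmetic above (the numbers are illustrative, not taken from any real configuration): with an encoder budget of min(max_num_encoder_input_tokens, encoder_cache_size) = 8192 tokens and a modality whose largest item needs 2048 tokens, the encoder side allows cdiv(8192, 2048) = 4 items; with max_num_reqs = 16 and a per-prompt limit of 1 item, the decoder side allows 16 * 1 = 16 items, so max_num_mm_items = min(4, 16) = 4.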
271
+
272
+ def _update_num_xla_graphs(self, case_str):
273
+ check_comp = self.check_recompilation and not self.enforce_eager
274
+ if not check_comp:
275
+ return
276
+
277
+ total_cached_graphs = xr.get_num_cached_compilation_graph()
278
+ new_compiled_graphs = total_cached_graphs - self.num_xla_graphs
279
+ if new_compiled_graphs == 0:
280
+ return
281
+
282
+ logger.info("Add new %d compiled XLA graphs due to %s",
283
+ new_compiled_graphs, case_str)
284
+ self.num_xla_graphs += new_compiled_graphs
285
+
286
+ def _verify_num_xla_graphs(self, case_str):
287
+ check_comp = self.check_recompilation and not self.enforce_eager
288
+ if not check_comp:
289
+ return
290
+
291
+ curr_cached_graph = xr.get_num_cached_compilation_graph()
292
+ assert self.num_xla_graphs == curr_cached_graph, (
293
+ "Recompilation after warm up is detected during {}."
294
+ " num_xla_graphs = {} curr_cached_graph = {}".format(
295
+ case_str, self.num_xla_graphs, curr_cached_graph))
296
+
297
+ def _update_states(self, scheduler_output: "SchedulerOutput") -> bool:
298
+ """Update the cached states and the persistent batch with the scheduler
299
+ output.
300
+
301
+ The updated states are used by the `_prepare_inputs` function to create
302
+ the input GPU tensors for the model.
303
+
304
+ Returns:
305
+ True if there is a new/resumed/paused/finished request.
306
+ If False, we can skip copying SamplingMetadata to the GPU.
307
+ """
308
+ # Remove finished requests from the cached states.
309
+ for req_id in scheduler_output.finished_req_ids:
310
+ self.requests.pop(req_id, None)
311
+ self.encoder_cache.pop(req_id, None)
312
+
313
+ # Remove the finished requests from the persistent batch.
314
+ # NOTE(woosuk): There could be an edge case where finished_req_ids and
315
+ # scheduled_req_ids overlap. This happens when a request is aborted and
316
+ # then resubmitted with the same ID. In this case, we treat them as two
317
+ # distinct requests - clearing the cached states for the first request
318
+ # and handling the second as a new request.
319
+ removed_req_indices: list[int] = []
320
+ for req_id in scheduler_output.finished_req_ids:
321
+ req_index = self.input_batch.remove_request(req_id)
322
+ if req_index is not None:
323
+ removed_req_indices.append(req_index)
324
+
325
+ # Free the cached encoder outputs.
326
+ for req_id, input_id in scheduler_output.free_encoder_input_ids:
327
+ encoder_outputs = self.encoder_cache.get(req_id)
328
+ if encoder_outputs is not None:
329
+ encoder_outputs.pop(input_id, None)
330
+ if not encoder_outputs:
331
+ self.encoder_cache.pop(req_id, None)
332
+
333
+ # Remove the unscheduled requests from the persistent batch.
334
+ # NOTE(woosuk): The unscheduled requests are either preempted requests
335
+ # or running requests that are not scheduled in this step. We remove
336
+ # them from the persistent batch but keep their cached states since
337
+ # they will be scheduled again sometime in the future.
338
+ scheduled_req_ids = scheduler_output.num_scheduled_tokens.keys()
339
+ cached_req_ids = self.input_batch.req_id_to_index.keys()
340
+ unscheduled_req_ids = cached_req_ids - scheduled_req_ids
341
+ # NOTE(woosuk): The persistent batch optimization assumes that
342
+ # consecutive batches contain mostly the same requests. If batches
343
+ # have low request overlap (e.g., alternating between two distinct
344
+ # sets of requests), this optimization becomes very inefficient.
345
+ for req_id in unscheduled_req_ids:
346
+ req_index = self.input_batch.remove_request(req_id)
347
+ assert req_index is not None
348
+ removed_req_indices.append(req_index)
349
+
350
+ req_ids_to_add: list[str] = []
351
+ # Add new requests to the cached states.
352
+ for new_req_data in scheduler_output.scheduled_new_reqs:
353
+ req_id = new_req_data.req_id
354
+ sampling_params = new_req_data.sampling_params
355
+
356
+ self.requests[req_id] = CachedRequestState(
357
+ req_id=req_id,
358
+ prompt_token_ids=new_req_data.prompt_token_ids,
359
+ mm_inputs=new_req_data.mm_inputs,
360
+ mm_positions=new_req_data.mm_positions,
361
+ sampling_params=sampling_params,
362
+ generator=None,
363
+ block_ids=new_req_data.block_ids,
364
+ num_computed_tokens=new_req_data.num_computed_tokens,
365
+ output_token_ids=[],
366
+ lora_request=new_req_data.lora_request,
367
+ )
368
+
369
+ req_ids_to_add.append(req_id)
370
+
371
+ # Update the states of the running/resumed requests.
372
+ for req_data in scheduler_output.scheduled_cached_reqs:
373
+ req_id = req_data.req_id
374
+ req_state = self.requests[req_id]
375
+
376
+ # Update the cached states.
377
+ req_state.num_computed_tokens = req_data.num_computed_tokens
378
+ if not req_data.resumed_from_preemption:
379
+ # Append the new blocks to the existing block IDs.
380
+ req_state.block_ids.extend(req_data.new_block_ids)
381
+ else:
382
+ # The request is resumed from preemption.
383
+ # Replace the existing block IDs with the new ones.
384
+ req_state.block_ids = req_data.new_block_ids
385
+
386
+ req_index = self.input_batch.req_id_to_index.get(req_id)
387
+ if req_index is None:
388
+ # The request is not in the persistent batch.
389
+ # The request was either preempted and resumed later, or was not
390
+ # scheduled in the previous step and needs to be added again.
391
+ req_ids_to_add.append(req_id)
392
+ continue
393
+
394
+ # Update the persistent batch.
395
+ self.input_batch.num_computed_tokens_cpu[req_index] = (
396
+ req_data.num_computed_tokens)
397
+ self.input_batch.block_table.append_row(req_data.new_block_ids,
398
+ req_index)
399
+
400
+ # Add the new or resumed requests to the persistent batch.
401
+ # The smaller empty indices are filled first.
402
+ removed_req_indices = sorted(removed_req_indices, reverse=True)
403
+ for req_id in req_ids_to_add:
404
+ req_state = self.requests[req_id]
405
+ if removed_req_indices:
406
+ # Fill the empty index.
407
+ req_index = removed_req_indices.pop()
408
+ else:
409
+ # Append to the end.
410
+ req_index = None
411
+ self.input_batch.add_request(req_state, req_index)
412
+
413
+ # Condense the batched states if there are empty indices.
414
+ if removed_req_indices:
415
+ self.input_batch.condense(removed_req_indices)
416
+
417
+ return len(unscheduled_req_ids) > 0 or len(req_ids_to_add) > 0
418
+
419
+ def get_model(self) -> nn.Module:
420
+ assert self.model is not None
421
+ return self.model
422
+
423
+ def get_kv_cache_spec(self) -> dict[str, KVCacheSpec]:
424
+ """
425
+ Generates the KVCacheSpec by parsing the kv cache format from each
426
+ Attention module in the static forward context.
427
+ Returns:
428
+ KVCacheSpec: A dictionary mapping layer names to their KV cache
429
+ format. Layers that do not need KV cache are not included.
430
+ """
431
+
432
+ layers = get_layers_from_vllm_config(self.vllm_config, Attention)
433
+ block_size = self.vllm_config.cache_config.block_size
434
+ kv_cache_spec: dict[str, KVCacheSpec] = {}
435
+ for layer_name, attn_module in layers.items():
436
+ if attn_module.attn_type == AttentionType.DECODER:
437
+ if attn_module.sliding_window is not None:
438
+ kv_cache_spec[layer_name] = SlidingWindowSpec(
439
+ block_size=block_size,
440
+ num_kv_heads=attn_module.num_kv_heads,
441
+ head_size=attn_module.head_size,
442
+ dtype=attn_module.dtype,
443
+ sliding_window=attn_module.sliding_window,
444
+ use_mla=False,
445
+ )
446
+ else:
447
+ kv_cache_spec[layer_name] = FullAttentionSpec(
448
+ block_size=block_size,
449
+ num_kv_heads=attn_module.num_kv_heads,
450
+ head_size=attn_module.head_size,
451
+ dtype=attn_module.dtype,
452
+ use_mla=False,
453
+ )
454
+ elif attn_module.attn_type in (AttentionType.ENCODER,
455
+ AttentionType.ENCODER_ONLY):
456
+ # encoder-only attention does not need KV cache.
457
+ continue
458
+ elif attn_module.attn_type == AttentionType.ENCODER_DECODER:
459
+ raise NotImplementedError
460
+ else:
461
+ raise ValueError(
462
+ f"Unknown attention type: {attn_module.attn_type}")
463
+
464
+ return kv_cache_spec
465
+
466
+ def _prepare_inputs(self, scheduler_output: "SchedulerOutput"):
467
+ total_num_scheduled_tokens = scheduler_output.total_num_scheduled_tokens
468
+ assert total_num_scheduled_tokens > 0
469
+ num_reqs = self.input_batch.num_reqs
470
+ assert num_reqs > 0
471
+
472
+ # Get the number of scheduled tokens for each request.
473
+ num_scheduled_tokens_per_req = []
474
+ max_num_scheduled_tokens_all_reqs = 0
475
+ for req_id in self.input_batch.req_ids[:num_reqs]:
476
+ assert req_id is not None
477
+ num_tokens = scheduler_output.num_scheduled_tokens[req_id]
478
+ num_scheduled_tokens_per_req.append(num_tokens)
479
+ max_num_scheduled_tokens_all_reqs = max(
480
+ max_num_scheduled_tokens_all_reqs, num_tokens)
481
+ num_scheduled_tokens_per_req = np.array(num_scheduled_tokens_per_req,
482
+ dtype=np.int32)
483
+ assert max_num_scheduled_tokens_all_reqs > 0
484
+
485
+ # Get request indices.
486
+ # E.g., [2, 5, 3] -> [0, 0, 1, 1, 1, 1, 1, 2, 2, 2]
487
+ # For each scheduled token, what are the corresponding req index.
488
+ req_indices = np.repeat(self.arange_np[:num_reqs],
489
+ num_scheduled_tokens_per_req)
490
+
491
+ # Get batched arange.
492
+ # E.g., [2, 5, 3] -> [0, 1, 0, 1, 2, 3, 4, 0, 1, 2]
493
+ # For each scheduled token, what is its position in corresponding req.
494
+ arange = np.concatenate(
495
+ [self.arange_np[:n] for n in num_scheduled_tokens_per_req])
496
+
497
+ # Get positions.
498
+ positions_np = self.positions_np[:total_num_scheduled_tokens]
499
+ np.add(self.input_batch.num_computed_tokens_cpu[req_indices],
500
+ arange,
501
+ out=positions_np)
502
+
503
+ # Get token indices.
504
+ # E.g., [0, 1, 0, 1, 2, 3, 4, 0, 1, 2]
505
+ # -> [0, 1, M, M + 1, M + 2, M + 3, M + 4, 2 * M, 2 * M + 1, 2 * M + 2]
506
+ # where M is the max_model_len.
507
+ token_indices = (positions_np +
508
+ req_indices * self.input_batch.token_ids_cpu.shape[1])
509
+
510
+ # NOTE(woosuk): We use torch.index_select instead of np.take here
511
+ # because torch.index_select is much faster than np.take for large
512
+ # tensors.
513
+ torch.index_select(self.input_batch.token_ids_cpu_tensor.flatten(),
514
+ 0,
515
+ torch.from_numpy(token_indices),
516
+ out=self.input_ids_cpu[:total_num_scheduled_tokens])
517
+
518
+ # Calculate the slot mapping.
519
+ # E.g., [0, 1, 0, 1, 2, 3, 4, 0, 1, 2]
520
+ # -> [0, 0, K, K, K + 1, K + 1, K + 2, 2 * K, 2 * K, 2 * K + 1]
521
+ # where K is the max_num_blocks_per_req and the block size is 2.
522
+ # NOTE(woosuk): We can't simply use `token_indices // block_size` here
523
+ # because M (max_model_len) is not necessarily divisible by block_size.
524
+ # req_indices: # E.g., [2, 5, 3] -> [0, 0, 1, 1, 1, 1, 1, 2, 2, 2]
525
+ block_table_indices = (req_indices * self.max_num_blocks_per_req +
526
+ positions_np // self.block_size)
527
+ # NOTE(woosuk): We use torch.index_select instead of np.take here
528
+ # because torch.index_select is much faster than np.take for large
529
+ # tensors.
530
+ block_table_cpu = self.input_batch.block_table.get_cpu_tensor()
531
+ block_numbers = block_table_cpu.flatten()[block_table_indices].numpy()
532
+ block_offsets = positions_np % self.block_size
533
+ np.add(block_numbers * self.block_size,
534
+ block_offsets,
535
+ out=self.slot_mapping_np[:total_num_scheduled_tokens])
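The "E.g." comments above can be reproduced directly with numpy. A small self-contained illustration (the [2, 5, 3] schedule matches the comments; block_size = 2, max_model_len = 8 and the toy block table are assumptions chosen only to keep the numbers readable):

import numpy as np

num_scheduled = np.array([2, 5, 3], dtype=np.int32)
arange_np = np.arange(16, dtype=np.int64)

# [0 0 1 1 1 1 1 2 2 2]: request index of every scheduled token.
req_indices = np.repeat(arange_np[:3], num_scheduled)

# [0 1 0 1 2 3 4 0 1 2]: position of every token within its own request.
arange = np.concatenate([arange_np[:n] for n in num_scheduled])

# With num_computed_tokens = 0 for all requests, positions equal arange.
positions = np.zeros(3, dtype=np.int64)[req_indices] + arange

# Indices into a flattened (num_reqs, max_model_len) token buffer.
max_model_len = 8
token_indices = positions + req_indices * max_model_len

# Slot mapping with block_size = 2 and a toy block table.
block_size, max_num_blocks_per_req = 2, 4
block_table = np.arange(3 * max_num_blocks_per_req).reshape(3, -1)
block_table_indices = (req_indices * max_num_blocks_per_req
                       + positions // block_size)
block_numbers = block_table.reshape(-1)[block_table_indices]
slot_mapping = block_numbers * block_size + positions % block_size

print(req_indices)    # [0 0 1 1 1 1 1 2 2 2]
print(arange)         # [0 1 0 1 2 3 4 0 1 2]
print(token_indices)  # [ 0  1  8  9 10 11 12 16 17 18]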
536
+
537
+ # Prepare the attention metadata.
538
+ self.query_start_loc_np[0] = 0
539
+ np.cumsum(num_scheduled_tokens_per_req,
540
+ out=self.query_start_loc_np[1:num_reqs + 1])
541
+ self.query_start_loc_np[num_reqs + 1:] = 1
542
+
543
+ self.seq_lens_np[:num_reqs] = (
544
+ self.input_batch.num_computed_tokens_cpu[:num_reqs] +
545
+ num_scheduled_tokens_per_req)
546
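# Editor's sketch (not part of the diff): the cumulative sum turns per-request
# token counts into query start offsets; the last position of each request
# (query_start_loc[1:] - 1) is later used as the sampling index.
import numpy as np

num_scheduled_tokens_per_req = np.array([2, 5, 3], dtype=np.int32)
num_computed_tokens = np.array([10, 0, 4], dtype=np.int32)

query_start_loc = np.zeros(4, dtype=np.int32)
np.cumsum(num_scheduled_tokens_per_req, out=query_start_loc[1:])
# query_start_loc -> [0, 2, 7, 10]
seq_lens = num_computed_tokens + num_scheduled_tokens_per_req
# seq_lens -> [12, 5, 7]
logits_indices = query_start_loc[1:] - 1
# logits_indices -> [1, 6, 9], i.e. the last scheduled token of each request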
+
547
+ # Do the padding and copy the tensors to the TPU.
548
+ padded_total_num_scheduled_tokens = _get_padded_token_len(
549
+ self.num_tokens_paddings, total_num_scheduled_tokens)
550
+ # Zero out to avoid spurious values from prev iteration (last cp chunk)
551
+ self.input_ids_cpu[
552
+ total_num_scheduled_tokens:padded_total_num_scheduled_tokens] = 0
553
+ self.input_ids = self.input_ids_cpu[:
554
+ padded_total_num_scheduled_tokens].to(
555
+ self.device)
556
+ self.position_ids = self.positions_cpu[:
557
+ padded_total_num_scheduled_tokens].to(
558
+ self.device)
559
+ self.slot_mapping_cpu[total_num_scheduled_tokens:] = _PAD_SLOT_ID
560
+ slot_mapping = self.slot_mapping_cpu[:
561
+ padded_total_num_scheduled_tokens].to(
562
+ self.device)
563
+ block_tables = self.block_table_cpu[:self.max_num_reqs]
564
+ block_tables[:num_reqs, :self.max_num_blocks_per_req] = (
565
+ self.input_batch.block_table.get_cpu_tensor()[:num_reqs])
566
+ block_tables = block_tables.to(self.device)
567
+ query_start_loc = self.query_start_loc_cpu[:self.max_num_reqs + 1].to(
568
+ self.device)
569
+ seq_lens = self.seq_lens_cpu[:self.max_num_reqs].to(self.device)
570
+
571
+ attn_metadata = PallasMetadata(
572
+ slot_mapping=slot_mapping,
573
+ block_tables=block_tables,
574
+ context_lens=seq_lens,
575
+ query_start_loc=query_start_loc,
576
+ num_seqs=torch.tensor([num_reqs],
577
+ dtype=torch.int32,
578
+ device=self.device),
579
+ )
580
+ # NOTE(woosuk): Due to chunked prefills, there can be at most 1 partial
581
+ # request in the batch. While we should not sample any token from this
582
+ # partial request, we do so for simplicity. We will ignore the sampled
583
+ # token from the partial request.
584
+ # TODO: Support prompt logprobs.
585
+ padded_num_reqs = _get_padded_num_reqs_with_upper_limit(
586
+ num_reqs, self.max_num_reqs)
587
+ # Indices at which we sample (positions of last token in the sequence).
588
+ # Padded to avoid recompiling when `num_reqs` varies.
589
+ logits_indices = self.query_start_loc_cpu[1:padded_num_reqs + 1] - 1
590
+ logits_indices = logits_indices.to(self.device)
591
+ return attn_metadata, logits_indices, padded_num_reqs
592
+
593
+ def _scatter_placeholders(
594
+ self,
595
+ embeds: torch.Tensor,
596
+ is_embed: Optional[torch.Tensor],
597
+ ) -> torch.Tensor:
598
+ if is_embed is None:
599
+ return embeds
600
+
601
+ placeholders = embeds.new_full(
602
+ (is_embed.shape[0], embeds.shape[-1]),
603
+ fill_value=torch.nan,
604
+ )
605
+ placeholders[is_embed] = embeds
606
+ return placeholders
607
+
608
+ def _gather_placeholders(
609
+ self,
610
+ placeholders: torch.Tensor,
611
+ is_embed: Optional[torch.Tensor],
612
+ ) -> torch.Tensor:
613
+ if is_embed is None:
614
+ return placeholders
615
+
616
+ return placeholders[is_embed]
617
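# Editor's sketch (not part of the diff): round-tripping embeddings through the
# scatter/gather placeholder logic above with a boolean is_embed mask.
import torch

embeds = torch.randn(3, 4)                       # three real multimodal embeddings
is_embed = torch.tensor([True, False, True, False, True])

placeholders = embeds.new_full((is_embed.shape[0], embeds.shape[-1]),
                               fill_value=torch.nan)
placeholders[is_embed] = embeds                  # _scatter_placeholders
recovered = placeholders[is_embed]               # _gather_placeholders
assert torch.equal(recovered, embeds)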
+
618
+ def _execute_mm_encoder(self, scheduler_output: "SchedulerOutput"):
619
+ scheduled_encoder_inputs = scheduler_output.scheduled_encoder_inputs
620
+ if not scheduled_encoder_inputs:
621
+ return
622
+
623
+ # Batch the multi-modal inputs.
624
+ mm_inputs = list[MultiModalKwargs]()
625
+ req_ids_pos = list[tuple[str, int, PlaceholderRange]]()
626
+ for req_id, encoder_input_ids in scheduled_encoder_inputs.items():
627
+ req_state = self.requests[req_id]
628
+
629
+ for mm_input_id in encoder_input_ids:
630
+ mm_inputs.append(req_state.mm_inputs[mm_input_id])
631
+ req_ids_pos.append(
632
+ (req_id, mm_input_id, req_state.mm_positions[mm_input_id]))
633
+
634
+ # Batch mm inputs as much as we can: if a request in the batch has
635
+ # multiple modalities or a different modality than the previous one,
636
+ # we process it separately to preserve item order.
637
+ # FIXME(ywang96): This is a hacky way to deal with multiple modalities
638
+ # in the same batch while still being able to benefit from batching
639
+ # multimodal inputs. The proper solution should be reordering the
640
+ # encoder outputs.
641
+ grouped_mm_inputs_list = group_mm_inputs_by_modality(mm_inputs)
642
+
643
+ encoder_outputs = []
644
+ for grouped_mm_inputs in grouped_mm_inputs_list:
645
+ batched_mm_inputs = MultiModalKwargs.batch(grouped_mm_inputs)
646
+ batched_mm_inputs = MultiModalKwargs.as_kwargs(batched_mm_inputs,
647
+ device=self.device)
648
+
649
+ # Run the encoder.
650
+ # `curr_group_outputs` is either of the following:
651
+ # 1. A tensor of shape (num_items, feature_size, hidden_size)
652
+ # in case feature_size is fixed across all multimodal items.
653
+ # 2. A list or tuple (length: num_items) of tensors, each of shape
654
+ # (feature_size, hidden_size) in case the feature size is dynamic
655
+ # depending on the input multimodal items.
656
+ xm.mark_step()
657
+ curr_group_outputs = self.model.get_multimodal_embeddings(
658
+ **batched_mm_inputs)
659
+ xm.mark_step()
660
+
661
+ sanity_check_mm_encoder_outputs(
662
+ curr_group_outputs,
663
+ expected_num_items=len(grouped_mm_inputs),
664
+ )
665
+
666
+ if isinstance(curr_group_outputs, torch.Tensor):
667
+ encoder_outputs.append(curr_group_outputs)
668
+ else:
669
+ assert isinstance(curr_group_outputs, (list, tuple))
670
+ for output in curr_group_outputs:
671
+ encoder_outputs.append(output)
672
+
673
+ # Cache the encoder outputs.
674
+ # NOTE (NickLucche) here we diverge from logic in other runners, as we
675
+ # assume we only have whole mm items to process. Hence we avoid the
676
+ # intrinsic dynamism that `scatter_mm_placeholders` introduces.
677
+ for (req_id, input_id, pos_info), output in zip(
678
+ req_ids_pos,
679
+ encoder_outputs,
680
+ ):
681
+ if req_id not in self.encoder_cache:
682
+ self.encoder_cache[req_id] = {}
683
+ assert pos_info.is_embed is None, "Expected all positions to be"\
684
+ " contiguous and embeddings."
685
+ self.encoder_cache[req_id][input_id] = output
686
+
687
+ def _gather_mm_embeddings(
688
+ self,
689
+ scheduler_output: "SchedulerOutput",
690
+ ) -> list[torch.Tensor]:
691
+ mm_embeds: list[torch.Tensor] = []
692
+ for req_id in self.input_batch.req_ids:
693
+ num_scheduled_tokens = scheduler_output.num_scheduled_tokens[
694
+ req_id]
695
+ req_state = self.requests[req_id]
696
+ num_computed_tokens = req_state.num_computed_tokens
697
+ mm_positions = req_state.mm_positions
698
+ # TODO unroll loop and assume/enforce --disable_chunked_mm_input
699
+ # NOTE (NickLucche) here we diverge from logic in other runners, as
700
+ # we assume we only have whole mm items to process. Hence we avoid
701
+ # the intrinsic dynamism that `gather_mm_placeholders` introduces.
702
+ for i, pos_info in enumerate(mm_positions):
703
+ start_pos = pos_info.offset
704
+ num_encoder_tokens = pos_info.length
705
+
706
+ # The encoder output is needed if the two ranges overlap:
707
+ # [num_computed_tokens,
708
+ # num_computed_tokens + num_scheduled_tokens) and
709
+ # [start_pos, start_pos + num_encoder_tokens)
710
+ if start_pos >= num_computed_tokens + num_scheduled_tokens:
711
+ # The encoder output is not needed in this step.
712
+ break
713
+ if start_pos + num_encoder_tokens <= num_computed_tokens:
714
+ # The encoder output is already processed and stored
715
+ # in the decoder's KV cache.
716
+ continue
717
+
718
+ assert req_id in self.encoder_cache
719
+ assert i in self.encoder_cache[req_id]
720
+ assert pos_info.is_embed is None, "Expected all positions to"\
721
+ " be contiguous and embeddings."
722
+ encoder_output = self.encoder_cache[req_id][i]
723
+ mm_embeds.append(encoder_output)
724
+ return mm_embeds
725
+
726
+ def _get_model_inputs(self, input_ids: torch.Tensor,
727
+ mm_embeds: list[torch.Tensor]):
728
+ if self.is_multimodal_model:
729
+ # NOTE(woosuk): To unify token ids and soft tokens (vision
730
+ # embeddings), we always use embeddings (rather than token ids)
731
+ # as input to the multimodal model, even when the input is text.
732
+ if mm_embeds:
733
+ inputs_embeds = self.model.get_input_embeddings(
734
+ input_ids, mm_embeds)
735
+ else:
736
+ inputs_embeds = self.model.get_input_embeddings(input_ids)
737
+ return None, inputs_embeds
738
+ else:
739
+ # For text-only models, we use token ids as input.
740
+ # While it is possible to use embeddings as input just like the
741
+ # multimodal models, it is not desirable for performance since
742
+ # then the embedding layer is not included in the CUDA graph.
743
+ return input_ids, None
744
+
745
+ @torch.no_grad()
746
+ def execute_model(
747
+ self,
748
+ scheduler_output: "SchedulerOutput",
749
+ intermediate_tensors: Optional[IntermediateTensors] = None,
750
+ ) -> ModelRunnerOutput:
751
+ # Update cached state
752
+ self._update_states(scheduler_output)
753
+ if not scheduler_output.total_num_scheduled_tokens:
754
+ # Return empty ModelRunnerOutput if there's no work to do.
755
+ return EMPTY_MODEL_RUNNER_OUTPUT
756
+
757
+ if self.is_multimodal_model:
758
+ # Run the multimodal encoder if any.
759
+ self._execute_mm_encoder(scheduler_output)
760
+ mm_embeds = self._gather_mm_embeddings(scheduler_output)
761
+ else:
762
+ mm_embeds = []
763
+ xm.mark_step()
764
+ # Prepare inputs
765
+ attn_metadata, logits_indices, padded_num_reqs = self._prepare_inputs(
766
+ scheduler_output)
767
+ input_ids, inputs_embeds = self._get_model_inputs(
768
+ self.input_ids, mm_embeds)
769
+ xm.mark_step()
770
+ num_reqs = self.input_batch.num_reqs
771
+ # Run the decoder
772
+ with set_forward_context(attn_metadata, self.vllm_config):
773
+ hidden_states = self.model(
774
+ input_ids=input_ids,
775
+ positions=self.position_ids,
776
+ inputs_embeds=inputs_embeds,
777
+ )
778
+ hidden_states = self.select_hidden_states(hidden_states,
779
+ logits_indices)
780
+ logits = self.compute_logits(hidden_states)
781
+ tpu_sampling_metadata = TPUSupportedSamplingMetadata.\
782
+ from_input_batch(self.input_batch, padded_num_reqs, self.device)
783
+ if scheduler_output.grammar_bitmask is not None:
784
+ require_struct_decoding, grammar_bitmask_padded, arange = \
785
+ self.prepare_structured_decoding_input(logits, scheduler_output)
786
+ logits = self.structured_decode(require_struct_decoding,
787
+ grammar_bitmask_padded, logits,
788
+ arange)
789
+ selected_token_ids = self.sample_from_logits(logits,
790
+ tpu_sampling_metadata)
791
+ # Remove padding on cpu and keep dynamic op outside of xla graph.
792
+ selected_token_ids = selected_token_ids.cpu()[:num_reqs]
793
+
794
+ # Update the cache state concurrently. Code above will not block until
795
+ # we use `selected_token_ids`. Add mark_step if post-processing changes
796
+ request_seq_lens: list[tuple[int, CachedRequestState, int]] = []
797
+ discard_sampled_tokens_req_indices = []
798
+ for i, req_id in zip(range(num_reqs), self.input_batch.req_ids):
799
+ assert req_id is not None
800
+ req_state = self.requests[req_id]
801
+ seq_len = (req_state.num_computed_tokens +
802
+ scheduler_output.num_scheduled_tokens[req_id])
803
+ if seq_len >= req_state.num_tokens:
804
+ request_seq_lens.append((i, req_state, seq_len))
805
+ else:
806
+ # Ignore the sampled token from the partial request.
807
+ # Rewind the generator state as if the token was not sampled.
808
+ generator = self.input_batch.generators.get(i)
809
+ if generator is not None:
810
+ # This relies on cuda-specific torch-internal impl details
811
+ generator.set_offset(generator.get_offset() - 4)
812
+
813
+ # Record the index of the request that should not be sampled,
814
+ # so that we could clear the sampled tokens before returning.
815
+ discard_sampled_tokens_req_indices.append(i)
816
+
817
+ assert all(
818
+ req_id is not None for req_id in
819
+ self.input_batch.req_ids[:num_reqs]), "req_ids contains None"
820
+ req_ids = cast(list[str], self.input_batch.req_ids[:num_reqs])
821
+
822
+ prompt_logprobs_dict: dict[str, Optional[LogprobsTensors]] = {}
823
+ for req_id in self.input_batch.req_ids[:num_reqs]:
824
+ prompt_logprobs_dict[req_id] = None
825
+
826
+ max_gen_len = selected_token_ids.shape[-1]
827
+ if max_gen_len == 1:
828
+ valid_sampled_token_ids = selected_token_ids.tolist()
829
+
830
+ # Mask out the sampled tokens that should not be sampled.
831
+ # TODO: Keep in sync with gpu_model_runner.py, in particular
832
+ # the "else" case here
833
+ for i in discard_sampled_tokens_req_indices:
834
+ valid_sampled_token_ids[i].clear()
835
+
836
+ # Append sampled tokens
837
+ for i, req_state, seq_len in request_seq_lens:
838
+ token_id = valid_sampled_token_ids[i][0]
839
+ self.input_batch.token_ids_cpu[i, seq_len] = token_id
840
+ req_state.output_token_ids.append(token_id)
841
+ self.input_batch.num_tokens[i] += 1
842
+
843
+ else:
844
+ valid_mask = selected_token_ids != INVALID_TOKEN_ID
845
+ gen_lens = valid_mask.sum(dim=1).tolist()
846
+ valid_sampled_token_ids = [
847
+ seq.tolist()
848
+ for seq in selected_token_ids[valid_mask].split(gen_lens)
849
+ ]
850
+ self.input_batch.num_tokens[:num_reqs] += gen_lens
851
+ for i, req_state, seq_len in request_seq_lens:
852
+ target_slice = slice(seq_len - gen_lens[i] + 1, seq_len + 1)
853
+ self.input_batch.token_ids_cpu[
854
+ i, target_slice] = valid_sampled_token_ids[i]
855
+ req_state.output_token_ids.extend(valid_sampled_token_ids[i])
856
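# Editor's sketch (not part of the diff; assumes INVALID_TOKEN_ID == -1): how the
# padded (num_reqs, max_gen_len) output above is split into per-request token lists.
import torch

INVALID_TOKEN_ID = -1
selected_token_ids = torch.tensor([[5, INVALID_TOKEN_ID],
                                   [7, 9],
                                   [INVALID_TOKEN_ID, INVALID_TOKEN_ID]])
valid_mask = selected_token_ids != INVALID_TOKEN_ID
gen_lens = valid_mask.sum(dim=1).tolist()                     # [1, 2, 0]
valid_sampled_token_ids = [
    seq.tolist() for seq in selected_token_ids[valid_mask].split(gen_lens)
]
# -> [[5], [7, 9], []]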
+
857
+ model_runner_output = ModelRunnerOutput(
858
+ req_ids=req_ids,
859
+ req_id_to_index=self.input_batch.req_id_to_index,
860
+ sampled_token_ids=valid_sampled_token_ids,
861
+ spec_token_ids=None,
862
+ logprobs=None,
863
+ prompt_logprobs_dict=prompt_logprobs_dict,
864
+ )
865
+
866
+ # Check there are no new graphs compiled - all the graphs should be
867
+ # captured and compiled during warm up.
868
+ self._verify_num_xla_graphs("execute_model")
869
+
870
+ return model_runner_output
871
+
872
+ def load_model(self) -> None:
873
+ self.device = self.device_config.device
874
+
875
+ # NOTE(woosuk): While the executor assigns the TP ranks to the worker
876
+ # process, the ranks can be different from the ranks internally assigned
877
+ # by the xm runtime. Therefore, there is a mismatch in the rank
878
+ # assignment between the gloo (cpu) runtime and the xm (tpu) runtime.
879
+ # This is not a problem in linear layers because all-reduce is
880
+ # rank-agnostic. However, it matters for all-gather as the ranks
881
+ # determine the order of concatenating the output tensors.
882
+ # As a workaround, we use the xm's rank assignment only when loading
883
+ # the embedding weights.
884
+ xm_tp_rank = xr.global_ordinal()
885
+ with patch(
886
+ "vllm.model_executor.layers.vocab_parallel_embedding."
887
+ "get_tensor_model_parallel_rank",
888
+ return_value=xm_tp_rank):
889
+ model = get_model(vllm_config=self.vllm_config)
890
+ # Sync all pending XLA execution during model initialization and weight
891
+ # loading.
892
+ xm.mark_step()
893
+ xm.wait_device_ops()
894
+ self.model = model
895
+ self.sampler = TPUSampler()
896
+
897
+ @torch.no_grad()
898
+ def _dummy_run(self, num_tokens: int) -> None:
899
+ if self.is_multimodal_model:
900
+ input_ids = None
901
+ inputs_embeds = torch.zeros((num_tokens, self.hidden_size),
902
+ dtype=self.dtype,
903
+ device=self.device)
904
+ else:
905
+ input_ids = torch.zeros((num_tokens),
906
+ dtype=torch.int32,
907
+ device=self.device)
908
+ inputs_embeds = None
909
+ actual_num_reqs = min(num_tokens, self.max_num_reqs)
910
+ position_ids = torch.zeros(num_tokens,
911
+ dtype=torch.int32,
912
+ device=self.device)
913
+ slot_mapping = torch.zeros(num_tokens,
914
+ dtype=torch.int64,
915
+ device=self.device)
916
+ block_tables = torch.zeros(
917
+ (self.max_num_reqs, self.block_table_cpu.shape[1]),
918
+ dtype=torch.int32,
919
+ device=self.device)
920
+ query_lens = [1] * self.max_num_reqs
921
+ query_start_loc = torch.cumsum(torch.tensor([0] + query_lens,
922
+ dtype=torch.int32),
923
+ dim=0,
924
+ dtype=torch.int32).to(self.device)
925
+ context_lens = torch.ones((self.max_num_reqs, ),
926
+ dtype=torch.int32,
927
+ device=self.device)
928
+ num_seqs = torch.tensor([actual_num_reqs],
929
+ dtype=torch.int32,
930
+ device=self.device)
931
+ attn_metadata = PallasMetadata(
932
+ slot_mapping=slot_mapping,
933
+ block_tables=block_tables,
934
+ context_lens=context_lens,
935
+ query_start_loc=query_start_loc,
936
+ num_seqs=num_seqs,
937
+ )
938
+
939
+ if self.is_multimodal_model:
940
+ torch._dynamo.mark_dynamic(inputs_embeds, 0)
941
+ else:
942
+ torch._dynamo.mark_dynamic(input_ids, 0)
943
+ torch._dynamo.mark_dynamic(position_ids, 0)
944
+ torch._dynamo.mark_dynamic(attn_metadata.slot_mapping, 0)
945
+
946
+ with set_forward_context(attn_metadata, self.vllm_config, 0):
947
+ out = self.model(input_ids=input_ids,
948
+ positions=position_ids,
949
+ inputs_embeds=inputs_embeds)
950
+ self._hidden_states_dtype = out.dtype
951
+
952
+ def _precompile_mm_encoder(self) -> None:
953
+ # Pre-compile MM encoder for all supported data modalities.
954
+ hf_config = self.vllm_config.model_config.hf_config
955
+ for mode, max_items_by_mode in \
956
+ self.max_num_mm_items_by_modality.items():
957
+ logger.info(
958
+ "Compiling Multimodal %s Encoder with different input"
959
+ " shapes.", mode)
960
+ start = time.perf_counter()
961
+ # No padding for MM encoder just yet.
962
+ for num_items in range(1, max_items_by_mode + 1):
963
+ logger.info(" -- mode: %s items: %d", mode, num_items)
964
+ batched_dummy_mm_inputs = self._get_mm_dummy_batch(
965
+ mode, num_items)
966
+ # Run multimodal encoder.
967
+ xm.mark_step()
968
+ mm_embeds = self.model.\
969
+ get_multimodal_embeddings(**batched_dummy_mm_inputs)
970
+ xm.mark_step()
971
+ num_patches = mm_embeds[0].shape[0]
972
+ items_size = num_patches * num_items
973
+
974
+ # NOTE (NickLucche) pre-compile `get_input_embeddings` when mm
975
+ # embeddings are present. We assume `--disable_chunked_mm_input`,
976
+ # hence only whole items can be scheduled. This implies we just
977
+ # need to compile when `num_items` fits the (padded) `input_ids`
978
+ for num_tokens in self.num_tokens_paddings:
979
+ if num_tokens >= items_size:
980
+ # XLA Workaround: if torch.zeros(..device) is used, XLA
981
+ # compiles a scalar+expansion op, which won't match
982
+ # the graph generated at runtime. CPU->TPU must be used
983
+ placeholders_ids = torch.zeros(num_tokens,
984
+ dtype=torch.int32,
985
+ device="cpu")
986
+ # Align placeholders and actual num mm_embeddings.
987
+ placeholders_ids[:items_size] = \
988
+ hf_config.image_token_index
989
+
990
+ placeholders_ids = placeholders_ids.to(self.device)
991
+ # Assign outputs or the graph will be cut short.
992
+ a, b = self._get_model_inputs(placeholders_ids,
993
+ [mm_embeds])
994
+ assert a is None
995
+ xm.mark_step()
996
+
997
+ # Pre-compile `get_input_embeddings` when mm_embeddings are not
998
+ # present. Chunk is only made of text, no mm_placeholders.
999
+ for num_tokens in self.num_tokens_paddings:
1000
+ placeholders_ids = torch.zeros(num_tokens,
1001
+ dtype=torch.int32,
1002
+ device="cpu")
1003
+ placeholders_ids = placeholders_ids.to(self.device)
1004
+ a, b = self._get_model_inputs(placeholders_ids, [])
1005
+ assert a is None
1006
+ xm.mark_step()
1007
+
1008
+ xm.wait_device_ops()
1009
+ end = time.perf_counter()
1010
+ logger.info(
1011
+ "Multimodal %s Encoder compilation finished in in %.2f "
1012
+ "[secs].", mode, end - start)
1013
+
1014
+ def _precompile_backbone(self) -> None:
1015
+ logger.info("Compiling the model with different input shapes.")
1016
+ start = time.perf_counter()
1017
+ for num_tokens in self.num_tokens_paddings:
1018
+ logger.info(" -- num_tokens: %d", num_tokens)
1019
+ self._dummy_run(num_tokens)
1020
+ xm.wait_device_ops()
1021
+ end = time.perf_counter()
1022
+ logger.info("Compilation finished in %.2f [secs].", end - start)
1023
+ self._update_num_xla_graphs("model backbone")
1024
+
1025
+ def _precompile_select_hidden_states(self) -> None:
1026
+ # Compile hidden state selection function for bucketed
1027
+ # n_tokens x max_num_reqs. Graph is really small so this is fine.
1028
+ logger.info(
1029
+ "Compiling select_hidden_states with different input shapes.")
1030
+ start = time.perf_counter()
1031
+ hsize = self.model_config.get_hidden_size()
1032
+ for num_tokens in self.num_tokens_paddings:
1033
+ dummy_hidden = torch.zeros((num_tokens, hsize),
1034
+ device=self.device,
1035
+ dtype=self._hidden_states_dtype)
1036
+ torch._dynamo.mark_dynamic(dummy_hidden, 0)
1037
+ for num_reqs in self.num_reqs_paddings:
1038
+ indices = torch.zeros(num_reqs,
1039
+ dtype=torch.int32,
1040
+ device=self.device)
1041
+ torch._dynamo.mark_dynamic(indices, 0)
1042
+ self.select_hidden_states(dummy_hidden, indices)
1043
+ logger.info(" -- num_tokens: %d, num_seqs: %d", num_tokens,
1044
+ num_reqs)
1045
+ # There can't be more requests than tokens, but do compile for the
1046
+ # next bigger value in case num_tokens uses bucketed padding.
1047
+ if num_reqs >= min(num_tokens, self.max_num_reqs):
1048
+ break
1049
+ xm.wait_device_ops()
1050
+ end = time.perf_counter()
1051
+ logger.info("Compilation finished in %.2f [secs].", end - start)
1052
+ self._update_num_xla_graphs("select_hidden_states")
1053
+
1054
+ def _precompile_compute_logits(self) -> None:
1055
+ logger.info("Compiling compute_logits with different input shapes.")
1056
+ start = time.perf_counter()
1057
+ hsize = self.model_config.get_hidden_size()
1058
+ for num_reqs in self.num_reqs_paddings:
1059
+ dummy_hidden = torch.zeros((num_reqs, hsize),
1060
+ device=self.device,
1061
+ dtype=self._hidden_states_dtype)
1062
+ torch._dynamo.mark_dynamic(dummy_hidden, 0)
1063
+ self.compute_logits(dummy_hidden)
1064
+ logger.info(" -- num_seqs: %d", num_reqs)
1065
+ xm.wait_device_ops()
1066
+ end = time.perf_counter()
1067
+ logger.info("Compilation finished in %.2f [secs].", end - start)
1068
+ self._update_num_xla_graphs("compute_logits")
1069
+
1070
+ def _precompile_structured_decoding(self) -> None:
1071
+ logger.info(
1072
+ "Compiling structured_decoding with different input shapes.")
1073
+ start = time.perf_counter()
1074
+ for num_reqs in self.num_reqs_paddings:
1075
+ dummy_logits = torch.zeros((num_reqs, self.vocab_size),
1076
+ device=self.device,
1077
+ dtype=self._hidden_states_dtype)
1078
+ dummy_require_struct_decoding = \
1079
+ self.require_structured_out_cpu[:num_reqs].to(self.device)
1080
+ dummy_grammar_bitmask = \
1081
+ self.grammar_bitmask_cpu[:num_reqs].to(self.device)
1082
+ # The first dimension of the above 3 dummy tensors cannot be
1083
+ # mark_dynamic because some operations in structured_decode require
1084
+ # them to be static.
1085
+ arange = self.structured_decode_arange.to(self.device)
1086
+ self.structured_decode(dummy_require_struct_decoding,
1087
+ dummy_grammar_bitmask, dummy_logits, arange)
1088
+ logger.info(" -- num_seqs: %d", num_reqs)
1089
+ xm.wait_device_ops()
1090
+ end = time.perf_counter()
1091
+ logger.info("Compilation finished in %.2f [secs].", end - start)
1092
+ self._update_num_xla_graphs("structured_decoding")
1093
+
1094
+ def _precompile_sample_from_logits(self) -> None:
1095
+ logger.info(
1096
+ "Compiling sample_from_logits with different input shapes.")
1097
+ start = time.perf_counter()
1098
+ for num_reqs in self.num_reqs_paddings:
1099
+ dummy_logits = torch.zeros((num_reqs, self.vocab_size),
1100
+ device=self.device,
1101
+ dtype=self._hidden_states_dtype)
1102
+ # The first dimension of dummy_logits cannot be mark_dynamic
1103
+ # because some operations in the sampler require it to be static.
1104
+ for all_greedy in [False, True]:
1105
+ generate_params_if_all_greedy = not all_greedy
1106
+ sampling_metadata = (
1107
+ TPUSupportedSamplingMetadata.from_input_batch(
1108
+ self.input_batch,
1109
+ num_reqs,
1110
+ self.device,
1111
+ generate_params_if_all_greedy,
1112
+ ))
1113
+ sampling_metadata.all_greedy = all_greedy
1114
+ self.sample_from_logits(dummy_logits, sampling_metadata)
1115
+ logger.info(" -- num_seqs: %d", num_reqs)
1116
+ xm.wait_device_ops()
1117
+ end = time.perf_counter()
1118
+ logger.info("Compilation finished in %.2f [secs].", end - start)
1119
+ self._update_num_xla_graphs("sample_from_logits")
1120
+
1121
+ def capture_model(self) -> None:
1122
+ """
1123
+ Precompile all the subgraphs with possible input shapes.
1124
+ """
1125
+ self._precompile_mm_encoder()
1126
+ self._precompile_backbone()
1127
+ self._precompile_select_hidden_states()
1128
+ self._precompile_compute_logits()
1129
+ self._precompile_structured_decoding()
1130
+ self._precompile_sample_from_logits()
1131
+
1132
+ def profile_run(
1133
+ self,
1134
+ num_tokens: int,
1135
+ ) -> None:
1136
+ # Profile with multimodal encoder & encoder cache.
1137
+ # TODO: handle encoder-decoder models once we support them.
1138
+ if (self.is_multimodal_model and self.max_num_encoder_input_tokens > 0
1139
+ and self.encoder_cache_size > 0):
1140
+
1141
+ # NOTE: Currently the model is profiled with a single non-text
1142
+ # modality with the max possible input tokens even when
1143
+ # it supports multiple.
1144
+ dummy_data_modality, max_num_mm_items = max(
1145
+ self.max_num_mm_items_by_modality.items(), key=lambda t: t[1])
1146
+
1147
+ encoder_budget = min(self.max_num_encoder_input_tokens,
1148
+ self.encoder_cache_size)
1149
+
1150
+ logger.info(
1151
+ "Encoder cache will be initialized with a budget of %d tokens,"
1152
+ " and profiled with %s %s items of the maximum feature size.",
1153
+ encoder_budget, max_num_mm_items, dummy_data_modality)
1154
+
1155
+ # Create dummy batch of multimodal inputs.
1156
+ batched_dummy_mm_inputs = self._get_mm_dummy_batch(
1157
+ dummy_data_modality, max_num_mm_items)
1158
+
1159
+ # Run multimodal encoder.
1160
+ # Isolate encoder graph from post-processing to minimize
1161
+ # impact of recompilation until it's fixed.
1162
+ start = time.perf_counter()
1163
+ xm.mark_step()
1164
+ dummy_encoder_outputs = self.model.get_multimodal_embeddings(
1165
+ **batched_dummy_mm_inputs)
1166
+ xm.mark_step()
1167
+ xm.wait_device_ops()
1168
+ end = time.perf_counter()
1169
+ logger.info(
1170
+ "Multimodal Encoder profiling finished in in %.2f [secs].",
1171
+ end - start)
1172
+
1173
+ assert len(dummy_encoder_outputs) == max_num_mm_items, (
1174
+ "Expected dimension 0 of encoder outputs to match the number "
1175
+ f"of multimodal data items: {max_num_mm_items}, got "
1176
+ f"{len(dummy_encoder_outputs)=} instead. This is most likely "
1177
+ "due to the 'get_multimodal_embeddings' method of the model "
1178
+ "not implemented correctly.")
1179
+
1180
+ # Cache the dummy encoder outputs.
1181
+ self.encoder_cache["tmp"] = dict(enumerate(dummy_encoder_outputs))
1182
+
1183
+ # Trigger compilation for general shape.
1184
+ self._dummy_run(num_tokens)
1185
+
1186
+ xm.mark_step()
1187
+ xm.wait_device_ops()
1188
+ self.encoder_cache.clear()
1189
+ gc.collect()
1190
+
1191
+ def initialize_kv_cache(self, kv_cache_config: KVCacheConfig) -> None:
1192
+ """
1193
+ Initialize KV cache based on `kv_cache_config`.
1194
+ Args:
1195
+ kv_cache_config: Configuration for the KV cache, including the KV
1196
+ cache size of each layer
1197
+ """
1198
+ if len(kv_cache_config.kv_cache_groups) > 1:
1199
+ raise NotImplementedError(
1200
+ "Hybrid models with more than one KV cache type are not "
1201
+ "supported yet.")
1202
+
1203
+ kv_caches: dict[str, torch.Tensor] = {}
1204
+
1205
+ for kv_cache_group in kv_cache_config.kv_cache_groups:
1206
+ kv_cache_spec = kv_cache_group.kv_cache_spec
1207
+ for layer_name in kv_cache_group.layer_names:
1208
+ tensor_config = kv_cache_config.tensors[layer_name]
1209
+ assert tensor_config.size % kv_cache_spec.page_size_bytes == 0
1210
+ num_blocks = tensor_config.size // kv_cache_spec.page_size_bytes
1211
+ if isinstance(kv_cache_spec, AttentionSpec):
1212
+ kv_cache_shape = PallasAttentionBackend.get_kv_cache_shape(
1213
+ num_blocks, kv_cache_spec.block_size,
1214
+ kv_cache_spec.num_kv_heads, kv_cache_spec.head_size)
1215
+ dtype = kv_cache_spec.dtype
1216
+
1217
+ tpu_kv_cache = torch.zeros(kv_cache_shape,
1218
+ dtype=dtype,
1219
+ device=self.device)
1220
+
1221
+ kv_caches[layer_name] = tpu_kv_cache
1222
+ else:
1223
+ raise NotImplementedError
1224
+
1225
+ bind_kv_cache(
1226
+ kv_caches,
1227
+ self.vllm_config.compilation_config.static_forward_context,
1228
+ self.kv_caches)
1229
+
1230
+ def reset_dynamo_cache(self):
1231
+ if self.is_multimodal_model:
1232
+ compiled_model = self.model.get_language_model().model
1233
+ else:
1234
+ compiled_model = self.model.model
1235
+ if isinstance(compiled_model, TorchCompileWrapperWithCustomDispatcher):
1236
+ logger.info("Clear dynamo cache and cached dynamo bytecode.")
1237
+ torch._dynamo.eval_frame.remove_from_cache(
1238
+ compiled_model.original_code_object)
1239
+ compiled_model.compiled_codes.clear()
1240
+
1241
+ @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
1242
+ def select_hidden_states(self, hidden_states, indices_do_sample):
1243
+ return hidden_states[indices_do_sample]
1244
+
1245
+ @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
1246
+ def compute_logits(self,
1247
+ sample_hidden_states: torch.Tensor) -> torch.Tensor:
1248
+ return self.model.compute_logits(sample_hidden_states, None)
1249
+
1250
+ @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
1251
+ def sample_from_logits(
1252
+ self, logits: torch.Tensor,
1253
+ sampling_metadata: TPUSupportedSamplingMetadata) -> torch.Tensor:
1254
+ if sampling_metadata.all_greedy:
1255
+ out_tokens = torch.argmax(logits, dim=-1, keepdim=True)
1256
+ else:
1257
+ out_tokens = self.sampler(logits,
1258
+ sampling_metadata).sampled_token_ids
1259
+ return out_tokens
1260
+
1261
+ @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
1262
+ def structured_decode(self, require_struct_decoding: torch.Tensor,
1263
+ grammar_bitmask: torch.Tensor, logits: torch.Tensor,
1264
+ arange: torch.Tensor) -> torch.Tensor:
1265
+ return torch.where(
1266
+ require_struct_decoding,
1267
+ self.apply_grammar_bitmask(logits, grammar_bitmask, arange),
1268
+ logits)
1269
+
1270
+ def apply_grammar_bitmask(self, logits: torch.Tensor,
1271
+ grammar_bitmask: torch.Tensor,
1272
+ arange: torch.Tensor):
1273
+ assert (logits.shape[0] == grammar_bitmask.shape[0])
1274
+ logits_cloned = logits.clone()
1275
+ for i in range(logits.shape[0]):
1276
+ unpacked_bitmask = (torch.bitwise_right_shift(
1277
+ grammar_bitmask[i][:, None], arange[None, :]) & 1) == 0
1278
+ unpacked_bitmask = unpacked_bitmask.reshape(-1)[:self.vocab_size]
1279
+ logits_cloned[i] = logits_cloned[i].masked_fill(
1280
+ unpacked_bitmask, -float("inf"))
1281
+ return logits_cloned
1282
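# Editor's sketch (not part of the diff): unpacking one packed int32 bitmask row;
# bit j of word w covers vocab id w * 32 + j, and a zero bit means "disallowed".
import torch

vocab_size = 40
arange = torch.arange(32, dtype=torch.int32)
packed_row = torch.zeros((vocab_size + 31) // 32, dtype=torch.int32)
packed_row[0] = 0b101                                  # allow vocab ids 0 and 2 only

unpacked_bitmask = (torch.bitwise_right_shift(
    packed_row[:, None], arange[None, :]) & 1) == 0
unpacked_bitmask = unpacked_bitmask.reshape(-1)[:vocab_size]
# unpacked_bitmask[i] is True exactly for the token ids the grammar forbids.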
+
1283
+ def get_multimodal_embeddings(self, *args, **kwargs):
1284
+ return self.model.get_multimodal_embeddings(*args, **kwargs)
1285
+
1286
+ def get_input_embeddings(self, *args, **kwargs):
1287
+ return self.model.get_input_embeddings(*args, **kwargs)
1288
+
1289
+ def prepare_structured_decoding_input(
1290
+ self, logits: torch.Tensor, scheduler_output: "SchedulerOutput"
1291
+ ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
1292
+ grammar_bitmask = scheduler_output.grammar_bitmask
1293
+ assert grammar_bitmask is not None
1294
+ num_reqs, _ = logits.shape
1295
+
1296
+ # Reset pre-allocated tensors
1297
+ self.grammar_bitmask_cpu.zero_()
1298
+ self.require_structured_out_cpu.zero_()
1299
+
1300
+ # We receive the structured output bitmask from the scheduler, but the
1301
+ # indices of the requests in the batch may not match the indices of
1302
+ # the bitmask since the scheduler doesn't know how the tpu runner is
1303
+ # ordering the requests in the batch. We need to match the order of
1304
+ # bitmask with the order of requests
1305
+ struct_out_indices: list[int] = []
1306
+ mask_indices: list[int] = []
1307
+ for req_id in self.input_batch.req_ids:
1308
+ mask_index = scheduler_output.structured_output_request_ids.get(
1309
+ req_id)
1310
+ if mask_index is None:
1311
+ continue
1312
+ batch_index = self.input_batch.req_id_to_index[req_id]
1313
+ struct_out_indices.append(batch_index)
1314
+ mask_indices.append(mask_index)
1315
+ self.grammar_bitmask_cpu[struct_out_indices] = torch.from_numpy(
1316
+ grammar_bitmask[mask_indices])
1317
+ # It's not guaranteed that all requests in this batch require
1318
+ # structured output, so create a bool tensor to represent
1319
+ # the requests that need structured output.
1320
+ struct_out_indices = torch.tensor(struct_out_indices, dtype=torch.long)
1321
+ self.require_structured_out_cpu[struct_out_indices] = True
1322
+ return self.require_structured_out_cpu[:num_reqs].to(logits.device), \
1323
+ self.grammar_bitmask_cpu[:num_reqs].to(logits.device), \
1324
+ self.structured_decode_arange.to(logits.device)
1325
+
1326
+ def _get_mm_dummy_batch(self, modality: str,
1327
+ batch_size: int) -> BatchedTensorInputs:
1328
+ # Dummy data for pre-compiling multimodal models.
1329
+ dummy_request_data = self.mm_registry.get_decoder_dummy_data(
1330
+ model_config=self.model_config,
1331
+ seq_len=self.max_num_tokens,
1332
+ )
1333
+ dummy_mm_data = dummy_request_data.multi_modal_data
1334
+
1335
+ # Dummy data definition in V0 may contain multiple multimodal items
1336
+ # (e.g., multiple images) for a single request, therefore here we
1337
+ # always replicate the first item max_num_mm_items times since in V1
1338
+ # they are scheduled to be processed separately.
1339
+ assert isinstance(dummy_mm_data, MultiModalKwargs), (
1340
+ "Expected dummy multimodal data to be of type "
1341
+ f"MultiModalKwargs, got {type(dummy_mm_data)=} instead. "
1342
+ "This is most likely due to the model not having a merged "
1343
+ "processor.")
1344
+
1345
+ # When models have a merged processor, their dummy data is
1346
+ # already batched `MultiModalKwargs`, therefore we take the first
1347
+ # `MultiModalKwargsItem` from the desired modality to profile on.
1348
+ dummy_mm_item = dummy_mm_data.get_item(modality=modality, item_index=0)
1349
+ dummy_mm_kwargs = MultiModalKwargs.from_items([dummy_mm_item])
1350
+
1351
+ batched_dummy_mm_inputs = MultiModalKwargs.batch([dummy_mm_kwargs] *
1352
+ batch_size)
1353
+ return MultiModalKwargs.as_kwargs(batched_dummy_mm_inputs,
1354
+ device=self.device)
1355
+
1356
+
1357
+ def _get_req_paddings(min_req_size: int, max_req_size: int) -> list[int]:
1358
+ logger.info("Preparing request paddings:")
1359
+ # assert min_req_size is power of 2
1360
+ assert (min_req_size & (min_req_size - 1) == 0) and min_req_size > 0
1361
+ paddings: list = []
1362
+ num = max(MIN_NUM_SEQS, min_req_size)
1363
+ while num <= max_req_size and (len(paddings) == 0 or paddings[-1] != num):
1364
+ paddings.append(num)
1365
+ logger.info(" %d", num)
1366
+ num = _get_padded_num_reqs_with_upper_limit(num + 1, max_req_size)
1367
+ return paddings
1368
+
1369
+
1370
+ def _get_padded_num_reqs_with_upper_limit(x: int, upper_limit: int) -> int:
1371
+ res = MIN_NUM_SEQS if x <= MIN_NUM_SEQS else 1 << (x - 1).bit_length()
1372
+ return min(res, upper_limit)
1373
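# Editor's sketch (not part of the diff; assumes MIN_NUM_SEQS == 8): the number of
# requests is padded up to the next power of two, capped at the upper limit.
def _padded_num_reqs(x: int, upper_limit: int, min_num_seqs: int = 8) -> int:
    res = min_num_seqs if x <= min_num_seqs else 1 << (x - 1).bit_length()
    return min(res, upper_limit)

assert _padded_num_reqs(3, 64) == 8
assert _padded_num_reqs(9, 64) == 16
assert _padded_num_reqs(100, 64) == 64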
+
1374
+
1375
+ def _get_token_paddings(min_token_size: int, max_token_size: int,
1376
+ padding_gap: int) -> list[int]:
1377
+ """Generate a list of padding size, starting from min_token_size,
1378
+ ending with a number that can cover max_token_size
1379
+
1380
+ If padding_gap == 0 then:
1381
+ increase 2X each time (exponential)
1382
+ else:
1383
+ first double the size while it does not exceed padding_gap,
1384
+ then increase the padding size by padding_gap each step.
1385
+ """
1386
+ # assert min_token_size is power of 2
1387
+ assert (min_token_size & (min_token_size - 1) == 0) and min_token_size > 0
1388
+ paddings = []
1389
+ num = min_token_size
1390
+
1391
+ if padding_gap == 0:
1392
+ logger.info("Using exponential token paddings:")
1393
+ while True:
1394
+ logger.info(" %d", num)
1395
+ paddings.append(num)
1396
+ if num >= max_token_size:
1397
+ break
1398
+ num *= 2
1399
+ else:
1400
+ logger.info("Using incremental token paddings:")
1401
+ while num <= padding_gap:
1402
+ logger.info(" %d", num)
1403
+ paddings.append(num)
1404
+ num *= 2
1405
+ num //= 2
1406
+ while num < max_token_size:
1407
+ num += padding_gap
1408
+ logger.info(" %d", num)
1409
+ paddings.append(num)
1410
+
1411
+ return paddings
1412
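# Editor's sketch (not part of the diff; illustrative sizes only): the two padding
# schedules produced by _get_token_paddings for min_token_size=16, max_token_size=300.
exponential = _get_token_paddings(16, 300, padding_gap=0)
# -> [16, 32, 64, 128, 256, 512]
incremental = _get_token_paddings(16, 300, padding_gap=64)
# -> [16, 32, 64, 128, 192, 256, 320]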
+
1413
+
1414
+ def _get_padded_token_len(paddings: list[int], x: int) -> int:
1415
+ """Return the first element in paddings list greater or equal to x.
1416
+ """
1417
+ index = bisect.bisect_left(paddings, x)
1418
+ assert index < len(paddings)
1419
+ return paddings[index]