vllm_cpu_avx512bf16-0.9.0.post2-cp310-cp310-manylinux_2_17_x86_64.whl

This diff shows the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
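The wheel filename above follows the standard PEP 427 naming scheme: distribution name, version, Python tag, ABI tag, and platform tag, joined by hyphens (hyphens inside the distribution name itself are normalized to underscores). As a minimal sketch, the components can be split out in Python; the filename string is just the wheel named above:

    # Minimal sketch: split a PEP 427 wheel filename into its five fields.
    # Assumes the common 5-field form (no optional build tag).
    filename = "vllm_cpu_avx512bf16-0.9.0.post2-cp310-cp310-manylinux_2_17_x86_64.whl"
    name, version, py_tag, abi_tag, plat_tag = filename.removesuffix(".whl").split("-")
    print(name, version, py_tag, abi_tag, plat_tag)
    # -> vllm_cpu_avx512bf16 0.9.0.post2 cp310 cp310 manylinux_2_17_x86_64

The cp310-cp310 tags mark a build for CPython 3.10, and manylinux_2_17_x86_64 marks x86-64 Linux with glibc 2.17 or newer, consistent with the compiled extension (vllm/_C.abi3.so) listed first in the file manifest below.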
Files changed (1175)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +170 -0
  3. vllm/_custom_ops.py +1742 -0
  4. vllm/_ipex_ops.py +243 -0
  5. vllm/_version.py +34 -0
  6. vllm/adapter_commons/__init__.py +0 -0
  7. vllm/adapter_commons/layers.py +15 -0
  8. vllm/adapter_commons/models.py +105 -0
  9. vllm/adapter_commons/request.py +25 -0
  10. vllm/adapter_commons/utils.py +92 -0
  11. vllm/adapter_commons/worker_manager.py +38 -0
  12. vllm/assets/__init__.py +0 -0
  13. vllm/assets/audio.py +44 -0
  14. vllm/assets/base.py +40 -0
  15. vllm/assets/image.py +33 -0
  16. vllm/assets/video.py +114 -0
  17. vllm/attention/__init__.py +19 -0
  18. vllm/attention/backends/__init__.py +0 -0
  19. vllm/attention/backends/abstract.py +306 -0
  20. vllm/attention/backends/blocksparse_attn.py +457 -0
  21. vllm/attention/backends/cpu_mla.py +305 -0
  22. vllm/attention/backends/dual_chunk_flash_attn.py +1494 -0
  23. vllm/attention/backends/flash_attn.py +999 -0
  24. vllm/attention/backends/flashinfer.py +1100 -0
  25. vllm/attention/backends/flashmla.py +242 -0
  26. vllm/attention/backends/hpu_attn.py +309 -0
  27. vllm/attention/backends/ipex_attn.py +394 -0
  28. vllm/attention/backends/mla/__init__.py +0 -0
  29. vllm/attention/backends/mla/common.py +1381 -0
  30. vllm/attention/backends/pallas.py +347 -0
  31. vllm/attention/backends/placeholder_attn.py +399 -0
  32. vllm/attention/backends/rocm_aiter_mla.py +435 -0
  33. vllm/attention/backends/rocm_flash_attn.py +970 -0
  34. vllm/attention/backends/torch_sdpa.py +691 -0
  35. vllm/attention/backends/triton_mla.py +113 -0
  36. vllm/attention/backends/utils.py +609 -0
  37. vllm/attention/backends/xformers.py +798 -0
  38. vllm/attention/layer.py +452 -0
  39. vllm/attention/ops/__init__.py +0 -0
  40. vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
  41. vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +432 -0
  42. vllm/attention/ops/blocksparse_attention/interface.py +238 -0
  43. vllm/attention/ops/blocksparse_attention/utils.py +245 -0
  44. vllm/attention/ops/chunked_prefill_paged_decode.py +367 -0
  45. vllm/attention/ops/flashmla.py +115 -0
  46. vllm/attention/ops/hpu_paged_attn.py +87 -0
  47. vllm/attention/ops/ipex_attn.py +194 -0
  48. vllm/attention/ops/merge_attn_states.py +42 -0
  49. vllm/attention/ops/nki_flash_attn.py +905 -0
  50. vllm/attention/ops/paged_attn.py +255 -0
  51. vllm/attention/ops/prefix_prefill.py +901 -0
  52. vllm/attention/ops/rocm_aiter_mla.py +99 -0
  53. vllm/attention/ops/rocm_aiter_paged_attn.py +101 -0
  54. vllm/attention/ops/triton_decode_attention.py +673 -0
  55. vllm/attention/ops/triton_flash_attention.py +1374 -0
  56. vllm/attention/ops/triton_merge_attn_states.py +96 -0
  57. vllm/attention/ops/triton_unified_attention.py +337 -0
  58. vllm/attention/selector.py +186 -0
  59. vllm/attention/utils/fa_utils.py +54 -0
  60. vllm/beam_search.py +82 -0
  61. vllm/benchmarks/__init__.py +0 -0
  62. vllm/benchmarks/datasets.py +921 -0
  63. vllm/benchmarks/endpoint_request_func.py +160 -0
  64. vllm/benchmarks/latency.py +184 -0
  65. vllm/benchmarks/serve.py +925 -0
  66. vllm/benchmarks/throughput.py +609 -0
  67. vllm/benchmarks/utils.py +69 -0
  68. vllm/collect_env.py +818 -0
  69. vllm/compilation/__init__.py +0 -0
  70. vllm/compilation/activation_quant_fusion.py +88 -0
  71. vllm/compilation/backends.py +560 -0
  72. vllm/compilation/base_piecewise_backend.py +71 -0
  73. vllm/compilation/collective_fusion.py +126 -0
  74. vllm/compilation/compiler_interface.py +533 -0
  75. vllm/compilation/counter.py +33 -0
  76. vllm/compilation/cuda_piecewise_backend.py +213 -0
  77. vllm/compilation/decorators.py +249 -0
  78. vllm/compilation/fix_functionalization.py +190 -0
  79. vllm/compilation/fusion.py +617 -0
  80. vllm/compilation/fx_utils.py +61 -0
  81. vllm/compilation/inductor_pass.py +114 -0
  82. vllm/compilation/monitor.py +38 -0
  83. vllm/compilation/multi_output_match.py +108 -0
  84. vllm/compilation/noop_elimination.py +136 -0
  85. vllm/compilation/pass_manager.py +77 -0
  86. vllm/compilation/sequence_parallelism.py +267 -0
  87. vllm/compilation/torch25_custom_graph_pass.py +41 -0
  88. vllm/compilation/vllm_inductor_pass.py +66 -0
  89. vllm/compilation/wrapper.py +129 -0
  90. vllm/config.py +4600 -0
  91. vllm/connections.py +173 -0
  92. vllm/core/__init__.py +0 -0
  93. vllm/core/block/__init__.py +0 -0
  94. vllm/core/block/block_table.py +398 -0
  95. vllm/core/block/common.py +370 -0
  96. vllm/core/block/cpu_gpu_block_allocator.py +440 -0
  97. vllm/core/block/interfaces.py +318 -0
  98. vllm/core/block/naive_block.py +465 -0
  99. vllm/core/block/prefix_caching_block.py +1134 -0
  100. vllm/core/block/utils.py +27 -0
  101. vllm/core/block_manager.py +520 -0
  102. vllm/core/evictor.py +156 -0
  103. vllm/core/interfaces.py +134 -0
  104. vllm/core/placeholder_block_space_manager.py +99 -0
  105. vllm/core/scheduler.py +2092 -0
  106. vllm/device_allocator/__init__.py +0 -0
  107. vllm/device_allocator/cumem.py +280 -0
  108. vllm/distributed/__init__.py +5 -0
  109. vllm/distributed/communication_op.py +40 -0
  110. vllm/distributed/device_communicators/__init__.py +0 -0
  111. vllm/distributed/device_communicators/all2all.py +126 -0
  112. vllm/distributed/device_communicators/base_device_communicator.py +260 -0
  113. vllm/distributed/device_communicators/cpu_communicator.py +144 -0
  114. vllm/distributed/device_communicators/cuda_communicator.py +167 -0
  115. vllm/distributed/device_communicators/cuda_wrapper.py +179 -0
  116. vllm/distributed/device_communicators/custom_all_reduce.py +303 -0
  117. vllm/distributed/device_communicators/custom_all_reduce_utils.py +258 -0
  118. vllm/distributed/device_communicators/hpu_communicator.py +45 -0
  119. vllm/distributed/device_communicators/neuron_communicator.py +19 -0
  120. vllm/distributed/device_communicators/pynccl.py +217 -0
  121. vllm/distributed/device_communicators/pynccl_wrapper.py +340 -0
  122. vllm/distributed/device_communicators/shm_broadcast.py +541 -0
  123. vllm/distributed/device_communicators/tpu_communicator.py +102 -0
  124. vllm/distributed/device_communicators/xpu_communicator.py +54 -0
  125. vllm/distributed/kv_events.py +296 -0
  126. vllm/distributed/kv_transfer/README.md +29 -0
  127. vllm/distributed/kv_transfer/__init__.py +11 -0
  128. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  129. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  130. vllm/distributed/kv_transfer/kv_connector/base.py +127 -0
  131. vllm/distributed/kv_transfer/kv_connector/factory.py +126 -0
  132. vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +98 -0
  133. vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +202 -0
  134. vllm/distributed/kv_transfer/kv_connector/simple_connector.py +328 -0
  135. vllm/distributed/kv_transfer/kv_connector/utils.py +91 -0
  136. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +5 -0
  137. vllm/distributed/kv_transfer/kv_connector/v1/base.py +259 -0
  138. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +133 -0
  139. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +189 -0
  140. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +851 -0
  141. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +383 -0
  142. vllm/distributed/kv_transfer/kv_connector_agent.py +76 -0
  143. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  144. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +174 -0
  145. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +160 -0
  146. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +236 -0
  147. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  148. vllm/distributed/kv_transfer/kv_pipe/base.py +66 -0
  149. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +279 -0
  150. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +279 -0
  151. vllm/distributed/kv_transfer/kv_transfer_state.py +70 -0
  152. vllm/distributed/parallel_state.py +1294 -0
  153. vllm/distributed/utils.py +520 -0
  154. vllm/engine/__init__.py +0 -0
  155. vllm/engine/arg_utils.py +1649 -0
  156. vllm/engine/async_llm_engine.py +1274 -0
  157. vllm/engine/async_timeout.py +191 -0
  158. vllm/engine/llm_engine.py +2153 -0
  159. vllm/engine/metrics.py +717 -0
  160. vllm/engine/metrics_types.py +96 -0
  161. vllm/engine/multiprocessing/__init__.py +188 -0
  162. vllm/engine/multiprocessing/client.py +755 -0
  163. vllm/engine/multiprocessing/engine.py +459 -0
  164. vllm/engine/output_processor/__init__.py +0 -0
  165. vllm/engine/output_processor/interfaces.py +74 -0
  166. vllm/engine/output_processor/multi_step.py +215 -0
  167. vllm/engine/output_processor/single_step.py +144 -0
  168. vllm/engine/output_processor/stop_checker.py +130 -0
  169. vllm/engine/output_processor/util.py +27 -0
  170. vllm/engine/protocol.py +310 -0
  171. vllm/entrypoints/__init__.py +0 -0
  172. vllm/entrypoints/api_server.py +177 -0
  173. vllm/entrypoints/chat_utils.py +1298 -0
  174. vllm/entrypoints/cli/__init__.py +0 -0
  175. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  176. vllm/entrypoints/cli/benchmark/base.py +38 -0
  177. vllm/entrypoints/cli/benchmark/latency.py +29 -0
  178. vllm/entrypoints/cli/benchmark/main.py +53 -0
  179. vllm/entrypoints/cli/benchmark/serve.py +29 -0
  180. vllm/entrypoints/cli/benchmark/throughput.py +29 -0
  181. vllm/entrypoints/cli/collect_env.py +34 -0
  182. vllm/entrypoints/cli/main.py +62 -0
  183. vllm/entrypoints/cli/openai.py +204 -0
  184. vllm/entrypoints/cli/serve.py +141 -0
  185. vllm/entrypoints/cli/types.py +24 -0
  186. vllm/entrypoints/launcher.py +146 -0
  187. vllm/entrypoints/llm.py +1503 -0
  188. vllm/entrypoints/logger.py +49 -0
  189. vllm/entrypoints/openai/__init__.py +0 -0
  190. vllm/entrypoints/openai/api_server.py +1376 -0
  191. vllm/entrypoints/openai/cli_args.py +306 -0
  192. vllm/entrypoints/openai/logits_processors.py +89 -0
  193. vllm/entrypoints/openai/protocol.py +1890 -0
  194. vllm/entrypoints/openai/run_batch.py +439 -0
  195. vllm/entrypoints/openai/serving_chat.py +1192 -0
  196. vllm/entrypoints/openai/serving_classification.py +159 -0
  197. vllm/entrypoints/openai/serving_completion.py +590 -0
  198. vllm/entrypoints/openai/serving_embedding.py +200 -0
  199. vllm/entrypoints/openai/serving_engine.py +985 -0
  200. vllm/entrypoints/openai/serving_models.py +314 -0
  201. vllm/entrypoints/openai/serving_pooling.py +231 -0
  202. vllm/entrypoints/openai/serving_score.py +432 -0
  203. vllm/entrypoints/openai/serving_tokenization.py +151 -0
  204. vllm/entrypoints/openai/serving_transcription.py +421 -0
  205. vllm/entrypoints/openai/tool_parsers/__init__.py +22 -0
  206. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +163 -0
  207. vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +369 -0
  208. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +258 -0
  209. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +236 -0
  210. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +370 -0
  211. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +215 -0
  212. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +307 -0
  213. vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +302 -0
  214. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +266 -0
  215. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +342 -0
  216. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +111 -0
  217. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +296 -0
  218. vllm/entrypoints/openai/tool_parsers/utils.py +123 -0
  219. vllm/entrypoints/score_utils.py +49 -0
  220. vllm/entrypoints/ssl.py +74 -0
  221. vllm/entrypoints/utils.py +219 -0
  222. vllm/env_override.py +34 -0
  223. vllm/envs.py +896 -0
  224. vllm/executor/__init__.py +0 -0
  225. vllm/executor/executor_base.py +400 -0
  226. vllm/executor/mp_distributed_executor.py +243 -0
  227. vllm/executor/msgspec_utils.py +29 -0
  228. vllm/executor/multiproc_worker_utils.py +312 -0
  229. vllm/executor/ray_distributed_executor.py +700 -0
  230. vllm/executor/ray_utils.py +398 -0
  231. vllm/executor/uniproc_executor.py +138 -0
  232. vllm/forward_context.py +147 -0
  233. vllm/inputs/__init__.py +40 -0
  234. vllm/inputs/data.py +330 -0
  235. vllm/inputs/parse.py +150 -0
  236. vllm/inputs/preprocess.py +908 -0
  237. vllm/inputs/registry.py +214 -0
  238. vllm/jsontree.py +79 -0
  239. vllm/logger.py +211 -0
  240. vllm/logging_utils/__init__.py +7 -0
  241. vllm/logging_utils/dump_input.py +84 -0
  242. vllm/logging_utils/formatter.py +17 -0
  243. vllm/logits_process.py +118 -0
  244. vllm/lora/__init__.py +0 -0
  245. vllm/lora/fully_sharded_layers.py +354 -0
  246. vllm/lora/layers.py +1284 -0
  247. vllm/lora/lora.py +198 -0
  248. vllm/lora/models.py +817 -0
  249. vllm/lora/ops/__init__.py +0 -0
  250. vllm/lora/ops/torch_ops/__init__.py +15 -0
  251. vllm/lora/ops/torch_ops/lora_ops.py +115 -0
  252. vllm/lora/ops/triton_ops/__init__.py +11 -0
  253. vllm/lora/ops/triton_ops/kernel_utils.py +242 -0
  254. vllm/lora/ops/triton_ops/lora_expand_op.py +289 -0
  255. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +147 -0
  256. vllm/lora/ops/triton_ops/lora_shrink_op.py +243 -0
  257. vllm/lora/ops/triton_ops/utils.py +119 -0
  258. vllm/lora/ops/xla_ops/__init__.py +6 -0
  259. vllm/lora/ops/xla_ops/lora_ops.py +106 -0
  260. vllm/lora/ops/xla_ops/pallas.py +133 -0
  261. vllm/lora/peft_helper.py +135 -0
  262. vllm/lora/punica_wrapper/__init__.py +9 -0
  263. vllm/lora/punica_wrapper/punica_base.py +484 -0
  264. vllm/lora/punica_wrapper/punica_cpu.py +348 -0
  265. vllm/lora/punica_wrapper/punica_gpu.py +289 -0
  266. vllm/lora/punica_wrapper/punica_hpu.py +144 -0
  267. vllm/lora/punica_wrapper/punica_selector.py +19 -0
  268. vllm/lora/punica_wrapper/punica_tpu.py +325 -0
  269. vllm/lora/punica_wrapper/utils.py +163 -0
  270. vllm/lora/request.py +98 -0
  271. vllm/lora/resolver.py +84 -0
  272. vllm/lora/utils.py +239 -0
  273. vllm/lora/worker_manager.py +253 -0
  274. vllm/model_executor/__init__.py +15 -0
  275. vllm/model_executor/custom_op.py +151 -0
  276. vllm/model_executor/guided_decoding/__init__.py +180 -0
  277. vllm/model_executor/guided_decoding/guidance_decoding.py +62 -0
  278. vllm/model_executor/guided_decoding/guidance_logits_processors.py +103 -0
  279. vllm/model_executor/guided_decoding/guided_fields.py +42 -0
  280. vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +66 -0
  281. vllm/model_executor/guided_decoding/outlines_decoding.py +154 -0
  282. vllm/model_executor/guided_decoding/outlines_logits_processors.py +283 -0
  283. vllm/model_executor/guided_decoding/utils.py +241 -0
  284. vllm/model_executor/guided_decoding/xgrammar_decoding.py +425 -0
  285. vllm/model_executor/layers/__init__.py +0 -0
  286. vllm/model_executor/layers/activation.py +368 -0
  287. vllm/model_executor/layers/fused_moe/__init__.py +53 -0
  288. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  289. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  290. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  291. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  292. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  293. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  294. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  295. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  296. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  297. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  298. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  299. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  300. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  301. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  302. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  303. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  304. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  305. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  306. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  307. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  308. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  309. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  310. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  311. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  312. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  313. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  314. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  315. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  316. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  317. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  318. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  319. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  320. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  321. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  322. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  323. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  324. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  325. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  326. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  327. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  328. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  329. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  330. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  331. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  332. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  333. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  334. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  335. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  336. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  337. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  338. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  339. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  449. vllm/model_executor/layers/fused_moe/cutlass_moe.py +382 -0
  450. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +227 -0
  451. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +755 -0
  452. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +231 -0
  453. vllm/model_executor/layers/fused_moe/fused_moe.py +1722 -0
  454. vllm/model_executor/layers/fused_moe/layer.py +1366 -0
  455. vllm/model_executor/layers/fused_moe/modular_kernel.py +364 -0
  456. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +242 -0
  457. vllm/model_executor/layers/fused_moe/moe_pallas.py +83 -0
  458. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +188 -0
  459. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +59 -0
  460. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +146 -0
  461. vllm/model_executor/layers/fused_moe/prepare_finalize.py +60 -0
  462. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +372 -0
  463. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +112 -0
  464. vllm/model_executor/layers/fused_moe/utils.py +97 -0
  465. vllm/model_executor/layers/layernorm.py +287 -0
  466. vllm/model_executor/layers/lightning_attn.py +651 -0
  467. vllm/model_executor/layers/linear.py +1523 -0
  468. vllm/model_executor/layers/logits_processor.py +196 -0
  469. vllm/model_executor/layers/mamba/__init__.py +0 -0
  470. vllm/model_executor/layers/mamba/mamba2_metadata.py +124 -0
  471. vllm/model_executor/layers/mamba/mamba_mixer.py +244 -0
  472. vllm/model_executor/layers/mamba/mamba_mixer2.py +615 -0
  473. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  474. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +104 -0
  475. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +413 -0
  476. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +261 -0
  477. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +588 -0
  478. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +750 -0
  479. vllm/model_executor/layers/mamba/ops/ssd_combined.py +231 -0
  480. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +205 -0
  481. vllm/model_executor/layers/pooler.py +343 -0
  482. vllm/model_executor/layers/quantization/__init__.py +156 -0
  483. vllm/model_executor/layers/quantization/aqlm.py +375 -0
  484. vllm/model_executor/layers/quantization/auto_round.py +308 -0
  485. vllm/model_executor/layers/quantization/awq.py +185 -0
  486. vllm/model_executor/layers/quantization/awq_marlin.py +518 -0
  487. vllm/model_executor/layers/quantization/awq_triton.py +319 -0
  488. vllm/model_executor/layers/quantization/base_config.py +150 -0
  489. vllm/model_executor/layers/quantization/bitblas.py +460 -0
  490. vllm/model_executor/layers/quantization/bitsandbytes.py +397 -0
  491. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  492. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +644 -0
  493. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1252 -0
  494. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +21 -0
  495. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +357 -0
  496. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +54 -0
  497. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +159 -0
  498. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +92 -0
  499. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +120 -0
  500. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +149 -0
  501. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +110 -0
  502. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +200 -0
  503. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +205 -0
  504. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +214 -0
  505. vllm/model_executor/layers/quantization/deepspeedfp.py +194 -0
  506. vllm/model_executor/layers/quantization/experts_int8.py +195 -0
  507. vllm/model_executor/layers/quantization/fbgemm_fp8.py +171 -0
  508. vllm/model_executor/layers/quantization/fp8.py +876 -0
  509. vllm/model_executor/layers/quantization/gguf.py +564 -0
  510. vllm/model_executor/layers/quantization/gptq.py +277 -0
  511. vllm/model_executor/layers/quantization/gptq_bitblas.py +444 -0
  512. vllm/model_executor/layers/quantization/gptq_marlin.py +647 -0
  513. vllm/model_executor/layers/quantization/gptq_marlin_24.py +296 -0
  514. vllm/model_executor/layers/quantization/hqq_marlin.py +331 -0
  515. vllm/model_executor/layers/quantization/ipex_quant.py +249 -0
  516. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  517. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +89 -0
  518. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +82 -0
  519. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
  520. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +299 -0
  521. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +142 -0
  522. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +119 -0
  523. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +130 -0
  524. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +66 -0
  525. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +86 -0
  526. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +119 -0
  527. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +136 -0
  528. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +40 -0
  529. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +104 -0
  530. vllm/model_executor/layers/quantization/kv_cache.py +138 -0
  531. vllm/model_executor/layers/quantization/marlin.py +260 -0
  532. vllm/model_executor/layers/quantization/modelopt.py +734 -0
  533. vllm/model_executor/layers/quantization/moe_wna16.py +448 -0
  534. vllm/model_executor/layers/quantization/neuron_quant.py +68 -0
  535. vllm/model_executor/layers/quantization/ptpc_fp8.py +126 -0
  536. vllm/model_executor/layers/quantization/qqq.py +274 -0
  537. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  538. vllm/model_executor/layers/quantization/quark/quark.py +440 -0
  539. vllm/model_executor/layers/quantization/quark/quark_moe.py +236 -0
  540. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +8 -0
  541. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +54 -0
  542. vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +125 -0
  543. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +145 -0
  544. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +121 -0
  545. vllm/model_executor/layers/quantization/quark/utils.py +104 -0
  546. vllm/model_executor/layers/quantization/schema.py +85 -0
  547. vllm/model_executor/layers/quantization/torchao.py +143 -0
  548. vllm/model_executor/layers/quantization/tpu_int8.py +120 -0
  549. vllm/model_executor/layers/quantization/utils/__init__.py +5 -0
  550. vllm/model_executor/layers/quantization/utils/allspark_utils.py +51 -0
  551. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +207 -0
  552. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  553. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  554. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  555. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  556. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  557. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  558. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  559. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  560. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  561. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  562. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  563. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  564. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  565. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  566. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  567. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  568. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  569. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  570. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  571. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  572. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  573. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  574. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  575. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  576. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  577. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  578. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  579. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  580. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  581. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  582. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  583. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  584. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  585. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  586. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  587. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  588. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  589. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  590. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  591. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  592. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  593. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  594. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  595. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  596. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  597. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  598. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  599. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  600. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  601. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  602. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  603. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  604. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  605. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  606. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  607. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  608. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  609. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  610. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  611. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  612. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  613. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  614. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  615. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  616. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  617. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  618. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  619. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  620. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  621. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  622. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  623. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  624. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  625. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  626. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  627. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  628. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  629. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  630. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  631. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  632. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  633. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  634. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  635. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  636. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  637. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  638. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  639. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  640. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  641. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  642. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  643. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  644. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  645. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  646. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  647. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  648. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  649. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  650. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  651. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  652. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  653. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  654. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  655. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  656. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  657. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  658. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  659. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  660. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  661. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  662. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  663. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  664. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  665. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  666. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  667. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  668. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  669. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  670. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  671. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  672. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  673. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  674. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  675. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  676. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  677. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  678. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  679. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  680. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  681. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  682. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  683. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  684. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  685. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  686. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  687. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  688. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  689. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  690. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  691. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  722. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  723. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  724. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  725. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  726. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  727. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  728. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  729. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  730. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  731. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  732. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  733. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  734. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  735. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  736. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  737. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  738. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  739. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  740. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  741. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  742. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  743. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  744. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  745. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  746. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  747. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  748. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  749. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  750. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  751. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  752. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  753. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  754. vllm/model_executor/layers/quantization/utils/fp8_utils.py +611 -0
  755. vllm/model_executor/layers/quantization/utils/gptq_utils.py +94 -0
  756. vllm/model_executor/layers/quantization/utils/int8_utils.py +484 -0
  757. vllm/model_executor/layers/quantization/utils/layer_utils.py +39 -0
  758. vllm/model_executor/layers/quantization/utils/machete_utils.py +32 -0
  759. vllm/model_executor/layers/quantization/utils/marlin_utils.py +475 -0
  760. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +277 -0
  761. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +324 -0
  762. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +164 -0
  763. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +463 -0
  764. vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +125 -0
  765. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +44 -0
  766. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +61 -0
  767. vllm/model_executor/layers/quantization/utils/quant_utils.py +572 -0
  768. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +404 -0
  769. vllm/model_executor/layers/rejection_sampler.py +405 -0
  770. vllm/model_executor/layers/resampler.py +269 -0
  771. vllm/model_executor/layers/rotary_embedding.py +1861 -0
  772. vllm/model_executor/layers/sampler.py +1203 -0
  773. vllm/model_executor/layers/spec_decode_base_sampler.py +258 -0
  774. vllm/model_executor/layers/typical_acceptance_sampler.py +165 -0
  775. vllm/model_executor/layers/utils.py +99 -0
  776. vllm/model_executor/layers/vocab_parallel_embedding.py +486 -0
  777. vllm/model_executor/model_loader/__init__.py +75 -0
  778. vllm/model_executor/model_loader/base_loader.py +24 -0
  779. vllm/model_executor/model_loader/bitsandbytes_loader.py +582 -0
  780. vllm/model_executor/model_loader/default_loader.py +295 -0
  781. vllm/model_executor/model_loader/dummy_loader.py +37 -0
  782. vllm/model_executor/model_loader/gguf_loader.py +113 -0
  783. vllm/model_executor/model_loader/neuron.py +475 -0
  784. vllm/model_executor/model_loader/neuronx_distributed.py +622 -0
  785. vllm/model_executor/model_loader/runai_streamer_loader.py +120 -0
  786. vllm/model_executor/model_loader/sharded_state_loader.py +211 -0
  787. vllm/model_executor/model_loader/tensorizer.py +632 -0
  788. vllm/model_executor/model_loader/tensorizer_loader.py +122 -0
  789. vllm/model_executor/model_loader/utils.py +301 -0
  790. vllm/model_executor/model_loader/weight_utils.py +781 -0
  791. vllm/model_executor/models/__init__.py +27 -0
  792. vllm/model_executor/models/adapters.py +247 -0
  793. vllm/model_executor/models/aimv2.py +199 -0
  794. vllm/model_executor/models/arctic.py +558 -0
  795. vllm/model_executor/models/aria.py +656 -0
  796. vllm/model_executor/models/aya_vision.py +461 -0
  797. vllm/model_executor/models/baichuan.py +473 -0
  798. vllm/model_executor/models/bamba.py +542 -0
  799. vllm/model_executor/models/bart.py +937 -0
  800. vllm/model_executor/models/bert.py +517 -0
  801. vllm/model_executor/models/bert_with_rope.py +714 -0
  802. vllm/model_executor/models/blip.py +338 -0
  803. vllm/model_executor/models/blip2.py +717 -0
  804. vllm/model_executor/models/bloom.py +372 -0
  805. vllm/model_executor/models/chameleon.py +1135 -0
  806. vllm/model_executor/models/chatglm.py +477 -0
  807. vllm/model_executor/models/clip.py +411 -0
  808. vllm/model_executor/models/commandr.py +471 -0
  809. vllm/model_executor/models/constant_size_cache.py +136 -0
  810. vllm/model_executor/models/dbrx.py +471 -0
  811. vllm/model_executor/models/deepseek.py +485 -0
  812. vllm/model_executor/models/deepseek_mtp.py +268 -0
  813. vllm/model_executor/models/deepseek_v2.py +842 -0
  814. vllm/model_executor/models/deepseek_vl2.py +647 -0
  815. vllm/model_executor/models/eagle.py +259 -0
  816. vllm/model_executor/models/exaone.py +550 -0
  817. vllm/model_executor/models/fairseq2_llama.py +153 -0
  818. vllm/model_executor/models/falcon.py +509 -0
  819. vllm/model_executor/models/falcon_h1.py +684 -0
  820. vllm/model_executor/models/florence2.py +1102 -0
  821. vllm/model_executor/models/fuyu.py +388 -0
  822. vllm/model_executor/models/gemma.py +424 -0
  823. vllm/model_executor/models/gemma2.py +424 -0
  824. vllm/model_executor/models/gemma3.py +532 -0
  825. vllm/model_executor/models/gemma3_mm.py +708 -0
  826. vllm/model_executor/models/glm.py +22 -0
  827. vllm/model_executor/models/glm4.py +304 -0
  828. vllm/model_executor/models/glm4v.py +647 -0
  829. vllm/model_executor/models/gpt2.py +327 -0
  830. vllm/model_executor/models/gpt_bigcode.py +334 -0
  831. vllm/model_executor/models/gpt_j.py +338 -0
  832. vllm/model_executor/models/gpt_neox.py +331 -0
  833. vllm/model_executor/models/granite.py +492 -0
  834. vllm/model_executor/models/granite_speech.py +778 -0
  835. vllm/model_executor/models/granitemoe.py +436 -0
  836. vllm/model_executor/models/granitemoehybrid.py +585 -0
  837. vllm/model_executor/models/granitemoeshared.py +340 -0
  838. vllm/model_executor/models/gritlm.py +223 -0
  839. vllm/model_executor/models/grok1.py +545 -0
  840. vllm/model_executor/models/h2ovl.py +545 -0
  841. vllm/model_executor/models/idefics2_vision_model.py +388 -0
  842. vllm/model_executor/models/idefics3.py +767 -0
  843. vllm/model_executor/models/interfaces.py +571 -0
  844. vllm/model_executor/models/interfaces_base.py +163 -0
  845. vllm/model_executor/models/intern_vit.py +475 -0
  846. vllm/model_executor/models/internlm2.py +454 -0
  847. vllm/model_executor/models/internlm2_ve.py +146 -0
  848. vllm/model_executor/models/internvl.py +1405 -0
  849. vllm/model_executor/models/jais.py +372 -0
  850. vllm/model_executor/models/jamba.py +591 -0
  851. vllm/model_executor/models/kimi_vl.py +576 -0
  852. vllm/model_executor/models/llama.py +643 -0
  853. vllm/model_executor/models/llama4.py +531 -0
  854. vllm/model_executor/models/llama_eagle.py +166 -0
  855. vllm/model_executor/models/llama_eagle3.py +257 -0
  856. vllm/model_executor/models/llava.py +865 -0
  857. vllm/model_executor/models/llava_next.py +585 -0
  858. vllm/model_executor/models/llava_next_video.py +470 -0
  859. vllm/model_executor/models/llava_onevision.py +955 -0
  860. vllm/model_executor/models/mamba.py +272 -0
  861. vllm/model_executor/models/mamba2.py +302 -0
  862. vllm/model_executor/models/mamba_cache.py +75 -0
  863. vllm/model_executor/models/medusa.py +218 -0
  864. vllm/model_executor/models/mimo.py +191 -0
  865. vllm/model_executor/models/mimo_mtp.py +284 -0
  866. vllm/model_executor/models/minicpm.py +590 -0
  867. vllm/model_executor/models/minicpm3.py +229 -0
  868. vllm/model_executor/models/minicpmo.py +758 -0
  869. vllm/model_executor/models/minicpmv.py +1286 -0
  870. vllm/model_executor/models/minimax_cache.py +35 -0
  871. vllm/model_executor/models/minimax_text_01.py +1303 -0
  872. vllm/model_executor/models/minimax_vl_01.py +363 -0
  873. vllm/model_executor/models/mistral3.py +603 -0
  874. vllm/model_executor/models/mixtral.py +487 -0
  875. vllm/model_executor/models/mixtral_quant.py +452 -0
  876. vllm/model_executor/models/mllama.py +1623 -0
  877. vllm/model_executor/models/mllama4.py +838 -0
  878. vllm/model_executor/models/mlp_speculator.py +205 -0
  879. vllm/model_executor/models/modernbert.py +329 -0
  880. vllm/model_executor/models/module_mapping.py +71 -0
  881. vllm/model_executor/models/molmo.py +1567 -0
  882. vllm/model_executor/models/moonvit.py +629 -0
  883. vllm/model_executor/models/mpt.py +330 -0
  884. vllm/model_executor/models/nemotron.py +507 -0
  885. vllm/model_executor/models/nemotron_nas.py +483 -0
  886. vllm/model_executor/models/nvlm_d.py +215 -0
  887. vllm/model_executor/models/olmo.py +388 -0
  888. vllm/model_executor/models/olmo2.py +413 -0
  889. vllm/model_executor/models/olmoe.py +446 -0
  890. vllm/model_executor/models/opt.py +411 -0
  891. vllm/model_executor/models/orion.py +348 -0
  892. vllm/model_executor/models/ovis.py +554 -0
  893. vllm/model_executor/models/paligemma.py +397 -0
  894. vllm/model_executor/models/persimmon.py +343 -0
  895. vllm/model_executor/models/phi.py +355 -0
  896. vllm/model_executor/models/phi3.py +18 -0
  897. vllm/model_executor/models/phi3_small.py +464 -0
  898. vllm/model_executor/models/phi3v.py +722 -0
  899. vllm/model_executor/models/phi4mm.py +1245 -0
  900. vllm/model_executor/models/phi4mm_audio.py +1232 -0
  901. vllm/model_executor/models/phi4mm_utils.py +1883 -0
  902. vllm/model_executor/models/phimoe.py +664 -0
  903. vllm/model_executor/models/pixtral.py +1315 -0
  904. vllm/model_executor/models/plamo2.py +737 -0
  905. vllm/model_executor/models/prithvi_geospatial_mae.py +231 -0
  906. vllm/model_executor/models/qwen.py +361 -0
  907. vllm/model_executor/models/qwen2.py +567 -0
  908. vllm/model_executor/models/qwen2_5_omni_thinker.py +903 -0
  909. vllm/model_executor/models/qwen2_5_vl.py +1171 -0
  910. vllm/model_executor/models/qwen2_audio.py +409 -0
  911. vllm/model_executor/models/qwen2_moe.py +539 -0
  912. vllm/model_executor/models/qwen2_rm.py +131 -0
  913. vllm/model_executor/models/qwen2_vl.py +1410 -0
  914. vllm/model_executor/models/qwen3.py +320 -0
  915. vllm/model_executor/models/qwen3_moe.py +534 -0
  916. vllm/model_executor/models/qwen_vl.py +784 -0
  917. vllm/model_executor/models/registry.py +618 -0
  918. vllm/model_executor/models/roberta.py +273 -0
  919. vllm/model_executor/models/siglip.py +523 -0
  920. vllm/model_executor/models/skyworkr1v.py +950 -0
  921. vllm/model_executor/models/smolvlm.py +51 -0
  922. vllm/model_executor/models/solar.py +505 -0
  923. vllm/model_executor/models/stablelm.py +342 -0
  924. vllm/model_executor/models/starcoder2.py +355 -0
  925. vllm/model_executor/models/telechat2.py +139 -0
  926. vllm/model_executor/models/teleflm.py +78 -0
  927. vllm/model_executor/models/transformers.py +507 -0
  928. vllm/model_executor/models/ultravox.py +655 -0
  929. vllm/model_executor/models/utils.py +730 -0
  930. vllm/model_executor/models/vision.py +146 -0
  931. vllm/model_executor/models/whisper.py +746 -0
  932. vllm/model_executor/models/zamba2.py +1008 -0
  933. vllm/model_executor/parameter.py +458 -0
  934. vllm/model_executor/pooling_metadata.py +71 -0
  935. vllm/model_executor/sampling_metadata.py +596 -0
  936. vllm/model_executor/utils.py +53 -0
  937. vllm/multimodal/__init__.py +32 -0
  938. vllm/multimodal/audio.py +105 -0
  939. vllm/multimodal/base.py +218 -0
  940. vllm/multimodal/hasher.py +117 -0
  941. vllm/multimodal/image.py +96 -0
  942. vllm/multimodal/inputs.py +872 -0
  943. vllm/multimodal/parse.py +460 -0
  944. vllm/multimodal/processing.py +1894 -0
  945. vllm/multimodal/profiling.py +273 -0
  946. vllm/multimodal/registry.py +330 -0
  947. vllm/multimodal/utils.py +392 -0
  948. vllm/multimodal/video.py +197 -0
  949. vllm/outputs.py +525 -0
  950. vllm/platforms/__init__.py +290 -0
  951. vllm/platforms/cpu.py +205 -0
  952. vllm/platforms/cuda.py +461 -0
  953. vllm/platforms/hpu.py +105 -0
  954. vllm/platforms/interface.py +492 -0
  955. vllm/platforms/neuron.py +152 -0
  956. vllm/platforms/rocm.py +388 -0
  957. vllm/platforms/tpu.py +215 -0
  958. vllm/platforms/xpu.py +155 -0
  959. vllm/plugins/__init__.py +86 -0
  960. vllm/plugins/lora_resolvers/README.md +15 -0
  961. vllm/plugins/lora_resolvers/__init__.py +0 -0
  962. vllm/plugins/lora_resolvers/filesystem_resolver.py +49 -0
  963. vllm/pooling_params.py +53 -0
  964. vllm/profiler/__init__.py +0 -0
  965. vllm/profiler/layerwise_profile.py +374 -0
  966. vllm/profiler/utils.py +147 -0
  967. vllm/prompt_adapter/__init__.py +0 -0
  968. vllm/prompt_adapter/layers.py +82 -0
  969. vllm/prompt_adapter/models.py +357 -0
  970. vllm/prompt_adapter/request.py +36 -0
  971. vllm/prompt_adapter/utils.py +97 -0
  972. vllm/prompt_adapter/worker_manager.py +178 -0
  973. vllm/py.typed +2 -0
  974. vllm/reasoning/__init__.py +14 -0
  975. vllm/reasoning/abs_reasoning_parsers.py +191 -0
  976. vllm/reasoning/deepseek_r1_reasoning_parser.py +172 -0
  977. vllm/reasoning/granite_reasoning_parser.py +362 -0
  978. vllm/reasoning/qwen3_reasoning_parser.py +150 -0
  979. vllm/sampling_params.py +590 -0
  980. vllm/scalar_type.py +346 -0
  981. vllm/scripts.py +14 -0
  982. vllm/sequence.py +1567 -0
  983. vllm/spec_decode/__init__.py +0 -0
  984. vllm/spec_decode/batch_expansion.py +505 -0
  985. vllm/spec_decode/draft_model_runner.py +349 -0
  986. vllm/spec_decode/interfaces.py +98 -0
  987. vllm/spec_decode/medusa_worker.py +137 -0
  988. vllm/spec_decode/metrics.py +212 -0
  989. vllm/spec_decode/mlp_speculator_worker.py +93 -0
  990. vllm/spec_decode/mqa_scorer.py +159 -0
  991. vllm/spec_decode/multi_step_worker.py +422 -0
  992. vllm/spec_decode/ngram_worker.py +195 -0
  993. vllm/spec_decode/proposer_worker_base.py +58 -0
  994. vllm/spec_decode/smaller_tp_proposer_worker.py +195 -0
  995. vllm/spec_decode/spec_decode_worker.py +1325 -0
  996. vllm/spec_decode/target_model_runner.py +44 -0
  997. vllm/spec_decode/top1_proposer.py +274 -0
  998. vllm/spec_decode/util.py +276 -0
  999. vllm/test_utils.py +129 -0
  1000. vllm/third_party/__init__.py +0 -0
  1001. vllm/third_party/pynvml.py +6139 -0
  1002. vllm/tracing.py +130 -0
  1003. vllm/transformers_utils/__init__.py +23 -0
  1004. vllm/transformers_utils/chat_templates/__init__.py +4 -0
  1005. vllm/transformers_utils/chat_templates/registry.py +59 -0
  1006. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1007. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1008. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1009. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1010. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1011. vllm/transformers_utils/config.py +835 -0
  1012. vllm/transformers_utils/configs/__init__.py +58 -0
  1013. vllm/transformers_utils/configs/arctic.py +206 -0
  1014. vllm/transformers_utils/configs/chatglm.py +71 -0
  1015. vllm/transformers_utils/configs/cohere2.py +194 -0
  1016. vllm/transformers_utils/configs/dbrx.py +279 -0
  1017. vllm/transformers_utils/configs/deepseek_vl2.py +215 -0
  1018. vllm/transformers_utils/configs/eagle.py +84 -0
  1019. vllm/transformers_utils/configs/exaone.py +189 -0
  1020. vllm/transformers_utils/configs/falcon.py +89 -0
  1021. vllm/transformers_utils/configs/h2ovl.py +15 -0
  1022. vllm/transformers_utils/configs/internvl.py +53 -0
  1023. vllm/transformers_utils/configs/jais.py +237 -0
  1024. vllm/transformers_utils/configs/kimi_vl.py +36 -0
  1025. vllm/transformers_utils/configs/medusa.py +62 -0
  1026. vllm/transformers_utils/configs/minimax_text_01.py +69 -0
  1027. vllm/transformers_utils/configs/minimax_vl_01.py +70 -0
  1028. vllm/transformers_utils/configs/mllama.py +30 -0
  1029. vllm/transformers_utils/configs/mlp_speculator.py +67 -0
  1030. vllm/transformers_utils/configs/moonvit.py +32 -0
  1031. vllm/transformers_utils/configs/mpt.py +179 -0
  1032. vllm/transformers_utils/configs/nemotron.py +204 -0
  1033. vllm/transformers_utils/configs/nvlm_d.py +14 -0
  1034. vllm/transformers_utils/configs/ovis.py +183 -0
  1035. vllm/transformers_utils/configs/skyworkr1v.py +53 -0
  1036. vllm/transformers_utils/configs/solar.py +246 -0
  1037. vllm/transformers_utils/configs/telechat2.py +63 -0
  1038. vllm/transformers_utils/configs/ultravox.py +107 -0
  1039. vllm/transformers_utils/detokenizer.py +167 -0
  1040. vllm/transformers_utils/detokenizer_utils.py +188 -0
  1041. vllm/transformers_utils/processor.py +220 -0
  1042. vllm/transformers_utils/processors/__init__.py +7 -0
  1043. vllm/transformers_utils/processors/deepseek_vl2.py +362 -0
  1044. vllm/transformers_utils/processors/ovis.py +419 -0
  1045. vllm/transformers_utils/s3_utils.py +161 -0
  1046. vllm/transformers_utils/tokenizer.py +301 -0
  1047. vllm/transformers_utils/tokenizer_base.py +148 -0
  1048. vllm/transformers_utils/tokenizer_group.py +119 -0
  1049. vllm/transformers_utils/tokenizers/__init__.py +9 -0
  1050. vllm/transformers_utils/tokenizers/mistral.py +490 -0
  1051. vllm/transformers_utils/utils.py +98 -0
  1052. vllm/triton_utils/__init__.py +13 -0
  1053. vllm/triton_utils/importing.py +49 -0
  1054. vllm/usage/__init__.py +0 -0
  1055. vllm/usage/usage_lib.py +255 -0
  1056. vllm/utils.py +2844 -0
  1057. vllm/v1/__init__.py +0 -0
  1058. vllm/v1/attention/__init__.py +0 -0
  1059. vllm/v1/attention/backends/__init__.py +0 -0
  1060. vllm/v1/attention/backends/flash_attn.py +833 -0
  1061. vllm/v1/attention/backends/flashinfer.py +639 -0
  1062. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1063. vllm/v1/attention/backends/mla/common.py +926 -0
  1064. vllm/v1/attention/backends/mla/flashmla.py +150 -0
  1065. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +221 -0
  1066. vllm/v1/attention/backends/mla/triton_mla.py +118 -0
  1067. vllm/v1/attention/backends/pallas.py +235 -0
  1068. vllm/v1/attention/backends/triton_attn.py +279 -0
  1069. vllm/v1/attention/backends/utils.py +18 -0
  1070. vllm/v1/core/__init__.py +0 -0
  1071. vllm/v1/core/block_pool.py +328 -0
  1072. vllm/v1/core/encoder_cache_manager.py +149 -0
  1073. vllm/v1/core/kv_cache_manager.py +372 -0
  1074. vllm/v1/core/kv_cache_utils.py +748 -0
  1075. vllm/v1/core/sched/__init__.py +0 -0
  1076. vllm/v1/core/sched/interface.py +143 -0
  1077. vllm/v1/core/sched/output.py +153 -0
  1078. vllm/v1/core/sched/scheduler.py +1015 -0
  1079. vllm/v1/core/sched/utils.py +22 -0
  1080. vllm/v1/core/single_type_kv_cache_manager.py +358 -0
  1081. vllm/v1/engine/__init__.py +171 -0
  1082. vllm/v1/engine/async_llm.py +546 -0
  1083. vllm/v1/engine/core.py +801 -0
  1084. vllm/v1/engine/core_client.py +1020 -0
  1085. vllm/v1/engine/detokenizer.py +260 -0
  1086. vllm/v1/engine/exceptions.py +16 -0
  1087. vllm/v1/engine/llm_engine.py +316 -0
  1088. vllm/v1/engine/logprobs.py +198 -0
  1089. vllm/v1/engine/mm_input_cache.py +90 -0
  1090. vllm/v1/engine/output_processor.py +427 -0
  1091. vllm/v1/engine/parallel_sampling.py +132 -0
  1092. vllm/v1/engine/processor.py +398 -0
  1093. vllm/v1/executor/__init__.py +0 -0
  1094. vllm/v1/executor/abstract.py +112 -0
  1095. vllm/v1/executor/multiproc_executor.py +532 -0
  1096. vllm/v1/executor/ray_distributed_executor.py +61 -0
  1097. vllm/v1/kv_cache_interface.py +208 -0
  1098. vllm/v1/metrics/__init__.py +0 -0
  1099. vllm/v1/metrics/loggers.py +511 -0
  1100. vllm/v1/metrics/ray_wrappers.py +120 -0
  1101. vllm/v1/metrics/reader.py +245 -0
  1102. vllm/v1/metrics/stats.py +238 -0
  1103. vllm/v1/outputs.py +115 -0
  1104. vllm/v1/request.py +191 -0
  1105. vllm/v1/sample/__init__.py +0 -0
  1106. vllm/v1/sample/metadata.py +43 -0
  1107. vllm/v1/sample/ops/__init__.py +0 -0
  1108. vllm/v1/sample/ops/bad_words.py +38 -0
  1109. vllm/v1/sample/ops/penalties.py +58 -0
  1110. vllm/v1/sample/ops/topk_topp_sampler.py +292 -0
  1111. vllm/v1/sample/rejection_sampler.py +630 -0
  1112. vllm/v1/sample/sampler.py +270 -0
  1113. vllm/v1/sample/tpu/__init__.py +0 -0
  1114. vllm/v1/sample/tpu/metadata.py +123 -0
  1115. vllm/v1/sample/tpu/sampler.py +144 -0
  1116. vllm/v1/serial_utils.py +313 -0
  1117. vllm/v1/spec_decode/__init__.py +0 -0
  1118. vllm/v1/spec_decode/eagle.py +424 -0
  1119. vllm/v1/spec_decode/medusa.py +61 -0
  1120. vllm/v1/spec_decode/metadata.py +61 -0
  1121. vllm/v1/spec_decode/metrics.py +177 -0
  1122. vllm/v1/spec_decode/ngram_proposer.py +131 -0
  1123. vllm/v1/spec_decode/utils.py +45 -0
  1124. vllm/v1/structured_output/__init__.py +215 -0
  1125. vllm/v1/structured_output/backend_guidance.py +244 -0
  1126. vllm/v1/structured_output/backend_types.py +133 -0
  1127. vllm/v1/structured_output/backend_xgrammar.py +317 -0
  1128. vllm/v1/structured_output/request.py +85 -0
  1129. vllm/v1/structured_output/utils.py +174 -0
  1130. vllm/v1/utils.py +294 -0
  1131. vllm/v1/worker/__init__.py +0 -0
  1132. vllm/v1/worker/block_table.py +139 -0
  1133. vllm/v1/worker/gpu_input_batch.py +680 -0
  1134. vllm/v1/worker/gpu_model_runner.py +2084 -0
  1135. vllm/v1/worker/gpu_worker.py +373 -0
  1136. vllm/v1/worker/lora_model_runner_mixin.py +145 -0
  1137. vllm/v1/worker/tpu_model_runner.py +1510 -0
  1138. vllm/v1/worker/tpu_worker.py +276 -0
  1139. vllm/v1/worker/utils.py +74 -0
  1140. vllm/v1/worker/worker_base.py +64 -0
  1141. vllm/version.py +40 -0
  1142. vllm/vllm_flash_attn/.gitkeep +0 -0
  1143. vllm/worker/__init__.py +0 -0
  1144. vllm/worker/cache_engine.py +144 -0
  1145. vllm/worker/cpu_enc_dec_model_runner.py +326 -0
  1146. vllm/worker/cpu_model_runner.py +671 -0
  1147. vllm/worker/cpu_pooling_model_runner.py +125 -0
  1148. vllm/worker/cpu_worker.py +400 -0
  1149. vllm/worker/enc_dec_model_runner.py +555 -0
  1150. vllm/worker/hpu_model_runner.py +2319 -0
  1151. vllm/worker/hpu_worker.py +483 -0
  1152. vllm/worker/model_runner.py +2178 -0
  1153. vllm/worker/model_runner_base.py +281 -0
  1154. vllm/worker/multi_step_hpu_worker.py +122 -0
  1155. vllm/worker/multi_step_model_runner.py +910 -0
  1156. vllm/worker/multi_step_neuron_model_runner.py +84 -0
  1157. vllm/worker/multi_step_neuronx_distributed_model_runner.py +63 -0
  1158. vllm/worker/multi_step_tpu_worker.py +107 -0
  1159. vllm/worker/multi_step_worker.py +196 -0
  1160. vllm/worker/neuron_model_runner.py +418 -0
  1161. vllm/worker/neuron_worker.py +158 -0
  1162. vllm/worker/neuronx_distributed_model_runner.py +136 -0
  1163. vllm/worker/pooling_model_runner.py +211 -0
  1164. vllm/worker/tpu_model_runner.py +908 -0
  1165. vllm/worker/tpu_worker.py +336 -0
  1166. vllm/worker/utils.py +52 -0
  1167. vllm/worker/worker.py +574 -0
  1168. vllm/worker/worker_base.py +644 -0
  1169. vllm/worker/xpu_model_runner.py +606 -0
  1170. vllm/worker/xpu_worker.py +185 -0
  1171. vllm_cpu_avx512bf16-0.9.0.post2.dist-info/METADATA +335 -0
  1172. vllm_cpu_avx512bf16-0.9.0.post2.dist-info/RECORD +1175 -0
  1173. vllm_cpu_avx512bf16-0.9.0.post2.dist-info/WHEEL +5 -0
  1174. vllm_cpu_avx512bf16-0.9.0.post2.dist-info/entry_points.txt +5 -0
  1175. vllm_cpu_avx512bf16-0.9.0.post2.dist-info/top_level.txt +1 -0
--- /dev/null
+++ b/vllm/v1/worker/tpu_model_runner.py
@@ -0,0 +1,1510 @@
+# SPDX-License-Identifier: Apache-2.0
+import bisect
+import gc
+import time
+from typing import TYPE_CHECKING, Optional, cast
+from unittest.mock import patch
+
+import numpy as np
+import torch
+import torch.distributed
+import torch.nn as nn
+# TPU XLA related
+import torch_xla.core.xla_model as xm
+import torch_xla.runtime as xr
+
+import vllm.envs as envs
+from vllm.attention.backends.abstract import AttentionType
+from vllm.attention.layer import Attention
+from vllm.compilation.wrapper import TorchCompileWrapperWithCustomDispatcher
+from vllm.config import VllmConfig, get_layers_from_vllm_config
+from vllm.forward_context import set_forward_context
+from vllm.logger import init_logger
+from vllm.model_executor.model_loader import get_model
+from vllm.multimodal import MULTIMODAL_REGISTRY
+from vllm.multimodal.inputs import (BatchedTensorInputs, MultiModalKwargs,
+                                    PlaceholderRange)
+from vllm.multimodal.utils import group_mm_inputs_by_modality
+from vllm.sequence import IntermediateTensors
+from vllm.utils import LayerBlockType, cdiv, is_pin_memory_available
+from vllm.v1.attention.backends.pallas import (PallasAttentionBackend,
+                                               PallasMetadata)
+from vllm.v1.core.encoder_cache_manager import compute_encoder_budget
+from vllm.v1.kv_cache_interface import (AttentionSpec, FullAttentionSpec,
+                                        KVCacheConfig, KVCacheSpec,
+                                        SlidingWindowSpec)
+from vllm.v1.outputs import (EMPTY_MODEL_RUNNER_OUTPUT, LogprobsTensors,
+                             ModelRunnerOutput)
+from vllm.v1.sample.tpu.metadata import TPUSupportedSamplingMetadata
+from vllm.v1.sample.tpu.sampler import Sampler as TPUSampler
+from vllm.v1.utils import bind_kv_cache
+from vllm.v1.worker.gpu_input_batch import CachedRequestState, InputBatch
+from vllm.v1.worker.lora_model_runner_mixin import LoRAModelRunnerMixin
+
+from .utils import sanity_check_mm_encoder_outputs
+
+if TYPE_CHECKING:
+    from vllm.v1.core.sched.output import SchedulerOutput
+
+logger = init_logger(__name__)
+
+# Here we utilize the behavior that out-of-bound index is ignored.
+# FIXME(woosuk): Find a more reliable way to prevent possible bugs.
+_PAD_SLOT_ID = 1_000_000_000
+INVALID_TOKEN_ID = -1
+# Smallest output size
+MIN_NUM_SEQS = 8
+
+
+#########################################################
+# Ways to avoid recompilation
+#########################################################
+#
+# The model executor has two primary components:
+# 1. preparing the model and sampler inputs
+# 2. executing the model and sampler.
+# The core idea is to avoid any TPU computation during input preparation. For
+# better compilation tracking and increased flexibility, the model execution and
+# sampler are divided into several distinct components.
+#
+# Below are the detailed steps:
+#
+# Step 1
+# It is recommended to avoid TPU operations when preparing the model and sampler
+# inputs. CPU tensors can be prepared and transferred to the XLA device using
+# cpu_tensor.to(xla_device), which only triggers CPU to TPU transfers and avoids
+# compilation.
+#
+# Step 2
+# The TPU execution should be decomposed into subgraphs (4 at the moment):
+# 1. the main model
+# 2. selecting hidden states for each request
+# 3. sampler
+# 4. encoder.
+# Each subgraph should be decorated with torch.compile. This is used to make
+# sure that we have the same subgraph topology in both dummy_run and
+# execute_model. The results from these subgraphs should either be passed to
+# other subgraphs, or transferred from TPU to CPU using xla_tensor.cpu() for
+# subsequent processing on the CPU.
+#
+# Step 3
+# The dummy_run should be comprehensive, ensuring all potential input shapes and
+# branch predictions are included as subgraph inputs to facilitate
+# pre-compilation.
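# ---------------------------------------------------------------------------
# Editor's illustration (not part of the released file): a minimal sketch of
# the pattern described above, assuming torch_xla's "openxla" torch.compile
# backend. Inputs are prepared on CPU, each subgraph is compiled with fixed
# shapes, and tensors reach the device only through .to():
#
#     @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
#     def select_hidden_states(hidden, indices):
#         # One of the four subgraphs: pick one hidden state per request.
#         return hidden[indices]
#
#     input_ids = torch.zeros(padded_len, dtype=torch.int32)  # CPU-side prep
#     xla_ids = input_ids.to(xm.xla_device())  # host-to-device copy only
# ---------------------------------------------------------------------------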
+class TPUModelRunner(LoRAModelRunnerMixin):
+
+    def __init__(
+        self,
+        vllm_config: VllmConfig,
+        device: torch.device,
+    ):
+        self.vllm_config = vllm_config
+        self.model_config = vllm_config.model_config
+        self.cache_config = vllm_config.cache_config
+        self.lora_config = vllm_config.lora_config
+        self.load_config = vllm_config.load_config
+        self.parallel_config = vllm_config.parallel_config
+        self.scheduler_config = vllm_config.scheduler_config
+        self.speculative_config = vllm_config.speculative_config
+        self.prompt_adapter_config = vllm_config.prompt_adapter_config
+        self.observability_config = vllm_config.observability_config
+        self.device_config = vllm_config.device_config
+
+        model_config = self.model_config
+        cache_config = self.cache_config
+        scheduler_config = self.scheduler_config
+        parallel_config = self.parallel_config
+        self.device = device
+        self.check_recompilation = envs.VLLM_XLA_CHECK_RECOMPILATION
+
+        self.enforce_eager = model_config.enforce_eager
+
+        self.num_xla_graphs = 0
+        self._update_num_xla_graphs("init")
+
+        self.pin_memory = is_pin_memory_available()
+        self.dtype = self.model_config.dtype
+        self._hidden_states_dtype = self.dtype
+
+        self.is_multimodal_model = model_config.is_multimodal_model
+        self.sliding_window = model_config.get_sliding_window()
+        self.block_size = cache_config.block_size
+        self.max_model_len = model_config.max_model_len
+        self.max_num_blocks_per_req = cdiv(self.max_model_len, self.block_size)
+        # InputBatch needs to work with sampling tensors greater than padding
+        # to avoid dynamic shapes. Also, avoid suboptimal alignment.
+        self.max_num_reqs = max(scheduler_config.max_num_seqs, MIN_NUM_SEQS)
+        self.num_tokens_paddings = _get_token_paddings(
+            min_token_size=16,
+            max_token_size=scheduler_config.max_num_batched_tokens,
+            padding_gap=envs.VLLM_TPU_BUCKET_PADDING_GAP)
+        # In case `max_num_tokens < max(num_tokens_paddings)` use the actual
+        # padded max value to pre-allocate data structures and pre-compile.
+        self.max_num_tokens = self.num_tokens_paddings[-1]
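# ---------------------------------------------------------------------------
# Editor's illustration (not part of the released file): _get_token_paddings
# is defined later in this file (beyond this excerpt). Assuming it doubles
# bucket sizes from min_token_size until max_token_size is covered (with
# padding_gap=0), min_token_size=16 and max_token_size=1000 would yield
# [16, 32, 64, 128, 256, 512, 1024]. A batch is then padded up to the nearest
# bucket so XLA only ever compiles for these token counts, e.g.:
#
#     idx = bisect.bisect_left(num_tokens_paddings, num_tokens)
#     padded_num_tokens = num_tokens_paddings[idx]
# ---------------------------------------------------------------------------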
+
+        # Model-related.
+        self.num_attn_layers = model_config.get_num_layers_by_block_type(
+            parallel_config, LayerBlockType.attention)
+        self.num_query_heads = model_config.get_num_attention_heads(
+            parallel_config)
+        self.num_kv_heads = model_config.get_num_kv_heads(parallel_config)
+        self.head_size = model_config.get_head_size()
+        self.hidden_size = model_config.get_hidden_size()
+        self.vocab_size = model_config.get_vocab_size()
+
+        # Multi-modal data support
+        self.mm_registry = MULTIMODAL_REGISTRY
+        self.uses_mrope = model_config.uses_mrope
+        # TODO: Support M-RoPE (e.g., Qwen2-VL)
+        assert not self.uses_mrope, "TPU does not support M-RoPE yet."
+
+        encoder_compute_budget, encoder_cache_size = compute_encoder_budget(
+            model_config=model_config,
+            scheduler_config=scheduler_config,
+            mm_registry=self.mm_registry,
+        )
+        self.max_num_encoder_input_tokens = encoder_compute_budget
+        self.encoder_cache_size = encoder_cache_size
+
+        # Lazy initialization
+        # self.model: nn.Module  # Set after load_model
+        self.kv_caches: list[torch.Tensor] = []
+        # req_id -> (input_id -> encoder_output)
+        self.encoder_cache: dict[str, dict[int, torch.Tensor]] = {}
+        # self.input_batch: InputBatch  # Persistent batch.
+
+        # Request states.
+        self.requests: dict[str, CachedRequestState] = {}
+
+        # Cached torch/numpy tensor
+        # The pytorch tensor and numpy array share the same buffer.
+        # Sometimes the numpy op is faster so we create both.
+        self.input_ids_cpu = torch.zeros(self.max_num_tokens,
+                                         dtype=torch.int32,
+                                         device="cpu")
+
+        self.positions_cpu = torch.zeros(self.max_num_tokens,
+                                         dtype=torch.int32,
+                                         device="cpu")
+        self.positions_np = self.positions_cpu.numpy()
+
+        self.block_table_cpu = torch.zeros(
+            (self.max_num_reqs, self.max_num_blocks_per_req),
+            dtype=torch.int32,
+            device="cpu")
+
+        self.query_start_loc_cpu = torch.zeros(self.max_num_tokens + 1,
+                                               dtype=torch.int32,
+                                               device="cpu",
+                                               pin_memory=self.pin_memory)
+        self.query_start_loc_np = self.query_start_loc_cpu.numpy()
+
+        self.seq_lens_cpu = torch.zeros(self.max_num_tokens,
+                                        dtype=torch.int32,
+                                        device="cpu",
+                                        pin_memory=self.pin_memory)
+        self.seq_lens_np = self.seq_lens_cpu.numpy()
+
+        # Range tensor with values [0 .. self.max_num_tokens - 1].
+        # Used to initialize positions / context_lens / seq_lens
+        # Keep in int64 to avoid overflow with long context
+        self.arange_np = np.arange(self.max_num_tokens, dtype=np.int64)
+        self.num_reqs_paddings = _get_req_paddings(
+            min_req_size=MIN_NUM_SEQS, max_req_size=self.max_num_reqs)
+
+        # tensors for structured decoding
+        self.grammar_bitmask_cpu = torch.zeros(
+            (self.max_num_reqs, cdiv(self.vocab_size, 32)),
+            dtype=torch.int32,
+            device="cpu",
+            pin_memory=self.pin_memory)
+        self.require_structured_out_cpu = torch.zeros(
+            (self.max_num_reqs, 1),
+            dtype=torch.bool,
+            device="cpu",
+            pin_memory=self.pin_memory)
+        self.structured_decode_arange = torch.arange(
+            0, 32, device="cpu", pin_memory=self.pin_memory)
+
+        # Get maximum number of mm items per modality (batch size).
+        self.max_num_mm_items_by_modality = dict()
+        if (self.is_multimodal_model and self.max_num_encoder_input_tokens > 0
+                and self.encoder_cache_size > 0):
+            max_tokens_by_modality_dict = (
+                MULTIMODAL_REGISTRY.
+                get_max_tokens_per_item_by_nonzero_modality(self.model_config))
+            for modality, max_tokens in max_tokens_by_modality_dict.items():
+                # Check how many items of this modality can be supported by
+                # the encoder budget.
+                encoder_budget = min(self.max_num_encoder_input_tokens,
+                                     self.encoder_cache_size)
+
+                max_num_mm_items_encoder_budget = cdiv(encoder_budget,
+                                                       max_tokens)
+
+                # Check how many items of this modality can be supported by
+                # the decoder budget.
+                max_mm_items_per_req = self.mm_registry.\
+                    get_mm_limits_per_prompt(self.model_config)[modality]
+
+                # NOTE: We do not consider max_num_batched_tokens on purpose
+                # because the multimodal embeddings can be generated in advance
+                # and chunked prefilled.
+                max_num_mm_items_decoder_budget = self.max_num_reqs * \
+                    max_mm_items_per_req
+
+                max_num_mm_items = min(max_num_mm_items_encoder_budget,
+                                       max_num_mm_items_decoder_budget)
+                self.max_num_mm_items_by_modality[modality] = max_num_mm_items
+
+    def _update_num_xla_graphs(self, case_str):
+        check_comp = self.check_recompilation and not self.enforce_eager
+        if not check_comp:
+            return
+
+        total_cached_graphs = xr.get_num_cached_compilation_graph()
+        new_compiled_graphs = total_cached_graphs - self.num_xla_graphs
+        if new_compiled_graphs == 0:
+            return
+
+        logger.info("Added %d new compiled XLA graphs due to %s",
+                    new_compiled_graphs, case_str)
+        self.num_xla_graphs += new_compiled_graphs
+
+    def _verify_num_xla_graphs(self, case_str):
+        check_comp = self.check_recompilation and not self.enforce_eager
+        if not check_comp:
+            return
+
+        curr_cached_graph = xr.get_num_cached_compilation_graph()
+        assert self.num_xla_graphs == curr_cached_graph, (
+            "Recompilation after warm up is detected during {}."
+            " num_xla_graphs = {} curr_cached_graph = {}".format(
+                case_str, self.num_xla_graphs, curr_cached_graph))
+
+    def _update_states(self, scheduler_output: "SchedulerOutput") -> bool:
+        """Update the cached states and the persistent batch with the scheduler
+        output.
+
+        The updated states are used by the `_prepare_inputs` function to create
+        the input GPU tensors for the model.
+
+        Returns:
+            True if there is a new/resumed/paused/finished request.
+            If False, we can skip copying SamplingMetadata to the GPU.
+        """
+        # Remove finished requests from the cached states.
+        for req_id in scheduler_output.finished_req_ids:
+            self.requests.pop(req_id, None)
+            self.encoder_cache.pop(req_id, None)
+
+        # Remove the finished requests from the persistent batch.
+        # NOTE(woosuk): There could be an edge case where finished_req_ids and
+        # scheduled_req_ids overlap. This happens when a request is aborted and
+        # then resubmitted with the same ID. In this case, we treat them as two
+        # distinct requests - clearing the cached states for the first request
+        # and handling the second as a new request.
+        removed_req_indices: list[int] = []
+        for req_id in scheduler_output.finished_req_ids:
+            req_index = self.input_batch.remove_request(req_id)
+            if req_index is not None:
+                removed_req_indices.append(req_index)
+
+        # Free the cached encoder outputs.
+        for req_id, input_id in scheduler_output.free_encoder_input_ids:
+            encoder_outputs = self.encoder_cache.get(req_id)
+            if encoder_outputs is not None:
+                encoder_outputs.pop(input_id, None)
+                if not encoder_outputs:
+                    self.encoder_cache.pop(req_id, None)
+
+        # Remove the unscheduled requests from the persistent batch.
+        # NOTE(woosuk): The unscheduled requests are either preempted requests
+        # or running requests that are not scheduled in this step. We remove
+        # them from the persistent batch but keep their cached states since
+        # they will be scheduled again sometime in the future.
+        scheduled_req_ids = scheduler_output.num_scheduled_tokens.keys()
+        cached_req_ids = self.input_batch.req_id_to_index.keys()
+        unscheduled_req_ids = cached_req_ids - scheduled_req_ids
+        # NOTE(woosuk): The persistent batch optimization assumes that
+        # consecutive batches contain mostly the same requests. If batches
+        # have low request overlap (e.g., alternating between two distinct
+        # sets of requests), this optimization becomes very inefficient.
+        for req_id in unscheduled_req_ids:
+            req_index = self.input_batch.remove_request(req_id)
+            assert req_index is not None
+            removed_req_indices.append(req_index)
+
+        req_ids_to_add: list[str] = []
+        # Add new requests to the cached states.
+        for new_req_data in scheduler_output.scheduled_new_reqs:
+            req_id = new_req_data.req_id
+            sampling_params = new_req_data.sampling_params
+
+            self.requests[req_id] = CachedRequestState(
+                req_id=req_id,
+                prompt_token_ids=new_req_data.prompt_token_ids,
+                mm_inputs=new_req_data.mm_inputs,
+                mm_positions=new_req_data.mm_positions,
+                sampling_params=sampling_params,
+                generator=None,
+                block_ids=new_req_data.block_ids,
+                num_computed_tokens=new_req_data.num_computed_tokens,
+                output_token_ids=[],
+                lora_request=new_req_data.lora_request,
+            )
+
+            req_ids_to_add.append(req_id)
+
+        # Update the states of the running/resumed requests.
+        for req_data in scheduler_output.scheduled_cached_reqs:
+            req_id = req_data.req_id
+            req_state = self.requests[req_id]
+
+            # Update the cached states.
+            req_state.num_computed_tokens = req_data.num_computed_tokens
+            if not req_data.resumed_from_preemption:
+                # Append the new blocks to the existing block IDs.
+                req_state.block_ids.extend(req_data.new_block_ids)
+            else:
+                # The request is resumed from preemption.
+                # Replace the existing block IDs with the new ones.
+                req_state.block_ids = req_data.new_block_ids
+
+            req_index = self.input_batch.req_id_to_index.get(req_id)
+            if req_index is None:
+                # The request is not in the persistent batch.
+                # The request was either preempted and resumed later, or was not
+                # scheduled in the previous step and needs to be added again.
+                req_ids_to_add.append(req_id)
+                continue
+
+            # Update the persistent batch.
+            self.input_batch.num_computed_tokens_cpu[req_index] = (
+                req_data.num_computed_tokens)
+            self.input_batch.block_table.append_row(req_data.new_block_ids,
+                                                    req_index)
+
+        # Add the new or resumed requests to the persistent batch.
+        # The smaller empty indices are filled first.
+        removed_req_indices = sorted(removed_req_indices, reverse=True)
+        for req_id in req_ids_to_add:
+            req_state = self.requests[req_id]
+            if removed_req_indices:
+                # Fill the empty index.
+                req_index = removed_req_indices.pop()
+            else:
+                # Append to the end.
+                req_index = None
+            self.input_batch.add_request(req_state, req_index)
+
+        # Condense the batched states if there are empty indices.
+        if removed_req_indices:
+            self.input_batch.condense(removed_req_indices)
+
+        return len(unscheduled_req_ids) > 0 or len(req_ids_to_add) > 0
+
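# ---------------------------------------------------------------------------
# Editor's illustration (not part of the released file): the index-reuse step
# above fills the smallest freed slot first, because the freed indices are
# sorted in descending order and pop() removes the last (smallest) element:
#
#     removed = sorted([3, 0], reverse=True)  # -> [3, 0]
#     removed.pop()                           # -> 0, filled first
#     removed.pop()                           # -> 3, filled next
#
# Any indices still unused afterwards are compacted by input_batch.condense().
# ---------------------------------------------------------------------------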
+    def get_model(self) -> nn.Module:
+        assert self.model is not None
+        return self.model
+
+    def get_kv_cache_spec(self) -> dict[str, KVCacheSpec]:
+        """
+        Generates the KVCacheSpec by parsing the kv cache format from each
+        Attention module in the static forward context.
+        Returns:
+            KVCacheSpec: A dictionary mapping layer names to their KV cache
+            format. Layers that do not need KV cache are not included.
+        """
+
+        layers = get_layers_from_vllm_config(self.vllm_config, Attention)
+        block_size = self.vllm_config.cache_config.block_size
+        kv_cache_spec: dict[str, KVCacheSpec] = {}
+        for layer_name, attn_module in layers.items():
+            if attn_module.attn_type == AttentionType.DECODER:
+                if attn_module.sliding_window is not None:
+                    kv_cache_spec[layer_name] = SlidingWindowSpec(
+                        block_size=block_size,
+                        num_kv_heads=attn_module.num_kv_heads,
+                        head_size=attn_module.head_size,
+                        dtype=attn_module.dtype,
+                        sliding_window=attn_module.sliding_window,
+                        use_mla=False,
+                    )
+                else:
+                    kv_cache_spec[layer_name] = FullAttentionSpec(
+                        block_size=block_size,
+                        num_kv_heads=attn_module.num_kv_heads,
+                        head_size=attn_module.head_size,
+                        dtype=attn_module.dtype,
+                        use_mla=False,
+                    )
+            elif attn_module.attn_type in (AttentionType.ENCODER,
+                                           AttentionType.ENCODER_ONLY):
+                # encoder-only attention does not need KV cache.
+                continue
+            elif attn_module.attn_type == AttentionType.ENCODER_DECODER:
+                raise NotImplementedError
+            else:
+                raise ValueError(
+                    f"Unknown attention type: {attn_module.attn_type}")
+
+        return kv_cache_spec
+
454
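The per-layer dispatch above reduces to a small decision table: decoder layers get a sliding-window or full-attention spec, encoder-only layers get none. A schematic sketch with a stand-in dataclass; the real `SlidingWindowSpec`/`FullAttentionSpec` carry more fields:

from dataclasses import dataclass
from typing import Optional

@dataclass
class Layer:                      # stand-in for an Attention module
    attn_type: str
    sliding_window: Optional[int] = None

def spec_for(layer: Layer) -> Optional[str]:
    # Decoder layers get a KV cache spec; encoder-only layers get none.
    if layer.attn_type == "decoder":
        return "sliding_window" if layer.sliding_window else "full_attention"
    if layer.attn_type in ("encoder", "encoder_only"):
        return None
    raise ValueError(f"Unknown attention type: {layer.attn_type}")

print(spec_for(Layer("decoder", sliding_window=4096)))  # sliding_window
print(spec_for(Layer("decoder")))                       # full_attention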
+    def _prepare_inputs(self, scheduler_output: "SchedulerOutput"):
+        total_num_scheduled_tokens = scheduler_output.total_num_scheduled_tokens
+        assert total_num_scheduled_tokens > 0
+        num_reqs = self.input_batch.num_reqs
+        assert num_reqs > 0
+
+        # Get the number of scheduled tokens for each request.
+        num_scheduled_tokens_per_req = []
+        max_num_scheduled_tokens_all_reqs = 0
+        for req_id in self.input_batch.req_ids[:num_reqs]:
+            assert req_id is not None
+            num_tokens = scheduler_output.num_scheduled_tokens[req_id]
+            num_scheduled_tokens_per_req.append(num_tokens)
+            max_num_scheduled_tokens_all_reqs = max(
+                max_num_scheduled_tokens_all_reqs, num_tokens)
+        num_scheduled_tokens_per_req = np.array(num_scheduled_tokens_per_req,
+                                                dtype=np.int32)
+        assert max_num_scheduled_tokens_all_reqs > 0
+
+        # Get request indices.
+        # E.g., [2, 5, 3] -> [0, 0, 1, 1, 1, 1, 1, 2, 2, 2]
+        # For each scheduled token, this is the index of its owning request.
+        req_indices = np.repeat(self.arange_np[:num_reqs],
+                                num_scheduled_tokens_per_req)
+
+        # Get batched arange.
+        # E.g., [2, 5, 3] -> [0, 1, 0, 1, 2, 3, 4, 0, 1, 2]
+        # For each scheduled token, this is its position within its request.
+        arange = np.concatenate(
+            [self.arange_np[:n] for n in num_scheduled_tokens_per_req])
+
+        # Get positions.
+        positions_np = self.positions_np[:total_num_scheduled_tokens]
+        np.add(self.input_batch.num_computed_tokens_cpu[req_indices],
+               arange,
+               out=positions_np)
+
+        # Get token indices.
+        # E.g., [0, 1, 0, 1, 2, 3, 4, 0, 1, 2]
+        # -> [0, 1, M, M + 1, M + 2, M + 3, M + 4, 2 * M, 2 * M + 1, 2 * M + 2]
+        # where M is the max_model_len.
+        token_indices = (positions_np +
+                         req_indices * self.input_batch.token_ids_cpu.shape[1])
+
+        # NOTE(woosuk): We use torch.index_select instead of np.take here
+        # because torch.index_select is much faster than np.take for large
+        # tensors.
+        torch.index_select(self.input_batch.token_ids_cpu_tensor.flatten(),
+                           0,
+                           torch.from_numpy(token_indices),
+                           out=self.input_ids_cpu[:total_num_scheduled_tokens])
+
+        # Calculate the slot mapping.
+        # E.g., [0, 1, 0, 1, 2, 3, 4, 0, 1, 2]
+        # -> [0, 0, K, K, K + 1, K + 1, K + 2, 2 * K, 2 * K, 2 * K + 1]
+        # where K is the max_num_blocks_per_req and the block size is 2.
+        # NOTE(woosuk): We can't simply use `token_indices // block_size` here
+        # because M (max_model_len) is not necessarily divisible by block_size.
+        # req_indices: E.g., [2, 5, 3] -> [0, 0, 1, 1, 1, 1, 1, 2, 2, 2]
+        block_table_indices = (req_indices * self.max_num_blocks_per_req +
+                               positions_np // self.block_size)
+        # NOTE(woosuk): We use torch.index_select instead of np.take here
+        # because torch.index_select is much faster than np.take for large
+        # tensors.
+        block_table_cpu = self.input_batch.block_table[0].get_cpu_tensor()
+        block_numbers = block_table_cpu.flatten()[block_table_indices].numpy()
+        block_offsets = positions_np % self.block_size
+        np.add(block_numbers * self.block_size,
+               block_offsets,
+               out=self.input_batch.block_table[0].
+               slot_mapping_np[:total_num_scheduled_tokens])
+
+        # Prepare the attention metadata.
+        self.query_start_loc_np[0] = 0
+        np.cumsum(num_scheduled_tokens_per_req,
+                  out=self.query_start_loc_np[1:num_reqs + 1])
+        self.query_start_loc_np[num_reqs + 1:] = 1
+
+        self.seq_lens_np[:num_reqs] = (
+            self.input_batch.num_computed_tokens_cpu[:num_reqs] +
+            num_scheduled_tokens_per_req)
+
+        # Do the padding and copy the tensors to the TPU.
+        padded_total_num_scheduled_tokens = _get_padded_token_len(
+            self.num_tokens_paddings, total_num_scheduled_tokens)
+        # Zero out to avoid spurious values from prev iteration (last cp chunk)
+        self.input_ids_cpu[
+            total_num_scheduled_tokens:padded_total_num_scheduled_tokens] = 0
+        self.input_ids = self.input_ids_cpu[:
+                                            padded_total_num_scheduled_tokens].to(
+                                                self.device)
+        self.position_ids = self.positions_cpu[:
+                                               padded_total_num_scheduled_tokens].to(
+                                                   self.device)
+        self.input_batch.block_table[0].slot_mapping_cpu[
+            total_num_scheduled_tokens:] = _PAD_SLOT_ID
+        slot_mapping = (
+            self.input_batch.block_table[0].
+            slot_mapping_cpu[:padded_total_num_scheduled_tokens].to(
+                self.device))
+        block_tables = self.block_table_cpu[:self.max_num_reqs]
+        block_tables[:num_reqs, :self.max_num_blocks_per_req] = (
+            self.input_batch.block_table[0].get_cpu_tensor()[:num_reqs])
+        block_tables = block_tables.to(self.device)
+        query_start_loc = self.query_start_loc_cpu[:self.max_num_reqs + 1].to(
+            self.device)
+        seq_lens = self.seq_lens_cpu[:self.max_num_reqs].to(self.device)
+
+        if self.lora_config is not None:
+            # We need to respect padding when activating LoRA adapters.
+            padded_num_scheduled_tokens_per_req = np.copy(
+                num_scheduled_tokens_per_req
+            )  # Copying to avoid accidental state corruption bugs
+            padded_num_scheduled_tokens_per_req[-1] += \
+                padded_total_num_scheduled_tokens - total_num_scheduled_tokens
+
+            self.set_active_loras(self.input_batch,
+                                  padded_num_scheduled_tokens_per_req)
+
+        attn_metadata = PallasMetadata(
+            slot_mapping=slot_mapping,
+            block_tables=block_tables,
+            context_lens=seq_lens,
+            query_start_loc=query_start_loc,
+            num_seqs=torch.tensor([num_reqs],
+                                  dtype=torch.int32,
+                                  device=self.device),
+        )
+        # NOTE(woosuk): Due to chunked prefills, there can be at most 1 partial
+        # request in the batch. While we should not sample any token from this
+        # partial request, we do so for simplicity. We will ignore the sampled
+        # token from the partial request.
+        # TODO: Support prompt logprobs.
+        padded_num_reqs = _get_padded_num_reqs_with_upper_limit(
+            num_reqs, self.max_num_reqs)
+        # Indices at which we sample (positions of last token in the sequence).
+        # Padded to avoid recompiling when `num_reqs` varies.
+        logits_indices = self.query_start_loc_cpu[1:padded_num_reqs + 1] - 1
+        logits_indices = logits_indices.to(self.device)
+
+        layer_names = get_layers_from_vllm_config(self.vllm_config,
+                                                  Attention).keys()
+        per_layer_attn_metadata = {
+            layer_name: attn_metadata
+            for layer_name in layer_names
+        }
+        return per_layer_attn_metadata, logits_indices, padded_num_reqs
+
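The flattened-batch arithmetic above is easiest to verify with toy numbers. A standalone NumPy sketch assuming 3 requests scheduled for [2, 5, 3] tokens, no previously computed tokens, `max_model_len` M=10, and block size 2 (all values illustrative, not taken from the diff):

import numpy as np

num_scheduled = np.array([2, 5, 3], dtype=np.int32)
num_computed = np.array([0, 0, 0], dtype=np.int32)  # tokens already computed
M = 10           # max_model_len (row stride of the token_ids matrix)
block_size = 2

req_indices = np.repeat(np.arange(3), num_scheduled)
# -> [0 0 1 1 1 1 1 2 2 2]: owning request of each scheduled token
arange = np.concatenate([np.arange(n) for n in num_scheduled])
# -> [0 1 0 1 2 3 4 0 1 2]: position of each token within its request
positions = num_computed[req_indices] + arange
token_indices = positions + req_indices * M
# -> [0 1 10 11 12 13 14 20 21 22]: flat indices into token_ids_cpu
block_offsets = positions % block_size  # pairs with the block_table lookup
print(req_indices, arange, token_indices, block_offsets, sep="\n")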
+    def _scatter_placeholders(
+        self,
+        embeds: torch.Tensor,
+        is_embed: Optional[torch.Tensor],
+    ) -> torch.Tensor:
+        if is_embed is None:
+            return embeds
+
+        placeholders = embeds.new_full(
+            (is_embed.shape[0], embeds.shape[-1]),
+            fill_value=torch.nan,
+        )
+        placeholders[is_embed] = embeds
+        return placeholders
+
+    def _gather_placeholders(
+        self,
+        placeholders: torch.Tensor,
+        is_embed: Optional[torch.Tensor],
+    ) -> torch.Tensor:
+        if is_embed is None:
+            return placeholders
+
+        return placeholders[is_embed]
+
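A round-trip of the two helpers above on toy tensors (shapes are illustrative): slots where `is_embed` is False stay NaN after scattering, and gathering with the same mask recovers the original rows.

import torch

embeds = torch.arange(6, dtype=torch.float32).reshape(3, 2)  # 3 real rows
is_embed = torch.tensor([True, False, True, False, True])    # 5 slots

placeholders = embeds.new_full((is_embed.shape[0], embeds.shape[-1]),
                               fill_value=torch.nan)
placeholders[is_embed] = embeds          # scatter: rows 0, 2, 4 filled
recovered = placeholders[is_embed]       # gather: back to the 3 real rows
assert torch.equal(recovered, embeds)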
+    def _execute_mm_encoder(self, scheduler_output: "SchedulerOutput"):
+        scheduled_encoder_inputs = scheduler_output.scheduled_encoder_inputs
+        if not scheduled_encoder_inputs:
+            return
+
+        # Batch the multi-modal inputs.
+        mm_inputs = list[MultiModalKwargs]()
+        req_ids_pos = list[tuple[str, int, PlaceholderRange]]()
+        for req_id, encoder_input_ids in scheduled_encoder_inputs.items():
+            req_state = self.requests[req_id]
+
+            for mm_input_id in encoder_input_ids:
+                mm_inputs.append(req_state.mm_inputs[mm_input_id])
+                req_ids_pos.append(
+                    (req_id, mm_input_id, req_state.mm_positions[mm_input_id]))
+
+        # Batch mm inputs as much as we can: if a request in the batch has
+        # multiple modalities or a different modality than the previous one,
+        # we process it separately to preserve item order.
+        # FIXME(ywang96): This is a hacky way to deal with multiple modalities
+        # in the same batch while still being able to benefit from batching
+        # multimodal inputs. The proper solution should be reordering the
+        # encoder outputs.
+        grouped_mm_inputs_list = group_mm_inputs_by_modality(mm_inputs)
+
+        encoder_outputs = []
+        for grouped_mm_inputs in grouped_mm_inputs_list:
+            batched_mm_inputs = MultiModalKwargs.batch(grouped_mm_inputs)
+            batched_mm_inputs = MultiModalKwargs.as_kwargs(
+                batched_mm_inputs,
+                dtype=self.model_config.dtype,
+                device=self.device,
+            )
+
+            # Run the encoder.
+            # `curr_group_outputs` is either of the following:
+            # 1. A tensor of shape (num_items, feature_size, hidden_size)
+            # in case feature_size is fixed across all multimodal items.
+            # 2. A list or tuple (length: num_items) of tensors, each of shape
+            # (feature_size, hidden_size) in case the feature size is dynamic
+            # depending on the input multimodal items.
+            xm.mark_step()
+            curr_group_outputs = self.model.get_multimodal_embeddings(
+                **batched_mm_inputs)
+            xm.mark_step()
+
+            sanity_check_mm_encoder_outputs(
+                curr_group_outputs,
+                expected_num_items=len(grouped_mm_inputs),
+            )
+
+            if isinstance(curr_group_outputs, torch.Tensor):
+                encoder_outputs.append(curr_group_outputs)
+            else:
+                assert isinstance(curr_group_outputs, (list, tuple))
+                for output in curr_group_outputs:
+                    encoder_outputs.append(output)
+
+        # Cache the encoder outputs.
+        # NOTE (NickLucche) here we diverge from logic in other runners, as
+        # we assume we only have whole mm items to process. Hence we avoid
+        # the intrinsic dynamism that `scatter_mm_placeholders` introduces.
+        for (req_id, input_id, pos_info), output in zip(
+                req_ids_pos,
+                encoder_outputs,
+        ):
+            if req_id not in self.encoder_cache:
+                self.encoder_cache[req_id] = {}
+            assert pos_info.is_embed is None, "Expected all positions to be"\
+                " contiguous and embeddings."
+            self.encoder_cache[req_id][input_id] = output
+
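`group_mm_inputs_by_modality` is imported elsewhere in this file; the observable contract used above is just "split the flat input list into consecutive single-modality runs". A plausible sketch of that contract using `itertools.groupby` (an assumption for illustration, not the actual vLLM implementation):

from itertools import groupby

inputs = ["image", "image", "audio", "image"]  # stand-ins for MultiModalKwargs
grouped = [list(run) for _, run in groupby(inputs)]
print(grouped)  # [['image', 'image'], ['audio'], ['image']]
# Each run can be batched into one encoder call while preserving item order.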
+    def _gather_mm_embeddings(
+        self,
+        scheduler_output: "SchedulerOutput",
+    ) -> list[torch.Tensor]:
+        mm_embeds: list[torch.Tensor] = []
+        for req_id in self.input_batch.req_ids:
+            num_scheduled_tokens = scheduler_output.num_scheduled_tokens[
+                req_id]
+            req_state = self.requests[req_id]
+            num_computed_tokens = req_state.num_computed_tokens
+            mm_positions = req_state.mm_positions
+            # TODO unroll loop and assume/enforce --disable_chunked_mm_input
+            # NOTE (NickLucche) here we diverge from logic in other runners,
+            # as we assume we only have whole mm items to process. Hence we
+            # avoid the intrinsic dynamism that `gather_mm_placeholders`
+            # introduces.
+            for i, pos_info in enumerate(mm_positions):
+                start_pos = pos_info.offset
+                num_encoder_tokens = pos_info.length
+
+                # The encoder output is needed if the two ranges overlap:
+                # [num_computed_tokens,
+                #  num_computed_tokens + num_scheduled_tokens) and
+                # [start_pos, start_pos + num_encoder_tokens)
+                if start_pos >= num_computed_tokens + num_scheduled_tokens:
+                    # The encoder output is not needed in this step.
+                    break
+                if start_pos + num_encoder_tokens <= num_computed_tokens:
+                    # The encoder output is already processed and stored
+                    # in the decoder's KV cache.
+                    continue
+
+                assert req_id in self.encoder_cache
+                assert i in self.encoder_cache[req_id]
+                assert pos_info.is_embed is None, "Expected all positions to"\
+                    " be contiguous and embeddings."
+                encoder_output = self.encoder_cache[req_id][i]
+                mm_embeds.append(encoder_output)
+        return mm_embeds
+
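The two early exits above implement a standard half-open interval overlap test. A tiny worked example with assumed numbers:

def needs_encoder_output(num_computed: int, num_scheduled: int,
                         start_pos: int, num_encoder_tokens: int) -> bool:
    # Overlap of [num_computed, num_computed + num_scheduled) with
    # [start_pos, start_pos + num_encoder_tokens).
    if start_pos >= num_computed + num_scheduled:
        return False  # placeholder lies entirely after this step
    if start_pos + num_encoder_tokens <= num_computed:
        return False  # placeholder already fully in the KV cache
    return True

print(needs_encoder_output(0, 16, 4, 8))    # True: ranges overlap
print(needs_encoder_output(32, 16, 4, 8))   # False: already processed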
+    def _get_model_inputs(self, input_ids: torch.Tensor,
+                          mm_embeds: list[torch.Tensor]):
+        if self.is_multimodal_model:
+            # NOTE(woosuk): To unify token ids and soft tokens (vision
+            # embeddings), we always use embeddings (rather than token ids)
+            # as input to the multimodal model, even when the input is text.
+            if mm_embeds:
+                inputs_embeds = self.model.get_input_embeddings(
+                    input_ids, mm_embeds)
+            else:
+                inputs_embeds = self.model.get_input_embeddings(input_ids)
+            return None, inputs_embeds
+        else:
+            # For text-only models, we use token ids as input.
+            # While it is possible to use embeddings as input just like the
+            # multimodal models, it is not desirable for performance since
+            # then the embedding layer is not included in the CUDA graph.
+            return input_ids, None
+
+    @torch.no_grad()
+    def execute_model(
+        self,
+        scheduler_output: "SchedulerOutput",
+        intermediate_tensors: Optional[IntermediateTensors] = None,
+    ) -> ModelRunnerOutput:
+        # Update cached state
+        self._update_states(scheduler_output)
+        if not scheduler_output.total_num_scheduled_tokens:
+            # Return empty ModelRunnerOutput if there's no work to do.
+            return EMPTY_MODEL_RUNNER_OUTPUT
+
+        if self.is_multimodal_model:
+            # Run the multimodal encoder if any.
+            self._execute_mm_encoder(scheduler_output)
+            mm_embeds = self._gather_mm_embeddings(scheduler_output)
+        else:
+            mm_embeds = []
+        xm.mark_step()
+        # Prepare inputs
+        attn_metadata, logits_indices, padded_num_reqs = self._prepare_inputs(
+            scheduler_output)
+        input_ids, inputs_embeds = self._get_model_inputs(
+            self.input_ids, mm_embeds)
+        xm.mark_step()
+        num_reqs = self.input_batch.num_reqs
+        # Run the decoder
+        with set_forward_context(
+                attn_metadata,
+                self.vllm_config,
+                num_tokens=scheduler_output.total_num_scheduled_tokens):
+            hidden_states = self.model(
+                input_ids=input_ids,
+                positions=self.position_ids,
+                inputs_embeds=inputs_embeds,
+            )
+        hidden_states = self.select_hidden_states(hidden_states,
+                                                  logits_indices)
+        logits = self.compute_logits(hidden_states)
+        tpu_sampling_metadata = TPUSupportedSamplingMetadata.\
+            from_input_batch(self.input_batch, padded_num_reqs, self.device)
+        if scheduler_output.grammar_bitmask is not None:
+            require_struct_decoding, grammar_bitmask_padded, arange = \
+                self.prepare_structured_decoding_input(logits, scheduler_output)
+            logits = self.structured_decode(require_struct_decoding,
+                                            grammar_bitmask_padded, logits,
+                                            arange)
+        selected_token_ids = self.sample_from_logits(logits,
+                                                     tpu_sampling_metadata)
+
+        # NOTE (NickLucche) Use the original logits (before any penalties or
+        # temperature scaling) for the top-k logprobs. We can't enforce it due
+        # to recompilations outside torch.compiled code, so just make sure
+        # `sample_from_logits` does not modify the logits in-place.
+        logprobs = self.gather_logprobs(logits, selected_token_ids) \
+            if tpu_sampling_metadata.logprobs else None
+
+        # Remove padding on cpu and keep dynamic op outside of xla graph.
+        selected_token_ids = selected_token_ids.cpu()[:num_reqs]
+        logprobs_lists = logprobs.tolists() \
+            if tpu_sampling_metadata.logprobs else None
+
+        # Update the cache state concurrently. The code above will not block
+        # until we use `selected_token_ids`. Add a mark_step if the
+        # post-processing changes.
+        request_seq_lens: list[tuple[int, CachedRequestState, int]] = []
+        discard_sampled_tokens_req_indices = []
+        for i, req_id in zip(range(num_reqs), self.input_batch.req_ids):
+            assert req_id is not None
+            req_state = self.requests[req_id]
+            seq_len = (req_state.num_computed_tokens +
+                       scheduler_output.num_scheduled_tokens[req_id])
+            if seq_len >= req_state.num_tokens:
+                request_seq_lens.append((i, req_state, seq_len))
+            else:
+                # Ignore the sampled token from the partial request.
+                # Rewind the generator state as if the token was not sampled.
+                generator = self.input_batch.generators.get(i)
+                if generator is not None:
+                    # This relies on cuda-specific torch-internal impl details
+                    generator.set_offset(generator.get_offset() - 4)
+
+                # Record the index of the request that should not be sampled,
+                # so that we could clear the sampled tokens before returning.
+                discard_sampled_tokens_req_indices.append(i)
+
+        assert all(
+            req_id is not None for req_id in
+            self.input_batch.req_ids[:num_reqs]), "req_ids contains None"
+        req_ids = cast(list[str], self.input_batch.req_ids[:num_reqs])
+
+        prompt_logprobs_dict: dict[str, Optional[LogprobsTensors]] = {}
+        for req_id in self.input_batch.req_ids[:num_reqs]:
+            prompt_logprobs_dict[req_id] = None
+
+        max_gen_len = selected_token_ids.shape[-1]
+        if max_gen_len == 1:
+            valid_sampled_token_ids = selected_token_ids.tolist()
+
+            # Mask out the sampled tokens that should not be sampled.
+            # TODO: Keep in sync with gpu_model_runner.py, in particular
+            # the "else" case here
+            for i in discard_sampled_tokens_req_indices:
+                valid_sampled_token_ids[i].clear()
+
+            # Append sampled tokens
+            for i, req_state, seq_len in request_seq_lens:
+                token_id = valid_sampled_token_ids[i][0]
+                self.input_batch.token_ids_cpu[i, seq_len] = token_id
+                req_state.output_token_ids.append(token_id)
+                self.input_batch.num_tokens[i] += 1
+
+        else:
+            valid_mask = selected_token_ids != INVALID_TOKEN_ID
+            gen_lens = valid_mask.sum(dim=1).tolist()
+            valid_sampled_token_ids = [
+                seq.tolist()
+                for seq in selected_token_ids[valid_mask].split(gen_lens)
+            ]
+            self.input_batch.num_tokens[:num_reqs] += gen_lens
+            for i, req_state, seq_len in request_seq_lens:
+                target_slice = slice(seq_len - gen_lens[i] + 1, seq_len + 1)
+                self.input_batch.token_ids_cpu[
+                    i, target_slice] = valid_sampled_token_ids[i]
+                req_state.output_token_ids.extend(valid_sampled_token_ids[i])
+
+        model_runner_output = ModelRunnerOutput(
+            req_ids=req_ids,
+            req_id_to_index=self.input_batch.req_id_to_index,
+            sampled_token_ids=valid_sampled_token_ids,
+            spec_token_ids=None,
+            logprobs=logprobs_lists,
+            prompt_logprobs_dict=prompt_logprobs_dict,
+        )
+
+        # Check there are no new graphs compiled - all the graphs should be
+        # captured and compiled during warm up.
+        self._verify_num_xla_graphs("execute_model")
+
+        return model_runner_output
+
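The multi-token branch above packs a ragged result out of a padded tensor via a boolean mask and `split`. A standalone sketch assuming an `INVALID_TOKEN_ID` of -1 (illustrative value):

import torch

INVALID = -1  # stand-in for INVALID_TOKEN_ID
selected = torch.tensor([[11, 12, INVALID],
                         [21, INVALID, INVALID],
                         [31, 32, 33]])
valid_mask = selected != INVALID
gen_lens = valid_mask.sum(dim=1).tolist()          # [2, 1, 3]
ragged = [seq.tolist() for seq in selected[valid_mask].split(gen_lens)]
print(ragged)  # [[11, 12], [21], [31, 32, 33]]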
+    def load_model(self) -> None:
+        self.device = self.device_config.device
+
+        # NOTE(woosuk): While the executor assigns the TP ranks to the worker
+        # process, the ranks can be different from the ranks internally assigned
+        # by the xm runtime. Therefore, there is a mismatch in the rank
+        # assignment between the gloo (cpu) runtime and the xm (tpu) runtime.
+        # This is not a problem in linear layers because all-reduce is
+        # rank-agnostic. However, it matters for all-gather as the ranks
+        # determine the order of concatenating the output tensors.
+        # As a workaround, we use the xm's rank assignment only when loading
+        # the embedding weights.
+        xm_tp_rank = xr.global_ordinal()
+        with patch(
+                "vllm.model_executor.layers.vocab_parallel_embedding."
+                "get_tensor_model_parallel_rank",
+                return_value=xm_tp_rank):
+            model = get_model(vllm_config=self.vllm_config)
+        if self.lora_config is not None:
+            model = self.load_lora_model(model, self.model_config,
+                                         self.scheduler_config,
+                                         self.lora_config, self.device)
+
+        # Sync all pending XLA execution during model initialization and weight
+        # loading.
+        xm.mark_step()
+        xm.wait_device_ops()
+        self.model = model
+        self.sampler = TPUSampler()
+
+    @torch.no_grad()
+    def _dummy_run(self, num_tokens: int) -> None:
+        if self.is_multimodal_model:
+            input_ids = None
+            inputs_embeds = torch.zeros((num_tokens, self.hidden_size),
+                                        dtype=self.dtype,
+                                        device=self.device)
+        else:
+            input_ids = torch.zeros((num_tokens),
+                                    dtype=torch.int32,
+                                    device=self.device)
+            inputs_embeds = None
+        actual_num_reqs = min(num_tokens, self.max_num_reqs)
+        position_ids = torch.zeros(num_tokens,
+                                   dtype=torch.int32,
+                                   device=self.device)
+        slot_mapping = torch.zeros(num_tokens,
+                                   dtype=torch.int64,
+                                   device=self.device)
+        block_tables = torch.zeros(
+            (self.max_num_reqs, self.block_table_cpu.shape[1]),
+            dtype=torch.int32,
+            device=self.device)
+        query_lens = [1] * self.max_num_reqs
+        query_start_loc = torch.cumsum(torch.tensor([0] + query_lens,
+                                                    dtype=torch.int32),
+                                       dim=0,
+                                       dtype=torch.int32).to(self.device)
+        context_lens = torch.ones((self.max_num_reqs, ),
+                                  dtype=torch.int32,
+                                  device=self.device)
+        num_seqs = torch.tensor([actual_num_reqs],
+                                dtype=torch.int32,
+                                device=self.device)
+        attn_metadata = PallasMetadata(
+            slot_mapping=slot_mapping,
+            block_tables=block_tables,
+            context_lens=context_lens,
+            query_start_loc=query_start_loc,
+            num_seqs=num_seqs,
+        )
+
+        if self.is_multimodal_model:
+            torch._dynamo.mark_dynamic(inputs_embeds, 0)
+        else:
+            torch._dynamo.mark_dynamic(input_ids, 0)
+        torch._dynamo.mark_dynamic(position_ids, 0)
+        torch._dynamo.mark_dynamic(attn_metadata.slot_mapping, 0)
+
+        layer_names = get_layers_from_vllm_config(self.vllm_config,
+                                                  Attention).keys()
+        per_layer_attn_metadata = {
+            layer_name: attn_metadata
+            for layer_name in layer_names
+        }
+
+        with self.maybe_dummy_run_with_lora(
+                self.lora_config,
+                np.array([num_tokens], dtype=np.int32)), set_forward_context(
+                    per_layer_attn_metadata, self.vllm_config, 0):
+            out = self.model(input_ids=input_ids,
+                             positions=position_ids,
+                             inputs_embeds=inputs_embeds)
+        self._hidden_states_dtype = out.dtype
+
+    def _precompile_mm_encoder(self) -> None:
+        # Pre-compile MM encoder for all supported data modalities.
+        hf_config = self.vllm_config.model_config.hf_config
+        for mode, max_items_by_mode in \
+                self.max_num_mm_items_by_modality.items():
+            logger.info(
+                "Compiling Multimodal %s Encoder with different input"
+                " shapes.", mode)
+            start = time.perf_counter()
+            # No padding for MM encoder just yet.
+            for num_items in range(1, max_items_by_mode + 1):
+                logger.info(" -- mode: %s items: %d", mode, num_items)
+                batched_dummy_mm_inputs = self._get_mm_dummy_batch(
+                    mode, num_items)
+                # Run multimodal encoder.
+                xm.mark_step()
+                mm_embeds = self.model.\
+                    get_multimodal_embeddings(**batched_dummy_mm_inputs)
+                xm.mark_step()
+                num_patches = mm_embeds[0].shape[0]
+                items_size = num_patches * num_items
+
+                # NOTE (NickLucche) pre-compile `get_input_embeddings` when mm
+                # embeddings are present. We assume `--disable-mm-chunked`,
+                # hence only whole items can be scheduled. This implies we just
+                # need to compile when `num_items` fit the (padded) `input_ids`
+                for num_tokens in self.num_tokens_paddings:
+                    if num_tokens >= items_size:
+                        # XLA Workaround: if torch.zeros(..device) is used, XLA
+                        # compiles a scalar+expansion op, which won't match
+                        # the graph generated at runtime. CPU->TPU must be used
+                        placeholders_ids = torch.zeros(num_tokens,
+                                                       dtype=torch.int32,
+                                                       device="cpu")
+                        # Align placeholders and actual num mm_embeddings.
+                        placeholders_ids[:items_size] = \
+                            hf_config.image_token_index
+
+                        placeholders_ids = placeholders_ids.to(self.device)
+                        # Assign outputs or the graph will be cut short.
+                        a, b = self._get_model_inputs(placeholders_ids,
+                                                      [mm_embeds])
+                        assert a is None
+                        xm.mark_step()
+
+            # Pre-compile `get_input_embeddings` when mm_embeddings are not
+            # present. Chunk is only made of text, no mm_placeholders.
+            for num_tokens in self.num_tokens_paddings:
+                placeholders_ids = torch.zeros(num_tokens,
+                                               dtype=torch.int32,
+                                               device="cpu")
+                placeholders_ids = placeholders_ids.to(self.device)
+                a, b = self._get_model_inputs(placeholders_ids, [])
+                assert a is None
+                xm.mark_step()
+
+            xm.wait_device_ops()
+            end = time.perf_counter()
+            logger.info(
+                "Multimodal %s Encoder compilation finished in %.2f "
+                "[secs].", mode, end - start)
+
+    def _precompile_backbone(self) -> None:
+        logger.info("Compiling the model with different input shapes.")
+        start = time.perf_counter()
+        for num_tokens in self.num_tokens_paddings:
+            logger.info(" -- num_tokens: %d", num_tokens)
+            self._dummy_run(num_tokens)
+        xm.wait_device_ops()
+        end = time.perf_counter()
+        logger.info("Compilation finished in %.2f [secs].", end - start)
+        self._update_num_xla_graphs("model backbone")
+
+    def _precompile_select_hidden_states(self) -> None:
+        # Compile hidden state selection function for bucketed
+        # n_tokens x max_num_reqs. Graph is really small so this is fine.
+        logger.info(
+            "Compiling select_hidden_states with different input shapes.")
+        start = time.perf_counter()
+        hsize = self.model_config.get_hidden_size()
+        for num_tokens in self.num_tokens_paddings:
+            dummy_hidden = torch.zeros((num_tokens, hsize),
+                                       device=self.device,
+                                       dtype=self._hidden_states_dtype)
+            torch._dynamo.mark_dynamic(dummy_hidden, 0)
+            for num_reqs in self.num_reqs_paddings:
+                indices = torch.zeros(num_reqs,
+                                      dtype=torch.int32,
+                                      device=self.device)
+                torch._dynamo.mark_dynamic(indices, 0)
+                self.select_hidden_states(dummy_hidden, indices)
+                logger.info(" -- num_tokens: %d, num_seqs: %d", num_tokens,
+                            num_reqs)
+                # There can't be more requests than tokens. But do compile for
+                # the next bigger value in case num_tokens uses bucketed
+                # padding.
+                if num_reqs >= min(num_tokens, self.max_num_reqs):
+                    break
+        xm.wait_device_ops()
+        end = time.perf_counter()
+        logger.info("Compilation finished in %.2f [secs].", end - start)
+        self._update_num_xla_graphs("select_hidden_states")
+
+    def _precompile_compute_logits(self) -> None:
+        logger.info("Compiling compute_logits with different input shapes.")
+        start = time.perf_counter()
+        hsize = self.model_config.get_hidden_size()
+        for num_reqs in self.num_reqs_paddings:
+            dummy_hidden = torch.zeros((num_reqs, hsize),
+                                       device=self.device,
+                                       dtype=self._hidden_states_dtype)
+            torch._dynamo.mark_dynamic(dummy_hidden, 0)
+            self.compute_logits(dummy_hidden)
+            logger.info(" -- num_seqs: %d", num_reqs)
+        xm.wait_device_ops()
+        end = time.perf_counter()
+        logger.info("Compilation finished in %.2f [secs].", end - start)
+        self._update_num_xla_graphs("compute_logits")
+
+    def _precompile_structured_decoding(self) -> None:
+        logger.info(
+            "Compiling structured_decoding with different input shapes.")
+        start = time.perf_counter()
+        for num_reqs in self.num_reqs_paddings:
+            dummy_logits = torch.zeros((num_reqs, self.vocab_size),
+                                       device=self.device,
+                                       dtype=self._hidden_states_dtype)
+            dummy_require_struct_decoding = \
+                self.require_structured_out_cpu[:num_reqs].to(self.device)
+            dummy_grammar_bitmask = \
+                self.grammar_bitmask_cpu[:num_reqs].to(self.device)
+            # The first dimension of the above 3 dummy tensors cannot be
+            # mark_dynamic because some operations in structured_decode require
+            # them to be static.
+            arange = self.structured_decode_arange.to(self.device)
+            self.structured_decode(dummy_require_struct_decoding,
+                                   dummy_grammar_bitmask, dummy_logits, arange)
+            logger.info(" -- num_seqs: %d", num_reqs)
+        xm.wait_device_ops()
+        end = time.perf_counter()
+        logger.info("Compilation finished in %.2f [secs].", end - start)
+        self._update_num_xla_graphs("structured_decoding")
+
+    def _precompile_sample_from_logits(self) -> None:
+        logger.info(
+            "Compiling sample_from_logits with different input shapes.")
+        start = time.perf_counter()
+        for num_reqs in self.num_reqs_paddings:
+            dummy_logits = torch.zeros((num_reqs, self.vocab_size),
+                                       device=self.device,
+                                       dtype=self._hidden_states_dtype)
+            # The first dimension of dummy_logits cannot be mark_dynamic
+            # because some operations in the sampler require it to be static.
+            for all_greedy in [False, True]:
+                generate_params_if_all_greedy = not all_greedy
+                sampling_metadata = (
+                    TPUSupportedSamplingMetadata.from_input_batch(
+                        self.input_batch,
+                        num_reqs,
+                        self.device,
+                        generate_params_if_all_greedy,
+                    ))
+                sampling_metadata.all_greedy = all_greedy
+                self.sample_from_logits(dummy_logits, sampling_metadata)
+            logger.info(" -- num_seqs: %d", num_reqs)
+        xm.wait_device_ops()
+        end = time.perf_counter()
+        logger.info("Compilation finished in %.2f [secs].", end - start)
+        self._update_num_xla_graphs("sample_from_logits")
+
+    def _precompile_gather_logprobs(self) -> None:
+        logger.info("Compiling gather_logprobs with different input shapes.")
+        start = time.perf_counter()
+        for num_reqs in self.num_reqs_paddings:
+            dummy_logits = torch.zeros((num_reqs, self.vocab_size),
+                                       device=self.device,
+                                       dtype=self._hidden_states_dtype)
+            dummy_tokens = torch.zeros((num_reqs, 1),
+                                       dtype=torch.int64).to(self.device)
+            self.gather_logprobs(dummy_logits, dummy_tokens)
+            logger.info(" -- num_seqs: %d", num_reqs)
+        xm.wait_device_ops()
+        end = time.perf_counter()
+        logger.info("Compilation finished in %.2f [secs].", end - start)
+        self._update_num_xla_graphs("gather_logprobs")
+
+    def capture_model(self) -> None:
+        """
+        Precompile all the subgraphs with possible input shapes.
+        """
+        self._precompile_mm_encoder()
+        self._precompile_backbone()
+        self._precompile_select_hidden_states()
+        self._precompile_compute_logits()
+        self._precompile_structured_decoding()
+        self._precompile_sample_from_logits()
+        self._precompile_gather_logprobs()
+
+    def profile_run(
+        self,
+        num_tokens: int,
+    ) -> None:
+        # Profile with multimodal encoder & encoder cache.
+        # TODO: handle encoder-decoder models once we support them.
+        if (self.is_multimodal_model and self.max_num_encoder_input_tokens > 0
+                and self.encoder_cache_size > 0):
+
+            # NOTE: Currently model is profiled with a single non-text
+            # modality with the max possible input tokens even when
+            # it supports multiple.
+            dummy_data_modality, max_num_mm_items = max(
+                self.max_num_mm_items_by_modality.items(), key=lambda t: t[1])
+
+            encoder_budget = min(self.max_num_encoder_input_tokens,
+                                 self.encoder_cache_size)
+
+            logger.info(
+                "Encoder cache will be initialized with a budget of %d tokens,"
+                " and profiled with %s %s items of the maximum feature size.",
+                encoder_budget, max_num_mm_items, dummy_data_modality)
+
+            # Create dummy batch of multimodal inputs.
+            batched_dummy_mm_inputs = self._get_mm_dummy_batch(
+                dummy_data_modality, max_num_mm_items)
+
+            # Run multimodal encoder.
+            # Isolate encoder graph from post-processing to minimize
+            # impact of recompilation until it's fixed.
+            start = time.perf_counter()
+            xm.mark_step()
+            dummy_encoder_outputs = self.model.get_multimodal_embeddings(
+                **batched_dummy_mm_inputs)
+            xm.mark_step()
+            xm.wait_device_ops()
+            end = time.perf_counter()
+            logger.info(
+                "Multimodal Encoder profiling finished in %.2f [secs].",
+                end - start)
+
+            assert len(dummy_encoder_outputs) == max_num_mm_items, (
+                "Expected dimension 0 of encoder outputs to match the number "
+                f"of multimodal data items: {max_num_mm_items}, got "
+                f"{len(dummy_encoder_outputs)=} instead. This is most likely "
+                "due to the 'get_multimodal_embeddings' method of the model "
+                "not being implemented correctly.")
+
+            # Cache the dummy encoder outputs.
+            self.encoder_cache["tmp"] = dict(enumerate(dummy_encoder_outputs))
+
+        # Trigger compilation for general shape.
+        self._dummy_run(num_tokens)
+
+        xm.mark_step()
+        xm.wait_device_ops()
+        self.encoder_cache.clear()
+        gc.collect()
+
+    def initialize_kv_cache(self, kv_cache_config: KVCacheConfig) -> None:
+        """
+        Initialize KV cache based on `kv_cache_config`.
+        Args:
+            kv_cache_config: Configuration for the KV cache, including the KV
+            cache size of each layer
+        """
+        if len(kv_cache_config.kv_cache_groups) > 1:
+            raise NotImplementedError(
+                "Hybrid models with more than one KV cache type are not "
+                "supported yet.")
+
+        self.input_batch = InputBatch(
+            max_num_reqs=self.max_num_reqs,
+            max_model_len=self.max_model_len,
+            max_num_batched_tokens=self.max_num_tokens,
+            device=self.device,
+            pin_memory=self.pin_memory,
+            vocab_size=self.model_config.get_vocab_size(),
+            block_size=kv_cache_config.kv_cache_groups[0].kv_cache_spec.
+            block_size,
+        )
+        assert self.block_table_cpu.dtype == self.input_batch.block_table[
+            0].get_cpu_tensor().dtype
+
+        kv_caches: dict[str, torch.Tensor] = {}
+
+        for kv_cache_group in kv_cache_config.kv_cache_groups:
+            kv_cache_spec = kv_cache_group.kv_cache_spec
+            for layer_name in kv_cache_group.layer_names:
+                tensor_config = kv_cache_config.tensors[layer_name]
+                assert tensor_config.size % kv_cache_spec.page_size_bytes == 0
+                num_blocks = tensor_config.size // kv_cache_spec.page_size_bytes
+                if isinstance(kv_cache_spec, AttentionSpec):
+                    kv_cache_shape = PallasAttentionBackend.get_kv_cache_shape(
+                        num_blocks, kv_cache_spec.block_size,
+                        kv_cache_spec.num_kv_heads, kv_cache_spec.head_size)
+                    dtype = kv_cache_spec.dtype
+
+                    tpu_kv_cache = torch.zeros(kv_cache_shape,
+                                               dtype=dtype,
+                                               device=self.device)
+
+                    kv_caches[layer_name] = tpu_kv_cache
+                else:
+                    raise NotImplementedError
+
+        bind_kv_cache(
+            kv_caches,
+            self.vllm_config.compilation_config.static_forward_context,
+            self.kv_caches)
+
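The block-count math in `initialize_kv_cache` is plain integer arithmetic: the per-layer byte budget divided by the page size of one block. A worked example with assumed sizes (a real `page_size_bytes` comes from the spec; these numbers are illustrative only):

# Assumed per-layer budget and page size, for illustration only.
tensor_size_bytes = 512 * 1024 * 1024        # 512 MiB reserved for one layer
block_size, num_kv_heads, head_size = 16, 8, 128
bytes_per_elem = 2                           # e.g. bfloat16
# K and V for every token slot in a block:
page_size_bytes = 2 * block_size * num_kv_heads * head_size * bytes_per_elem
assert tensor_size_bytes % page_size_bytes == 0
num_blocks = tensor_size_bytes // page_size_bytes
print(page_size_bytes, num_blocks)  # 65536 bytes/page -> 8192 blocks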
+    def reset_dynamo_cache(self):
+        if self.is_multimodal_model:
+            compiled_model = self.model.get_language_model().model
+        else:
+            compiled_model = self.model.model
+        if isinstance(compiled_model, TorchCompileWrapperWithCustomDispatcher):
+            logger.info("Clear dynamo cache and cached dynamo bytecode.")
+            torch._dynamo.eval_frame.remove_from_cache(
+                compiled_model.original_code_object)
+            compiled_model.compiled_codes.clear()
+
+    @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
+    def select_hidden_states(self, hidden_states, indices_do_sample):
+        return hidden_states[indices_do_sample]
+
+    @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
+    def compute_logits(self,
+                       sample_hidden_states: torch.Tensor) -> torch.Tensor:
+        return self.model.compute_logits(sample_hidden_states, None)
+
+    @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
+    def sample_from_logits(
+            self, logits: torch.Tensor,
+            sampling_metadata: TPUSupportedSamplingMetadata) -> torch.Tensor:
+        """
+        Sample with xla-friendly function. This function is to be traced
+        separately from `forward` for lighter compilation overhead.
+        """
+        if sampling_metadata.all_greedy:
+            out_tokens = torch.argmax(logits, dim=-1, keepdim=True)
+        else:
+            out_tokens = self.sampler(logits,
+                                      sampling_metadata).sampled_token_ids
+        return out_tokens
+
+    @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
+    def gather_logprobs(self, logits: torch.Tensor,
+                        sampled_tokens: torch.Tensor) -> LogprobsTensors:
+        """
+        Gather the top_logprobs with corresponding tokens. Use a fixed number
+        of logprobs as an alternative to having multiple pre-compiled graphs.
+        Select the number of logprobs actually demanded by each request on CPU.
+        """
+        logprobs = self.sampler.compute_logprobs(logits)
+        return self.sampler.gather_logprobs(
+            logprobs,
+            self.model_config.max_logprobs,
+            token_ids=sampled_tokens.squeeze(-1))
+
+    @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
+    def structured_decode(self, require_struct_decoding: torch.Tensor,
+                          grammar_bitmask: torch.Tensor, logits: torch.Tensor,
+                          arange: torch.Tensor) -> torch.Tensor:
+        return torch.where(
+            require_struct_decoding,
+            self.apply_grammar_bitmask(logits, grammar_bitmask, arange),
+            logits)
+
+    def apply_grammar_bitmask(self, logits: torch.Tensor,
+                              grammar_bitmask: torch.Tensor,
+                              arange: torch.Tensor):
+        assert (logits.shape[0] == grammar_bitmask.shape[0])
+        logits_cloned = logits.clone()
+        for i in range(logits.shape[0]):
+            unpacked_bitmask = (torch.bitwise_right_shift(
+                grammar_bitmask[i][:, None], arange[None, :]) & 1) == 0
+            unpacked_bitmask = unpacked_bitmask.reshape(-1)[:self.vocab_size]
+            logits_cloned[i] = logits_cloned[i].masked_fill(
+                unpacked_bitmask, -float("inf"))
+        return logits_cloned
+
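The bit-unpacking in `apply_grammar_bitmask` expands a packed int32 mask (one bit per vocab token; a set bit means the token is allowed) into a boolean ban-mask. A toy check with an 8-token vocab packed into one integer (dtypes and sizes are illustrative):

import torch

vocab_size = 8
arange = torch.arange(32)                       # bit positions within an int32
packed = torch.tensor([[0b10100110]])           # set bits = allowed tokens
unpacked = (torch.bitwise_right_shift(packed[0][:, None],
                                      arange[None, :]) & 1) == 0
banned = unpacked.reshape(-1)[:vocab_size]
print(banned.tolist())
# [True, False, False, True, True, False, True, False]
# -> tokens 1, 2, 5, 7 stay; the rest are masked to -inf.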
+    def get_multimodal_embeddings(self, *args, **kwargs):
+        return self.model.get_multimodal_embeddings(*args, **kwargs)
+
+    def get_input_embeddings(self, *args, **kwargs):
+        return self.model.get_input_embeddings(*args, **kwargs)
+
+    def prepare_structured_decoding_input(
+        self, logits: torch.Tensor, scheduler_output: "SchedulerOutput"
+    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+        grammar_bitmask = scheduler_output.grammar_bitmask
+        assert grammar_bitmask is not None
+        num_reqs, _ = logits.shape
+
+        # Reset pre-allocated tensors
+        self.grammar_bitmask_cpu.zero_()
+        self.require_structured_out_cpu.zero_()
+
+        # We receive the structured output bitmask from the scheduler, but the
+        # indices of the requests in the batch may not match the indices of
+        # the bitmask since the scheduler doesn't know how the tpu runner is
+        # ordering the requests in the batch. We need to match the order of
+        # the bitmask with the order of the requests.
+        struct_out_indices: list[int] = []
+        mask_indices: list[int] = []
+        for req_id in self.input_batch.req_ids:
+            mask_index = scheduler_output.structured_output_request_ids.get(
+                req_id)
+            if mask_index is None:
+                continue
+            batch_index = self.input_batch.req_id_to_index[req_id]
+            struct_out_indices.append(batch_index)
+            mask_indices.append(mask_index)
+        self.grammar_bitmask_cpu[struct_out_indices] = torch.from_numpy(
+            grammar_bitmask[mask_indices])
+        # It's not guaranteed that all requests in this batch require
+        # structured output, so create a bool tensor to represent
+        # the requests that need structured output.
+        struct_out_indices = torch.tensor(struct_out_indices, dtype=torch.long)
+        self.require_structured_out_cpu[struct_out_indices] = True
+        return self.require_structured_out_cpu[:num_reqs].to(logits.device), \
+            self.grammar_bitmask_cpu[:num_reqs].to(logits.device), \
+            self.structured_decode_arange.to(logits.device)
+
+    def _get_mm_dummy_batch(self, modality: str,
+                            batch_size: int) -> BatchedTensorInputs:
+        # Dummy data for pre-compiling multimodal models.
+        dummy_request_data = self.mm_registry.get_decoder_dummy_data(
+            model_config=self.model_config,
+            seq_len=self.max_num_tokens,
+        )
+        dummy_mm_data = dummy_request_data.multi_modal_data
+
+        # Dummy data definition in V0 may contain multiple multimodal items
+        # (e.g., multiple images) for a single request, therefore here we
+        # always replicate the first item max_num_mm_items times, since in V1
+        # items are scheduled to be processed separately.
+        assert isinstance(dummy_mm_data, MultiModalKwargs), (
+            "Expected dummy multimodal data to be of type "
+            f"MultiModalKwargs, got {type(dummy_mm_data)=} instead. "
+            "This is most likely due to the model not having a merged "
+            "processor.")
+
+        # When models have a merged processor, their dummy data is
+        # already batched `MultiModalKwargs`, therefore we take the first
+        # `MultiModalKwargsItem` from the desired modality to profile on.
+        dummy_mm_item = dummy_mm_data.get_item(modality=modality, item_index=0)
+        dummy_mm_kwargs = MultiModalKwargs.from_items([dummy_mm_item])
+
+        batched_dummy_mm_inputs = MultiModalKwargs.batch([dummy_mm_kwargs] *
+                                                         batch_size)
+        return MultiModalKwargs.as_kwargs(
+            batched_dummy_mm_inputs,
+            dtype=self.model_config.dtype,
+            device=self.device,
+        )
+
+
+def _get_req_paddings(min_req_size: int, max_req_size: int) -> list[int]:
+    logger.info("Preparing request paddings:")
+    # assert min_req_size is power of 2
+    assert (min_req_size & (min_req_size - 1) == 0) and min_req_size > 0
+    paddings: list[int] = []
+    num = max(MIN_NUM_SEQS, min_req_size)
+    while num <= max_req_size and (len(paddings) == 0 or paddings[-1] != num):
+        paddings.append(num)
+        logger.info(" %d", num)
+        num = _get_padded_num_reqs_with_upper_limit(num + 1, max_req_size)
+    return paddings
+
+
+def _get_padded_num_reqs_with_upper_limit(x: int, upper_limit: int) -> int:
+    res = MIN_NUM_SEQS if x <= MIN_NUM_SEQS else 1 << (x - 1).bit_length()
+    return min(res, upper_limit)
+
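`_get_padded_num_reqs_with_upper_limit` rounds up to the next power of two, floored at MIN_NUM_SEQS and capped at the limit. A quick standalone check, assuming a MIN_NUM_SEQS of 8 (the real constant is defined elsewhere in this module):

MIN_NUM_SEQS = 8  # assumed floor, for illustration

def padded_num_reqs(x: int, upper_limit: int) -> int:
    res = MIN_NUM_SEQS if x <= MIN_NUM_SEQS else 1 << (x - 1).bit_length()
    return min(res, upper_limit)

print([padded_num_reqs(x, 64) for x in (1, 8, 9, 33, 100)])
# [8, 8, 16, 64, 64]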
+
+def _get_token_paddings(min_token_size: int, max_token_size: int,
+                        padding_gap: int) -> list[int]:
+    """Generate a list of padding sizes, starting from min_token_size and
+    ending with a number that covers max_token_size.
+
+    If padding_gap == 0:
+        double the padding size each time (exponential).
+    Otherwise:
+        double the padding size until it exceeds padding_gap,
+        then grow it linearly in steps of padding_gap.
+    """
+    # assert min_token_size is power of 2
+    assert (min_token_size & (min_token_size - 1) == 0) and min_token_size > 0
+    paddings = []
+    num = min_token_size
+
+    if padding_gap == 0:
+        logger.info("Using exponential token paddings:")
+        while True:
+            logger.info(" %d", num)
+            paddings.append(num)
+            if num >= max_token_size:
+                break
+            num *= 2
+    else:
+        logger.info("Using incremental token paddings:")
+        while num <= padding_gap:
+            logger.info(" %d", num)
+            paddings.append(num)
+            num *= 2
+        num //= 2
+        while num < max_token_size:
+            num += padding_gap
+            logger.info(" %d", num)
+            paddings.append(num)
+
+    return paddings
+
+
+def _get_padded_token_len(paddings: list[int], x: int) -> int:
+    """Return the first element in the paddings list greater than or equal
+    to x.
+    """
+    index = bisect.bisect_left(paddings, x)
+    assert index < len(paddings)
+    return paddings[index]
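Putting the two helpers together: generate the bucket list once, then look each batch size up with `bisect`. A standalone sketch of the exponential case, with assumed values `min_token_size=16` and `max_token_size=300`:

import bisect

def token_paddings(min_size: int, max_size: int, gap: int) -> list[int]:
    # Mirrors _get_token_paddings for the exponential case (gap == 0).
    paddings, num = [], min_size
    while True:
        paddings.append(num)
        if num >= max_size:
            break
        num *= 2
    return paddings

paddings = token_paddings(16, 300, 0)
print(paddings)                                   # [16, 32, 64, 128, 256, 512]
x = 77
print(paddings[bisect.bisect_left(paddings, x)])  # 128: first bucket >= 77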