vllm_cpu-0.8.5.post2-cp310-cp310-manylinux_2_17_x86_64.whl

This diff represents the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.

Potentially problematic release. This version of vllm-cpu might be problematic.
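Before trusting a flagged release, it can help to reproduce this file listing locally. The following is a minimal sketch, not part of the diff itself: it assumes the wheel has already been fetched without installing it (for example with "pip download vllm-cpu==0.8.5.post2 --no-deps -d .") and relies only on the fact that a wheel is a standard zip archive, so the Python standard library is enough to enumerate its contents for comparison against the table below.

    # Minimal sketch: list a locally downloaded wheel's members so they can
    # be checked against the "Files changed" table below. Assumes the wheel
    # was fetched first without installing it, e.g.:
    #   pip download vllm-cpu==0.8.5.post2 --no-deps -d .
    import zipfile

    WHEEL = "vllm_cpu-0.8.5.post2-cp310-cp310-manylinux_2_17_x86_64.whl"

    with zipfile.ZipFile(WHEEL) as wheel:
        for member in wheel.infolist():
            # Each member's path should match an entry in the listing;
            # file_size is the uncompressed size in bytes.
            print(f"{member.filename}  ({member.file_size} bytes)")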

Files changed (1103)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +170 -0
  3. vllm/_custom_ops.py +1536 -0
  4. vllm/_ipex_ops.py +241 -0
  5. vllm/_version.py +34 -0
  6. vllm/adapter_commons/__init__.py +0 -0
  7. vllm/adapter_commons/layers.py +16 -0
  8. vllm/adapter_commons/models.py +105 -0
  9. vllm/adapter_commons/request.py +25 -0
  10. vllm/adapter_commons/utils.py +92 -0
  11. vllm/adapter_commons/worker_manager.py +38 -0
  12. vllm/assets/__init__.py +0 -0
  13. vllm/assets/audio.py +38 -0
  14. vllm/assets/base.py +40 -0
  15. vllm/assets/image.py +31 -0
  16. vllm/assets/video.py +103 -0
  17. vllm/attention/__init__.py +19 -0
  18. vllm/attention/backends/__init__.py +0 -0
  19. vllm/attention/backends/abstract.py +306 -0
  20. vllm/attention/backends/blocksparse_attn.py +457 -0
  21. vllm/attention/backends/cpu_mla.py +303 -0
  22. vllm/attention/backends/flash_attn.py +999 -0
  23. vllm/attention/backends/flashinfer.py +1092 -0
  24. vllm/attention/backends/flashmla.py +242 -0
  25. vllm/attention/backends/hpu_attn.py +301 -0
  26. vllm/attention/backends/ipex_attn.py +396 -0
  27. vllm/attention/backends/mla/__init__.py +0 -0
  28. vllm/attention/backends/mla/common.py +1444 -0
  29. vllm/attention/backends/pallas.py +346 -0
  30. vllm/attention/backends/placeholder_attn.py +399 -0
  31. vllm/attention/backends/rocm_aiter_mla.py +412 -0
  32. vllm/attention/backends/rocm_flash_attn.py +969 -0
  33. vllm/attention/backends/torch_sdpa.py +691 -0
  34. vllm/attention/backends/triton_mla.py +113 -0
  35. vllm/attention/backends/utils.py +609 -0
  36. vllm/attention/backends/xformers.py +798 -0
  37. vllm/attention/layer.py +443 -0
  38. vllm/attention/ops/__init__.py +0 -0
  39. vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
  40. vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +432 -0
  41. vllm/attention/ops/blocksparse_attention/interface.py +238 -0
  42. vllm/attention/ops/blocksparse_attention/utils.py +244 -0
  43. vllm/attention/ops/chunked_prefill_paged_decode.py +366 -0
  44. vllm/attention/ops/flashmla.py +115 -0
  45. vllm/attention/ops/hpu_paged_attn.py +105 -0
  46. vllm/attention/ops/ipex_attn.py +193 -0
  47. vllm/attention/ops/merge_attn_states.py +42 -0
  48. vllm/attention/ops/nki_flash_attn.py +905 -0
  49. vllm/attention/ops/paged_attn.py +255 -0
  50. vllm/attention/ops/prefix_prefill.py +902 -0
  51. vllm/attention/ops/rocm_aiter_mla.py +42 -0
  52. vllm/attention/ops/rocm_aiter_paged_attn.py +101 -0
  53. vllm/attention/ops/triton_decode_attention.py +675 -0
  54. vllm/attention/ops/triton_flash_attention.py +1375 -0
  55. vllm/attention/ops/triton_merge_attn_states.py +96 -0
  56. vllm/attention/selector.py +186 -0
  57. vllm/attention/utils/fa_utils.py +54 -0
  58. vllm/beam_search.py +82 -0
  59. vllm/benchmarks/__init__.py +0 -0
  60. vllm/benchmarks/datasets.py +831 -0
  61. vllm/benchmarks/endpoint_request_func.py +160 -0
  62. vllm/benchmarks/latency.py +181 -0
  63. vllm/benchmarks/serve.py +925 -0
  64. vllm/benchmarks/throughput.py +608 -0
  65. vllm/benchmarks/utils.py +69 -0
  66. vllm/collect_env.py +795 -0
  67. vllm/compilation/__init__.py +0 -0
  68. vllm/compilation/backends.py +715 -0
  69. vllm/compilation/compiler_interface.py +437 -0
  70. vllm/compilation/counter.py +33 -0
  71. vllm/compilation/decorators.py +249 -0
  72. vllm/compilation/fix_functionalization.py +182 -0
  73. vllm/compilation/fusion.py +617 -0
  74. vllm/compilation/fx_utils.py +60 -0
  75. vllm/compilation/inductor_pass.py +114 -0
  76. vllm/compilation/monitor.py +38 -0
  77. vllm/compilation/multi_output_match.py +108 -0
  78. vllm/compilation/noop_elimination.py +135 -0
  79. vllm/compilation/pass_manager.py +74 -0
  80. vllm/compilation/sequence_parallelism.py +266 -0
  81. vllm/compilation/torch25_custom_graph_pass.py +41 -0
  82. vllm/compilation/vllm_inductor_pass.py +68 -0
  83. vllm/compilation/wrapper.py +129 -0
  84. vllm/config.py +4179 -0
  85. vllm/connections.py +170 -0
  86. vllm/core/__init__.py +0 -0
  87. vllm/core/block/__init__.py +0 -0
  88. vllm/core/block/block_table.py +398 -0
  89. vllm/core/block/common.py +370 -0
  90. vllm/core/block/cpu_gpu_block_allocator.py +440 -0
  91. vllm/core/block/interfaces.py +318 -0
  92. vllm/core/block/naive_block.py +465 -0
  93. vllm/core/block/prefix_caching_block.py +1134 -0
  94. vllm/core/block/utils.py +27 -0
  95. vllm/core/block_manager.py +520 -0
  96. vllm/core/evictor.py +156 -0
  97. vllm/core/interfaces.py +134 -0
  98. vllm/core/placeholder_block_space_manager.py +99 -0
  99. vllm/core/scheduler.py +2060 -0
  100. vllm/device_allocator/__init__.py +0 -0
  101. vllm/device_allocator/cumem.py +280 -0
  102. vllm/distributed/__init__.py +5 -0
  103. vllm/distributed/communication_op.py +40 -0
  104. vllm/distributed/device_communicators/__init__.py +0 -0
  105. vllm/distributed/device_communicators/base_device_communicator.py +151 -0
  106. vllm/distributed/device_communicators/cpu_communicator.py +139 -0
  107. vllm/distributed/device_communicators/cuda_communicator.py +131 -0
  108. vllm/distributed/device_communicators/cuda_wrapper.py +179 -0
  109. vllm/distributed/device_communicators/custom_all_reduce.py +301 -0
  110. vllm/distributed/device_communicators/custom_all_reduce_utils.py +257 -0
  111. vllm/distributed/device_communicators/hpu_communicator.py +45 -0
  112. vllm/distributed/device_communicators/neuron_communicator.py +19 -0
  113. vllm/distributed/device_communicators/pynccl.py +217 -0
  114. vllm/distributed/device_communicators/pynccl_wrapper.py +340 -0
  115. vllm/distributed/device_communicators/shm_broadcast.py +557 -0
  116. vllm/distributed/device_communicators/tpu_communicator.py +93 -0
  117. vllm/distributed/device_communicators/xpu_communicator.py +54 -0
  118. vllm/distributed/kv_transfer/README.md +29 -0
  119. vllm/distributed/kv_transfer/__init__.py +11 -0
  120. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  121. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  122. vllm/distributed/kv_transfer/kv_connector/base.py +127 -0
  123. vllm/distributed/kv_transfer/kv_connector/factory.py +107 -0
  124. vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +98 -0
  125. vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +201 -0
  126. vllm/distributed/kv_transfer/kv_connector/simple_connector.py +328 -0
  127. vllm/distributed/kv_transfer/kv_connector/utils.py +90 -0
  128. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +8 -0
  129. vllm/distributed/kv_transfer/kv_connector/v1/base.py +209 -0
  130. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +131 -0
  131. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +383 -0
  132. vllm/distributed/kv_transfer/kv_connector_agent.py +76 -0
  133. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  134. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +174 -0
  135. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +160 -0
  136. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +236 -0
  137. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  138. vllm/distributed/kv_transfer/kv_pipe/base.py +66 -0
  139. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +279 -0
  140. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +279 -0
  141. vllm/distributed/kv_transfer/kv_transfer_state.py +70 -0
  142. vllm/distributed/parallel_state.py +1209 -0
  143. vllm/distributed/utils.py +366 -0
  144. vllm/engine/__init__.py +0 -0
  145. vllm/engine/arg_utils.py +1724 -0
  146. vllm/engine/async_llm_engine.py +1261 -0
  147. vllm/engine/async_timeout.py +191 -0
  148. vllm/engine/llm_engine.py +2150 -0
  149. vllm/engine/metrics.py +717 -0
  150. vllm/engine/metrics_types.py +96 -0
  151. vllm/engine/multiprocessing/__init__.py +183 -0
  152. vllm/engine/multiprocessing/client.py +745 -0
  153. vllm/engine/multiprocessing/engine.py +450 -0
  154. vllm/engine/output_processor/__init__.py +0 -0
  155. vllm/engine/output_processor/interfaces.py +74 -0
  156. vllm/engine/output_processor/multi_step.py +210 -0
  157. vllm/engine/output_processor/single_step.py +136 -0
  158. vllm/engine/output_processor/stop_checker.py +130 -0
  159. vllm/engine/output_processor/util.py +27 -0
  160. vllm/engine/protocol.py +302 -0
  161. vllm/entrypoints/__init__.py +0 -0
  162. vllm/entrypoints/api_server.py +177 -0
  163. vllm/entrypoints/chat_utils.py +1259 -0
  164. vllm/entrypoints/cli/__init__.py +0 -0
  165. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  166. vllm/entrypoints/cli/benchmark/base.py +38 -0
  167. vllm/entrypoints/cli/benchmark/latency.py +29 -0
  168. vllm/entrypoints/cli/benchmark/main.py +53 -0
  169. vllm/entrypoints/cli/benchmark/serve.py +29 -0
  170. vllm/entrypoints/cli/benchmark/throughput.py +29 -0
  171. vllm/entrypoints/cli/collect_env.py +35 -0
  172. vllm/entrypoints/cli/main.py +59 -0
  173. vllm/entrypoints/cli/openai.py +175 -0
  174. vllm/entrypoints/cli/serve.py +59 -0
  175. vllm/entrypoints/cli/types.py +24 -0
  176. vllm/entrypoints/launcher.py +146 -0
  177. vllm/entrypoints/llm.py +1450 -0
  178. vllm/entrypoints/logger.py +44 -0
  179. vllm/entrypoints/openai/__init__.py +0 -0
  180. vllm/entrypoints/openai/api_server.py +1130 -0
  181. vllm/entrypoints/openai/cli_args.py +296 -0
  182. vllm/entrypoints/openai/logits_processors.py +89 -0
  183. vllm/entrypoints/openai/protocol.py +1806 -0
  184. vllm/entrypoints/openai/run_batch.py +439 -0
  185. vllm/entrypoints/openai/serving_chat.py +1210 -0
  186. vllm/entrypoints/openai/serving_completion.py +557 -0
  187. vllm/entrypoints/openai/serving_embedding.py +245 -0
  188. vllm/entrypoints/openai/serving_engine.py +569 -0
  189. vllm/entrypoints/openai/serving_models.py +314 -0
  190. vllm/entrypoints/openai/serving_pooling.py +237 -0
  191. vllm/entrypoints/openai/serving_score.py +439 -0
  192. vllm/entrypoints/openai/serving_tokenization.py +147 -0
  193. vllm/entrypoints/openai/serving_transcription.py +421 -0
  194. vllm/entrypoints/openai/tool_parsers/__init__.py +19 -0
  195. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +163 -0
  196. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +254 -0
  197. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +232 -0
  198. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +370 -0
  199. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +211 -0
  200. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +303 -0
  201. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +262 -0
  202. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +342 -0
  203. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +110 -0
  204. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +292 -0
  205. vllm/entrypoints/openai/tool_parsers/utils.py +123 -0
  206. vllm/entrypoints/score_utils.py +49 -0
  207. vllm/entrypoints/ssl.py +74 -0
  208. vllm/entrypoints/utils.py +136 -0
  209. vllm/env_override.py +34 -0
  210. vllm/envs.py +800 -0
  211. vllm/executor/__init__.py +0 -0
  212. vllm/executor/executor_base.py +400 -0
  213. vllm/executor/mp_distributed_executor.py +243 -0
  214. vllm/executor/msgspec_utils.py +29 -0
  215. vllm/executor/multiproc_worker_utils.py +312 -0
  216. vllm/executor/ray_distributed_executor.py +700 -0
  217. vllm/executor/ray_utils.py +400 -0
  218. vllm/executor/uniproc_executor.py +141 -0
  219. vllm/forward_context.py +159 -0
  220. vllm/inputs/__init__.py +37 -0
  221. vllm/inputs/data.py +248 -0
  222. vllm/inputs/parse.py +121 -0
  223. vllm/inputs/preprocess.py +745 -0
  224. vllm/inputs/registry.py +212 -0
  225. vllm/jsontree.py +79 -0
  226. vllm/logger.py +210 -0
  227. vllm/logging_utils/__init__.py +7 -0
  228. vllm/logging_utils/formatter.py +17 -0
  229. vllm/logits_process.py +121 -0
  230. vllm/lora/__init__.py +0 -0
  231. vllm/lora/fully_sharded_layers.py +335 -0
  232. vllm/lora/layers.py +1263 -0
  233. vllm/lora/lora.py +198 -0
  234. vllm/lora/models.py +802 -0
  235. vllm/lora/ops/__init__.py +0 -0
  236. vllm/lora/ops/torch_ops/__init__.py +15 -0
  237. vllm/lora/ops/torch_ops/lora_ops.py +115 -0
  238. vllm/lora/ops/triton_ops/__init__.py +11 -0
  239. vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
  240. vllm/lora/ops/triton_ops/lora_expand.py +293 -0
  241. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +147 -0
  242. vllm/lora/ops/triton_ops/lora_shrink.py +247 -0
  243. vllm/lora/ops/triton_ops/utils.py +121 -0
  244. vllm/lora/peft_helper.py +115 -0
  245. vllm/lora/punica_wrapper/__init__.py +9 -0
  246. vllm/lora/punica_wrapper/punica_base.py +483 -0
  247. vllm/lora/punica_wrapper/punica_cpu.py +348 -0
  248. vllm/lora/punica_wrapper/punica_gpu.py +289 -0
  249. vllm/lora/punica_wrapper/punica_hpu.py +144 -0
  250. vllm/lora/punica_wrapper/punica_selector.py +20 -0
  251. vllm/lora/punica_wrapper/utils.py +161 -0
  252. vllm/lora/request.py +97 -0
  253. vllm/lora/resolver.py +83 -0
  254. vllm/lora/utils.py +237 -0
  255. vllm/lora/worker_manager.py +251 -0
  256. vllm/model_executor/__init__.py +15 -0
  257. vllm/model_executor/custom_op.py +153 -0
  258. vllm/model_executor/guided_decoding/__init__.py +180 -0
  259. vllm/model_executor/guided_decoding/guidance_decoding.py +63 -0
  260. vllm/model_executor/guided_decoding/guidance_logits_processors.py +85 -0
  261. vllm/model_executor/guided_decoding/guided_fields.py +42 -0
  262. vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +66 -0
  263. vllm/model_executor/guided_decoding/outlines_decoding.py +154 -0
  264. vllm/model_executor/guided_decoding/outlines_logits_processors.py +271 -0
  265. vllm/model_executor/guided_decoding/reasoner/__init__.py +35 -0
  266. vllm/model_executor/guided_decoding/utils.py +241 -0
  267. vllm/model_executor/guided_decoding/xgrammar_decoding.py +425 -0
  268. vllm/model_executor/layers/__init__.py +0 -0
  269. vllm/model_executor/layers/activation.py +368 -0
  270. vllm/model_executor/layers/fused_moe/__init__.py +51 -0
  271. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  272. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  273. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  274. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  275. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  276. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  277. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  278. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  279. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  280. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  281. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  282. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  283. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  284. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  285. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  286. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  287. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  288. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  289. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  290. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  291. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  292. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  293. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  294. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  295. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  296. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  297. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  298. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  299. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  300. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  301. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  302. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  303. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  304. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  305. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  306. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  307. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  308. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  309. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  310. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  311. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  312. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  313. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  314. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  315. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  316. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  317. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  318. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  319. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  320. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  321. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  322. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  323. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  324. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  325. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  326. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  327. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  328. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  329. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  330. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  331. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  332. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  333. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  334. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  335. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  336. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  337. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  338. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  339. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  425. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  426. vllm/model_executor/layers/fused_moe/cutlass_moe.py +180 -0
  427. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +294 -0
  428. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +374 -0
  429. vllm/model_executor/layers/fused_moe/fused_moe.py +1539 -0
  430. vllm/model_executor/layers/fused_moe/layer.py +949 -0
  431. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +243 -0
  432. vllm/model_executor/layers/fused_moe/moe_pallas.py +64 -0
  433. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +59 -0
  434. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +416 -0
  435. vllm/model_executor/layers/fused_moe/utils.py +48 -0
  436. vllm/model_executor/layers/layernorm.py +277 -0
  437. vllm/model_executor/layers/lightning_attn.py +651 -0
  438. vllm/model_executor/layers/linear.py +1518 -0
  439. vllm/model_executor/layers/logits_processor.py +196 -0
  440. vllm/model_executor/layers/mamba/__init__.py +0 -0
  441. vllm/model_executor/layers/mamba/mamba2_metadata.py +109 -0
  442. vllm/model_executor/layers/mamba/mamba_mixer.py +244 -0
  443. vllm/model_executor/layers/mamba/mamba_mixer2.py +538 -0
  444. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  445. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +104 -0
  446. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +415 -0
  447. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +261 -0
  448. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +588 -0
  449. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +750 -0
  450. vllm/model_executor/layers/mamba/ops/ssd_combined.py +231 -0
  451. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +205 -0
  452. vllm/model_executor/layers/pooler.py +336 -0
  453. vllm/model_executor/layers/quantization/__init__.py +153 -0
  454. vllm/model_executor/layers/quantization/aqlm.py +374 -0
  455. vllm/model_executor/layers/quantization/awq.py +184 -0
  456. vllm/model_executor/layers/quantization/awq_marlin.py +518 -0
  457. vllm/model_executor/layers/quantization/awq_triton.py +319 -0
  458. vllm/model_executor/layers/quantization/base_config.py +145 -0
  459. vllm/model_executor/layers/quantization/bitblas.py +459 -0
  460. vllm/model_executor/layers/quantization/bitsandbytes.py +396 -0
  461. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  462. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +624 -0
  463. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1100 -0
  464. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +20 -0
  465. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +357 -0
  466. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +54 -0
  467. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +159 -0
  468. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +119 -0
  469. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +149 -0
  470. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +110 -0
  471. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +200 -0
  472. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +205 -0
  473. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +213 -0
  474. vllm/model_executor/layers/quantization/deepspeedfp.py +193 -0
  475. vllm/model_executor/layers/quantization/experts_int8.py +194 -0
  476. vllm/model_executor/layers/quantization/fbgemm_fp8.py +168 -0
  477. vllm/model_executor/layers/quantization/fp8.py +832 -0
  478. vllm/model_executor/layers/quantization/gguf.py +408 -0
  479. vllm/model_executor/layers/quantization/gptq.py +276 -0
  480. vllm/model_executor/layers/quantization/gptq_bitblas.py +438 -0
  481. vllm/model_executor/layers/quantization/gptq_marlin.py +643 -0
  482. vllm/model_executor/layers/quantization/gptq_marlin_24.py +295 -0
  483. vllm/model_executor/layers/quantization/hqq_marlin.py +328 -0
  484. vllm/model_executor/layers/quantization/ipex_quant.py +250 -0
  485. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  486. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +89 -0
  487. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +82 -0
  488. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
  489. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +299 -0
  490. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +142 -0
  491. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +119 -0
  492. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +132 -0
  493. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +66 -0
  494. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +86 -0
  495. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +119 -0
  496. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +136 -0
  497. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +40 -0
  498. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +104 -0
  499. vllm/model_executor/layers/quantization/kv_cache.py +137 -0
  500. vllm/model_executor/layers/quantization/marlin.py +259 -0
  501. vllm/model_executor/layers/quantization/modelopt.py +410 -0
  502. vllm/model_executor/layers/quantization/moe_wna16.py +447 -0
  503. vllm/model_executor/layers/quantization/neuron_quant.py +67 -0
  504. vllm/model_executor/layers/quantization/ptpc_fp8.py +125 -0
  505. vllm/model_executor/layers/quantization/qqq.py +273 -0
  506. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  507. vllm/model_executor/layers/quantization/quark/quark.py +385 -0
  508. vllm/model_executor/layers/quantization/quark/quark_moe.py +236 -0
  509. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +7 -0
  510. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +54 -0
  511. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +142 -0
  512. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +121 -0
  513. vllm/model_executor/layers/quantization/quark/utils.py +102 -0
  514. vllm/model_executor/layers/quantization/schema.py +85 -0
  515. vllm/model_executor/layers/quantization/torchao.py +127 -0
  516. vllm/model_executor/layers/quantization/tpu_int8.py +119 -0
  517. vllm/model_executor/layers/quantization/utils/__init__.py +5 -0
  518. vllm/model_executor/layers/quantization/utils/allspark_utils.py +51 -0
  519. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +198 -0
  520. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  521. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  522. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  523. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  524. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  525. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  526. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  527. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  528. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  529. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  530. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  531. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  532. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  533. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  534. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  535. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  536. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  537. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  538. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  539. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  540. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  541. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  542. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  543. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  544. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  545. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  546. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  547. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  548. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  549. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  550. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  551. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  552. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  553. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  554. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  555. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  556. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  557. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  558. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  559. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  560. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  561. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  562. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  563. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  564. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  565. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  566. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  567. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  568. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  569. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  570. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  571. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  572. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  573. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  574. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  575. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  576. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  577. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  578. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  579. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  580. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  581. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  582. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  583. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  584. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  585. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  586. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  587. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  588. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  589. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  590. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  591. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  592. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  593. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  594. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  595. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  596. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  597. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  598. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  599. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  600. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  601. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  602. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  603. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  604. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  605. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  606. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  607. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  608. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  609. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  610. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  611. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  612. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  613. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  614. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  615. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  616. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  617. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  618. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  619. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  620. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  621. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  622. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  623. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  624. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  625. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  626. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  627. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  628. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  629. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  630. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  631. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  632. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  633. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  634. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  635. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  636. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  637. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  638. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  639. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  640. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  641. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  642. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  643. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  644. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  645. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  646. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  647. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  648. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  649. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  650. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  651. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  652. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  653. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  654. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  655. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  656. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  657. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  658. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  659. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  660. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  661. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  662. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  663. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  664. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  665. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  666. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  667. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  668. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  669. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  670. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  671. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  672. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  673. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  674. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  675. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  676. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  677. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  678. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  679. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  680. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  681. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  682. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  683. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  684. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  685. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  686. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  687. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  688. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  689. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  690. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  691. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  722. vllm/model_executor/layers/quantization/utils/fp8_utils.py +523 -0
  723. vllm/model_executor/layers/quantization/utils/gptq_utils.py +94 -0
  724. vllm/model_executor/layers/quantization/utils/int8_utils.py +459 -0
  725. vllm/model_executor/layers/quantization/utils/layer_utils.py +39 -0
  726. vllm/model_executor/layers/quantization/utils/machete_utils.py +32 -0
  727. vllm/model_executor/layers/quantization/utils/marlin_utils.py +413 -0
  728. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +110 -0
  729. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +164 -0
  730. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
  731. vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +127 -0
  732. vllm/model_executor/layers/quantization/utils/quant_utils.py +571 -0
  733. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +404 -0
  734. vllm/model_executor/layers/rejection_sampler.py +400 -0
  735. vllm/model_executor/layers/resampler.py +269 -0
  736. vllm/model_executor/layers/rotary_embedding.py +1598 -0
  737. vllm/model_executor/layers/sampler.py +1221 -0
  738. vllm/model_executor/layers/spec_decode_base_sampler.py +258 -0
  739. vllm/model_executor/layers/typical_acceptance_sampler.py +172 -0
  740. vllm/model_executor/layers/utils.py +99 -0
  741. vllm/model_executor/layers/vocab_parallel_embedding.py +485 -0
  742. vllm/model_executor/model_loader/__init__.py +20 -0
  743. vllm/model_executor/model_loader/loader.py +1542 -0
  744. vllm/model_executor/model_loader/neuron.py +243 -0
  745. vllm/model_executor/model_loader/tensorizer.py +468 -0
  746. vllm/model_executor/model_loader/utils.py +171 -0
  747. vllm/model_executor/model_loader/weight_utils.py +749 -0
  748. vllm/model_executor/models/__init__.py +27 -0
  749. vllm/model_executor/models/adapters.py +247 -0
  750. vllm/model_executor/models/arctic.py +559 -0
  751. vllm/model_executor/models/aria.py +656 -0
  752. vllm/model_executor/models/aya_vision.py +461 -0
  753. vllm/model_executor/models/baichuan.py +469 -0
  754. vllm/model_executor/models/bamba.py +542 -0
  755. vllm/model_executor/models/bart.py +936 -0
  756. vllm/model_executor/models/bert.py +725 -0
  757. vllm/model_executor/models/blip.py +337 -0
  758. vllm/model_executor/models/blip2.py +717 -0
  759. vllm/model_executor/models/bloom.py +358 -0
  760. vllm/model_executor/models/chameleon.py +1135 -0
  761. vllm/model_executor/models/chatglm.py +476 -0
  762. vllm/model_executor/models/clip.py +410 -0
  763. vllm/model_executor/models/commandr.py +466 -0
  764. vllm/model_executor/models/constant_size_cache.py +136 -0
  765. vllm/model_executor/models/dbrx.py +469 -0
  766. vllm/model_executor/models/deepseek.py +484 -0
  767. vllm/model_executor/models/deepseek_mtp.py +266 -0
  768. vllm/model_executor/models/deepseek_v2.py +830 -0
  769. vllm/model_executor/models/deepseek_vl2.py +647 -0
  770. vllm/model_executor/models/eagle.py +247 -0
  771. vllm/model_executor/models/exaone.py +548 -0
  772. vllm/model_executor/models/fairseq2_llama.py +153 -0
  773. vllm/model_executor/models/falcon.py +508 -0
  774. vllm/model_executor/models/florence2.py +1102 -0
  775. vllm/model_executor/models/fuyu.py +388 -0
  776. vllm/model_executor/models/gemma.py +423 -0
  777. vllm/model_executor/models/gemma2.py +423 -0
  778. vllm/model_executor/models/gemma3.py +531 -0
  779. vllm/model_executor/models/gemma3_mm.py +716 -0
  780. vllm/model_executor/models/glm.py +22 -0
  781. vllm/model_executor/models/glm4.py +303 -0
  782. vllm/model_executor/models/glm4v.py +647 -0
  783. vllm/model_executor/models/gpt2.py +313 -0
  784. vllm/model_executor/models/gpt_bigcode.py +336 -0
  785. vllm/model_executor/models/gpt_j.py +337 -0
  786. vllm/model_executor/models/gpt_neox.py +330 -0
  787. vllm/model_executor/models/granite.py +494 -0
  788. vllm/model_executor/models/granite_speech.py +777 -0
  789. vllm/model_executor/models/granitemoe.py +435 -0
  790. vllm/model_executor/models/granitemoeshared.py +339 -0
  791. vllm/model_executor/models/gritlm.py +245 -0
  792. vllm/model_executor/models/grok1.py +560 -0
  793. vllm/model_executor/models/h2ovl.py +542 -0
  794. vllm/model_executor/models/idefics2_vision_model.py +387 -0
  795. vllm/model_executor/models/idefics3.py +767 -0
  796. vllm/model_executor/models/interfaces.py +569 -0
  797. vllm/model_executor/models/interfaces_base.py +163 -0
  798. vllm/model_executor/models/intern_vit.py +476 -0
  799. vllm/model_executor/models/internlm2.py +453 -0
  800. vllm/model_executor/models/internlm2_ve.py +146 -0
  801. vllm/model_executor/models/internvl.py +945 -0
  802. vllm/model_executor/models/jais.py +371 -0
  803. vllm/model_executor/models/jamba.py +590 -0
  804. vllm/model_executor/models/kimi_vl.py +577 -0
  805. vllm/model_executor/models/llama.py +619 -0
  806. vllm/model_executor/models/llama4.py +530 -0
  807. vllm/model_executor/models/llama_eagle.py +152 -0
  808. vllm/model_executor/models/llama_eagle3.py +232 -0
  809. vllm/model_executor/models/llava.py +869 -0
  810. vllm/model_executor/models/llava_next.py +582 -0
  811. vllm/model_executor/models/llava_next_video.py +470 -0
  812. vllm/model_executor/models/llava_onevision.py +954 -0
  813. vllm/model_executor/models/mamba.py +271 -0
  814. vllm/model_executor/models/mamba2.py +302 -0
  815. vllm/model_executor/models/mamba_cache.py +76 -0
  816. vllm/model_executor/models/medusa.py +210 -0
  817. vllm/model_executor/models/minicpm.py +592 -0
  818. vllm/model_executor/models/minicpm3.py +229 -0
  819. vllm/model_executor/models/minicpmo.py +725 -0
  820. vllm/model_executor/models/minicpmv.py +1287 -0
  821. vllm/model_executor/models/minimax_cache.py +35 -0
  822. vllm/model_executor/models/minimax_text_01.py +1261 -0
  823. vllm/model_executor/models/mistral3.py +598 -0
  824. vllm/model_executor/models/mixtral.py +485 -0
  825. vllm/model_executor/models/mixtral_quant.py +447 -0
  826. vllm/model_executor/models/mllama.py +1623 -0
  827. vllm/model_executor/models/mllama4.py +838 -0
  828. vllm/model_executor/models/mlp_speculator.py +205 -0
  829. vllm/model_executor/models/modernbert.py +325 -0
  830. vllm/model_executor/models/module_mapping.py +71 -0
  831. vllm/model_executor/models/molmo.py +1567 -0
  832. vllm/model_executor/models/moonvit.py +628 -0
  833. vllm/model_executor/models/mpt.py +329 -0
  834. vllm/model_executor/models/nemotron.py +506 -0
  835. vllm/model_executor/models/nemotron_nas.py +446 -0
  836. vllm/model_executor/models/nvlm_d.py +212 -0
  837. vllm/model_executor/models/olmo.py +390 -0
  838. vllm/model_executor/models/olmo2.py +412 -0
  839. vllm/model_executor/models/olmoe.py +449 -0
  840. vllm/model_executor/models/opt.py +410 -0
  841. vllm/model_executor/models/orion.py +356 -0
  842. vllm/model_executor/models/paligemma.py +397 -0
  843. vllm/model_executor/models/persimmon.py +342 -0
  844. vllm/model_executor/models/phi.py +354 -0
  845. vllm/model_executor/models/phi3.py +18 -0
  846. vllm/model_executor/models/phi3_small.py +463 -0
  847. vllm/model_executor/models/phi3v.py +722 -0
  848. vllm/model_executor/models/phi4mm.py +1263 -0
  849. vllm/model_executor/models/phi4mm_audio.py +1232 -0
  850. vllm/model_executor/models/phi4mm_utils.py +1883 -0
  851. vllm/model_executor/models/phimoe.py +666 -0
  852. vllm/model_executor/models/pixtral.py +1281 -0
  853. vllm/model_executor/models/plamo2.py +736 -0
  854. vllm/model_executor/models/prithvi_geospatial_mae.py +231 -0
  855. vllm/model_executor/models/qwen.py +360 -0
  856. vllm/model_executor/models/qwen2.py +552 -0
  857. vllm/model_executor/models/qwen2_5_omni_thinker.py +901 -0
  858. vllm/model_executor/models/qwen2_5_vl.py +1136 -0
  859. vllm/model_executor/models/qwen2_audio.py +402 -0
  860. vllm/model_executor/models/qwen2_moe.py +531 -0
  861. vllm/model_executor/models/qwen2_rm.py +130 -0
  862. vllm/model_executor/models/qwen2_vl.py +1409 -0
  863. vllm/model_executor/models/qwen3.py +319 -0
  864. vllm/model_executor/models/qwen3_moe.py +528 -0
  865. vllm/model_executor/models/qwen_vl.py +784 -0
  866. vllm/model_executor/models/registry.py +611 -0
  867. vllm/model_executor/models/roberta.py +332 -0
  868. vllm/model_executor/models/siglip.py +522 -0
  869. vllm/model_executor/models/skyworkr1v.py +949 -0
  870. vllm/model_executor/models/smolvlm.py +51 -0
  871. vllm/model_executor/models/solar.py +504 -0
  872. vllm/model_executor/models/stablelm.py +349 -0
  873. vllm/model_executor/models/starcoder2.py +355 -0
  874. vllm/model_executor/models/telechat2.py +139 -0
  875. vllm/model_executor/models/teleflm.py +78 -0
  876. vllm/model_executor/models/transformers.py +442 -0
  877. vllm/model_executor/models/ultravox.py +655 -0
  878. vllm/model_executor/models/utils.py +714 -0
  879. vllm/model_executor/models/vision.py +149 -0
  880. vllm/model_executor/models/whisper.py +746 -0
  881. vllm/model_executor/models/zamba2.py +1008 -0
  882. vllm/model_executor/parameter.py +458 -0
  883. vllm/model_executor/pooling_metadata.py +71 -0
  884. vllm/model_executor/sampling_metadata.py +596 -0
  885. vllm/model_executor/utils.py +53 -0
  886. vllm/multimodal/__init__.py +31 -0
  887. vllm/multimodal/audio.py +105 -0
  888. vllm/multimodal/base.py +218 -0
  889. vllm/multimodal/hasher.py +103 -0
  890. vllm/multimodal/image.py +77 -0
  891. vllm/multimodal/inputs.py +843 -0
  892. vllm/multimodal/parse.py +454 -0
  893. vllm/multimodal/processing.py +1760 -0
  894. vllm/multimodal/profiling.py +274 -0
  895. vllm/multimodal/registry.py +321 -0
  896. vllm/multimodal/utils.py +386 -0
  897. vllm/multimodal/video.py +166 -0
  898. vllm/outputs.py +521 -0
  899. vllm/platforms/__init__.py +286 -0
  900. vllm/platforms/cpu.py +182 -0
  901. vllm/platforms/cuda.py +463 -0
  902. vllm/platforms/hpu.py +94 -0
  903. vllm/platforms/interface.py +427 -0
  904. vllm/platforms/neuron.py +69 -0
  905. vllm/platforms/rocm.py +346 -0
  906. vllm/platforms/tpu.py +174 -0
  907. vllm/platforms/xpu.py +142 -0
  908. vllm/plugins/__init__.py +82 -0
  909. vllm/pooling_params.py +53 -0
  910. vllm/profiler/__init__.py +7 -0
  911. vllm/profiler/layerwise_profile.py +374 -0
  912. vllm/profiler/utils.py +147 -0
  913. vllm/prompt_adapter/__init__.py +0 -0
  914. vllm/prompt_adapter/layers.py +82 -0
  915. vllm/prompt_adapter/models.py +357 -0
  916. vllm/prompt_adapter/request.py +36 -0
  917. vllm/prompt_adapter/utils.py +97 -0
  918. vllm/prompt_adapter/worker_manager.py +178 -0
  919. vllm/py.typed +2 -0
  920. vllm/reasoning/__init__.py +12 -0
  921. vllm/reasoning/abs_reasoning_parsers.py +189 -0
  922. vllm/reasoning/deepseek_r1_reasoning_parser.py +172 -0
  923. vllm/reasoning/granite_reasoning_parser.py +362 -0
  924. vllm/sampling_params.py +598 -0
  925. vllm/scalar_type.py +335 -0
  926. vllm/scripts.py +14 -0
  927. vllm/sequence.py +1486 -0
  928. vllm/spec_decode/__init__.py +0 -0
  929. vllm/spec_decode/batch_expansion.py +505 -0
  930. vllm/spec_decode/draft_model_runner.py +335 -0
  931. vllm/spec_decode/interfaces.py +98 -0
  932. vllm/spec_decode/medusa_worker.py +137 -0
  933. vllm/spec_decode/metrics.py +212 -0
  934. vllm/spec_decode/mlp_speculator_worker.py +93 -0
  935. vllm/spec_decode/mqa_scorer.py +159 -0
  936. vllm/spec_decode/multi_step_worker.py +416 -0
  937. vllm/spec_decode/ngram_worker.py +195 -0
  938. vllm/spec_decode/proposer_worker_base.py +58 -0
  939. vllm/spec_decode/smaller_tp_proposer_worker.py +194 -0
  940. vllm/spec_decode/spec_decode_worker.py +1324 -0
  941. vllm/spec_decode/target_model_runner.py +44 -0
  942. vllm/spec_decode/top1_proposer.py +274 -0
  943. vllm/spec_decode/util.py +276 -0
  944. vllm/test_utils.py +129 -0
  945. vllm/third_party/__init__.py +0 -0
  946. vllm/third_party/pynvml.py +6139 -0
  947. vllm/tracing.py +130 -0
  948. vllm/transformers_utils/__init__.py +19 -0
  949. vllm/transformers_utils/config.py +813 -0
  950. vllm/transformers_utils/configs/__init__.py +52 -0
  951. vllm/transformers_utils/configs/arctic.py +206 -0
  952. vllm/transformers_utils/configs/chatglm.py +71 -0
  953. vllm/transformers_utils/configs/cohere2.py +194 -0
  954. vllm/transformers_utils/configs/dbrx.py +280 -0
  955. vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
  956. vllm/transformers_utils/configs/eagle.py +65 -0
  957. vllm/transformers_utils/configs/exaone.py +191 -0
  958. vllm/transformers_utils/configs/falcon.py +89 -0
  959. vllm/transformers_utils/configs/h2ovl.py +15 -0
  960. vllm/transformers_utils/configs/internvl.py +53 -0
  961. vllm/transformers_utils/configs/jais.py +237 -0
  962. vllm/transformers_utils/configs/kimi_vl.py +36 -0
  963. vllm/transformers_utils/configs/medusa.py +62 -0
  964. vllm/transformers_utils/configs/mllama.py +30 -0
  965. vllm/transformers_utils/configs/mlp_speculator.py +67 -0
  966. vllm/transformers_utils/configs/moonvit.py +32 -0
  967. vllm/transformers_utils/configs/mpt.py +179 -0
  968. vllm/transformers_utils/configs/nemotron.py +204 -0
  969. vllm/transformers_utils/configs/nvlm_d.py +14 -0
  970. vllm/transformers_utils/configs/skyworkr1v.py +53 -0
  971. vllm/transformers_utils/configs/solar.py +246 -0
  972. vllm/transformers_utils/configs/telechat2.py +63 -0
  973. vllm/transformers_utils/configs/ultravox.py +107 -0
  974. vllm/transformers_utils/detokenizer.py +167 -0
  975. vllm/transformers_utils/detokenizer_utils.py +188 -0
  976. vllm/transformers_utils/processor.py +210 -0
  977. vllm/transformers_utils/processors/__init__.py +6 -0
  978. vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
  979. vllm/transformers_utils/s3_utils.py +161 -0
  980. vllm/transformers_utils/tokenizer.py +291 -0
  981. vllm/transformers_utils/tokenizer_base.py +146 -0
  982. vllm/transformers_utils/tokenizer_group.py +110 -0
  983. vllm/transformers_utils/tokenizers/__init__.py +9 -0
  984. vllm/transformers_utils/tokenizers/mistral.py +483 -0
  985. vllm/transformers_utils/utils.py +98 -0
  986. vllm/triton_utils/__init__.py +5 -0
  987. vllm/triton_utils/importing.py +53 -0
  988. vllm/usage/__init__.py +0 -0
  989. vllm/usage/usage_lib.py +255 -0
  990. vllm/utils.py +2692 -0
  991. vllm/v1/__init__.py +0 -0
  992. vllm/v1/attention/__init__.py +0 -0
  993. vllm/v1/attention/backends/__init__.py +0 -0
  994. vllm/v1/attention/backends/flash_attn.py +783 -0
  995. vllm/v1/attention/backends/flashinfer.py +638 -0
  996. vllm/v1/attention/backends/mla/__init__.py +0 -0
  997. vllm/v1/attention/backends/mla/common.py +974 -0
  998. vllm/v1/attention/backends/mla/flashmla.py +149 -0
  999. vllm/v1/attention/backends/mla/triton_mla.py +118 -0
  1000. vllm/v1/attention/backends/pallas.py +221 -0
  1001. vllm/v1/attention/backends/triton_attn.py +198 -0
  1002. vllm/v1/core/__init__.py +0 -0
  1003. vllm/v1/core/block_pool.py +281 -0
  1004. vllm/v1/core/encoder_cache_manager.py +149 -0
  1005. vllm/v1/core/kv_cache_manager.py +385 -0
  1006. vllm/v1/core/kv_cache_utils.py +744 -0
  1007. vllm/v1/core/sched/__init__.py +0 -0
  1008. vllm/v1/core/sched/interface.py +134 -0
  1009. vllm/v1/core/sched/output.py +126 -0
  1010. vllm/v1/core/sched/scheduler.py +838 -0
  1011. vllm/v1/core/sched/utils.py +22 -0
  1012. vllm/v1/core/specialized_manager.py +161 -0
  1013. vllm/v1/engine/__init__.py +166 -0
  1014. vllm/v1/engine/async_llm.py +532 -0
  1015. vllm/v1/engine/core.py +701 -0
  1016. vllm/v1/engine/core_client.py +942 -0
  1017. vllm/v1/engine/detokenizer.py +260 -0
  1018. vllm/v1/engine/exceptions.py +16 -0
  1019. vllm/v1/engine/llm_engine.py +285 -0
  1020. vllm/v1/engine/logprobs.py +198 -0
  1021. vllm/v1/engine/mm_input_cache.py +82 -0
  1022. vllm/v1/engine/output_processor.py +420 -0
  1023. vllm/v1/engine/parallel_sampling.py +132 -0
  1024. vllm/v1/engine/processor.py +387 -0
  1025. vllm/v1/executor/__init__.py +0 -0
  1026. vllm/v1/executor/abstract.py +112 -0
  1027. vllm/v1/executor/multiproc_executor.py +480 -0
  1028. vllm/v1/executor/ray_distributed_executor.py +61 -0
  1029. vllm/v1/kv_cache_interface.py +166 -0
  1030. vllm/v1/metrics/__init__.py +0 -0
  1031. vllm/v1/metrics/loggers.py +498 -0
  1032. vllm/v1/metrics/stats.py +238 -0
  1033. vllm/v1/outputs.py +111 -0
  1034. vllm/v1/request.py +178 -0
  1035. vllm/v1/sample/__init__.py +0 -0
  1036. vllm/v1/sample/metadata.py +43 -0
  1037. vllm/v1/sample/ops/__init__.py +0 -0
  1038. vllm/v1/sample/ops/bad_words.py +38 -0
  1039. vllm/v1/sample/ops/penalties.py +58 -0
  1040. vllm/v1/sample/ops/topk_topp_sampler.py +315 -0
  1041. vllm/v1/sample/rejection_sampler.py +631 -0
  1042. vllm/v1/sample/sampler.py +270 -0
  1043. vllm/v1/sample/tpu/__init__.py +0 -0
  1044. vllm/v1/sample/tpu/metadata.py +118 -0
  1045. vllm/v1/sample/tpu/sampler.py +154 -0
  1046. vllm/v1/serial_utils.py +274 -0
  1047. vllm/v1/spec_decode/__init__.py +0 -0
  1048. vllm/v1/spec_decode/eagle.py +318 -0
  1049. vllm/v1/spec_decode/metadata.py +61 -0
  1050. vllm/v1/spec_decode/metrics.py +164 -0
  1051. vllm/v1/spec_decode/ngram_proposer.py +131 -0
  1052. vllm/v1/spec_decode/utils.py +18 -0
  1053. vllm/v1/stats/__init__.py +0 -0
  1054. vllm/v1/stats/common.py +453 -0
  1055. vllm/v1/structured_output/__init__.py +113 -0
  1056. vllm/v1/structured_output/backend_guidance.py +215 -0
  1057. vllm/v1/structured_output/backend_types.py +96 -0
  1058. vllm/v1/structured_output/backend_xgrammar.py +299 -0
  1059. vllm/v1/structured_output/request.py +84 -0
  1060. vllm/v1/structured_output/utils.py +174 -0
  1061. vllm/v1/utils.py +249 -0
  1062. vllm/v1/worker/__init__.py +0 -0
  1063. vllm/v1/worker/block_table.py +87 -0
  1064. vllm/v1/worker/gpu_input_batch.py +677 -0
  1065. vllm/v1/worker/gpu_model_runner.py +1776 -0
  1066. vllm/v1/worker/gpu_worker.py +349 -0
  1067. vllm/v1/worker/lora_model_runner_mixin.py +145 -0
  1068. vllm/v1/worker/tpu_model_runner.py +1419 -0
  1069. vllm/v1/worker/tpu_worker.py +260 -0
  1070. vllm/v1/worker/utils.py +74 -0
  1071. vllm/v1/worker/worker_base.py +64 -0
  1072. vllm/version.py +40 -0
  1073. vllm/vllm_flash_attn/.gitkeep +0 -0
  1074. vllm/worker/__init__.py +0 -0
  1075. vllm/worker/cache_engine.py +144 -0
  1076. vllm/worker/cpu_enc_dec_model_runner.py +323 -0
  1077. vllm/worker/cpu_model_runner.py +668 -0
  1078. vllm/worker/cpu_pooling_model_runner.py +122 -0
  1079. vllm/worker/cpu_worker.py +400 -0
  1080. vllm/worker/enc_dec_model_runner.py +542 -0
  1081. vllm/worker/hpu_model_runner.py +2221 -0
  1082. vllm/worker/hpu_worker.py +483 -0
  1083. vllm/worker/model_runner.py +2056 -0
  1084. vllm/worker/model_runner_base.py +281 -0
  1085. vllm/worker/multi_step_hpu_worker.py +122 -0
  1086. vllm/worker/multi_step_model_runner.py +908 -0
  1087. vllm/worker/multi_step_tpu_worker.py +107 -0
  1088. vllm/worker/multi_step_worker.py +196 -0
  1089. vllm/worker/neuron_model_runner.py +336 -0
  1090. vllm/worker/neuron_worker.py +138 -0
  1091. vllm/worker/pooling_model_runner.py +200 -0
  1092. vllm/worker/tpu_model_runner.py +908 -0
  1093. vllm/worker/tpu_worker.py +332 -0
  1094. vllm/worker/utils.py +52 -0
  1095. vllm/worker/worker.py +570 -0
  1096. vllm/worker/worker_base.py +644 -0
  1097. vllm/worker/xpu_model_runner.py +603 -0
  1098. vllm/worker/xpu_worker.py +185 -0
  1099. vllm_cpu-0.8.5.post2.dist-info/METADATA +309 -0
  1100. vllm_cpu-0.8.5.post2.dist-info/RECORD +1103 -0
  1101. vllm_cpu-0.8.5.post2.dist-info/WHEEL +5 -0
  1102. vllm_cpu-0.8.5.post2.dist-info/entry_points.txt +2 -0
  1103. vllm_cpu-0.8.5.post2.dist-info/top_level.txt +1 -0
vllm/spec_decode/spec_decode_worker.py
@@ -0,0 +1,1324 @@
+ # SPDX-License-Identifier: Apache-2.0
+
+ import copy
+ from collections import defaultdict
+ from functools import cached_property
+ from typing import Any, Dict, List, Optional, Set, Tuple, Type
+
+ import torch
+ import torch.nn as nn
+
+ from vllm.config import ParallelConfig, SpeculativeConfig, VllmConfig
+ from vllm.distributed.communication_op import (broadcast_tensor_dict,
+                                                get_tp_group,
+                                                tensor_model_parallel_gather)
+ from vllm.distributed.parallel_state import model_parallel_is_initialized
+ from vllm.logger import init_logger
+ from vllm.model_executor.layers.rejection_sampler import RejectionSampler
+ from vllm.model_executor.layers.sampler import SamplerOutput
+ from vllm.model_executor.layers.spec_decode_base_sampler import (
+     SpecDecodeBaseSampler, SpecDecodeStochasticBaseSampler)
+ from vllm.model_executor.layers.typical_acceptance_sampler import (
+     TypicalAcceptanceSampler)
+ from vllm.platforms import current_platform
+ from vllm.sequence import (VLLM_INVALID_TOKEN_ID,
+                            CompletionSequenceGroupOutput, ExecuteModelRequest,
+                            HiddenStates, SequenceGroupMetadata,
+                            get_all_seq_ids_and_request_ids)
+ from vllm.spec_decode.batch_expansion import BatchExpansionTop1Scorer
+
+ if current_platform.is_cuda_alike():
+     from vllm.spec_decode.draft_model_runner import TP1DraftModelRunner
+
+ from vllm.spec_decode.interfaces import (SpeculativeProposals,
+                                          SpeculativeScorer, SpeculativeScores)
+ from vllm.spec_decode.medusa_worker import MedusaWorker
+ from vllm.spec_decode.metrics import AsyncMetricsCollector
+ from vllm.spec_decode.mlp_speculator_worker import MLPSpeculatorWorker
+ from vllm.spec_decode.mqa_scorer import MQAScorer
+ from vllm.spec_decode.multi_step_worker import MultiStepWorker
+ from vllm.spec_decode.ngram_worker import NGramWorker
+ from vllm.spec_decode.proposer_worker_base import ProposerWorkerBase
+ from vllm.spec_decode.smaller_tp_proposer_worker import SmallerTpProposerWorker
+ from vllm.spec_decode.target_model_runner import TargetModelRunner
+ from vllm.spec_decode.util import (Timer, create_logprobs_output,
+                                    create_sequence_group_output,
+                                    get_all_num_logprobs,
+                                    get_sampled_token_logprobs, nvtx_range,
+                                    split_batch_by_proposal_len)
+ from vllm.utils import resolve_obj_by_qualname
+ from vllm.worker.worker_base import LoRANotSupportedWorkerBase, WorkerBase
+
+ logger = init_logger(__name__)
+
+
+ def create_spec_worker(*args, **kwargs) -> "SpecDecodeWorker":
+     """Helper method that is the entrypoint for Executors which use
+     WorkerWrapper. It constructs a SpecDecodeWorker from the speculative config.
+     """
+     vllm_config: VllmConfig = kwargs.get("vllm_config")
+     speculative_config: SpeculativeConfig = vllm_config.speculative_config
+     assert speculative_config is not None
+
+     if vllm_config.parallel_config.pipeline_parallel_size > 1:
+         raise NotImplementedError("Speculative decoding is currently "
+                                   "incompatible with pipeline parallelism")
+
+     draft_worker_kwargs = kwargs.copy()
+
+     kwargs["model_runner_cls"] = TargetModelRunner
+     target_worker_config = copy.deepcopy(vllm_config)
+     target_worker_config.parallel_config.worker_cls =\
+         target_worker_config.parallel_config.sd_worker_cls
+     cls = resolve_obj_by_qualname(
+         target_worker_config.parallel_config.worker_cls)
+     target_worker = cls(*args, **kwargs)
+     # Set the disable_logprobs variable in the TargetModelRunner instance
+     # as per its value specified in the SpeculativeConfig.
+     target_worker.model_runner.disable_logprobs =\
+         speculative_config.disable_logprobs
+
+     draft_worker_config = copy.deepcopy(vllm_config)
+     draft_worker_config.model_config = speculative_config.draft_model_config
+     draft_worker_config.quant_config = VllmConfig._get_quantization_config(
+         draft_worker_config.model_config,
+         vllm_config.load_config,
+     )
+     speculative_config.draft_parallel_config.worker_cls =\
+         draft_worker_config.parallel_config.sd_worker_cls
+     draft_worker_config.parallel_config = speculative_config.draft_parallel_config  # noqa
+     # TODO allow draft-model specific load config.
+
+     # Override draft-model specific worker args.
+     draft_worker_kwargs.update(
+         vllm_config=draft_worker_config,
+         ngram_prompt_lookup_max=speculative_config.prompt_lookup_max,
+         ngram_prompt_lookup_min=speculative_config.prompt_lookup_min,
+     )
+
+     spec_decode_worker = SpecDecodeWorker.create_worker(
+         scorer_worker=target_worker,
+         draft_worker_kwargs=draft_worker_kwargs,
+         disable_mqa_scorer=speculative_config.disable_mqa_scorer,
+         disable_by_batch_size=speculative_config.disable_by_batch_size,
+         draft_token_acceptance_method=speculative_config.acceptance_method,
+         typical_acceptance_sampler_posterior_threshold=speculative_config.
+         posterior_threshold,
+         typical_acceptance_sampler_posterior_alpha=speculative_config.
+         posterior_alpha,
+         disable_logprobs=speculative_config.disable_logprobs,
+         disable_log_stats=speculative_config.disable_log_stats,
+         num_speculative_tokens=speculative_config.num_speculative_tokens,
+     )
+
+     return spec_decode_worker
+
+
+ # Reminder: Please update docs/source/features/compatibility_matrix.md
+ # if the feature combo becomes valid
+ class SpecDecodeWorker(LoRANotSupportedWorkerBase):
+     """Worker which implements speculative decoding.
+
+     Speculative decoding reduces decoding per-token latency by using a proposal
+     method, such as a small draft model, to speculate ahead of a larger LLM. The
+     probabilities of the speculative tokens are then determined by the larger
+     LLM, after which some verification routine determines which (if any) of the
+     speculative tokens are accepted by the larger LLM.
+
+     See https://github.com/vllm-project/vllm/pull/2188 and
+     https://github.com/vllm-project/vllm/pull/3103 for more info.
+
+     The current implementation has the following limitations:
+     * Only draft-model proposal is implemented (contributions for more forms are
+       welcome!).
+     * Only top-1 proposal and scoring are implemented. Tree-attention is left as
+       future work.
+     * All sequences in a batch must have the same proposal length, or zero. This
+       can be improved by having per-sequence speculation in the future.
+     * The scoring forward pass is done without an MQA kernel, which is
+       suboptimal especially as the batch size, proposal length, and sequence
+       lengths grow. Contributions to add MQA scoring are welcome once
+       correctness tests pass.
+       More info here https://docs.google.com/document/d/1T-JaS2T1NRfdP51qzqpyakoCXxSXTtORppiwaj5asxA/edit.
+     """
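The propose/verify cycle this docstring describes can be made concrete with a small standalone sketch. This is a toy with greedy verification, not the acceptance logic this worker actually uses (that is delegated to RejectionSampler or TypicalAcceptanceSampler below); `draft_next` and `target_next` are hypothetical single-token callables, not vLLM APIs.

# Toy sketch of one speculative step with greedy verification.
from typing import Callable, List

def speculative_step(prefix: List[int],
                     draft_next: Callable[[List[int]], int],
                     target_next: Callable[[List[int]], int],
                     k: int) -> List[int]:
    # Propose k tokens cheaply with the draft model.
    proposal: List[int] = []
    for _ in range(k):
        proposal.append(draft_next(prefix + proposal))
    # Verify: conceptually the target scores all proposal positions in one
    # pass; accept the longest agreeing prefix, then one corrected or bonus
    # token from the target itself.
    accepted: List[int] = []
    for tok in proposal:
        expected = target_next(prefix + accepted)
        if tok != expected:
            accepted.append(expected)  # target's token replaces the miss
            break
        accepted.append(tok)
    else:
        accepted.append(target_next(prefix + accepted))  # bonus token

    return accepted

# With a perfect draft, each step yields k accepted tokens plus a bonus:
def same(ctx): return len(ctx) % 7
assert len(speculative_step([1, 2], same, same, k=4)) == 5

Accepting a prefix plus one target-chosen token per step is what lets a single target forward pass emit more than one token while still matching the target model's output.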
+
+     @classmethod
+     def create_worker(
+         cls,
+         scorer_worker: WorkerBase,
+         draft_worker_kwargs: Dict[str, Any],
+         disable_mqa_scorer: bool,
+         disable_by_batch_size: Optional[int],
+         draft_token_acceptance_method: str,
+         typical_acceptance_sampler_posterior_threshold: float,
+         typical_acceptance_sampler_posterior_alpha: float,
+         disable_logprobs: bool,
+         disable_log_stats: bool,
+         num_speculative_tokens: int,
+     ) -> "SpecDecodeWorker":
+
+         allow_zero_draft_token_step = True
+         enable_lm_head_weight_load = False
+         num_spec_prefill_steps = 1
+         ngram_prompt_lookup_max = (
+             draft_worker_kwargs.pop("ngram_prompt_lookup_max"))
+         ngram_prompt_lookup_min = (
+             draft_worker_kwargs.pop("ngram_prompt_lookup_min"))
+         draft_model_config = draft_worker_kwargs["vllm_config"].model_config
+         draft_parallel_config: ParallelConfig = draft_worker_kwargs[
+             'vllm_config'].parallel_config
+         if ngram_prompt_lookup_max > 0:
+             draft_worker_kwargs[
+                 "device_type"] = scorer_worker.device_config.device.type
+             proposer_worker = NGramWorker(**draft_worker_kwargs)
+             proposer_worker.set_ngram_window_size(ngram_prompt_lookup_min,
+                                                   ngram_prompt_lookup_max)
+         else:
+             draft_tp = draft_parallel_config.tensor_parallel_size
+             target_tp = scorer_worker.parallel_config.tensor_parallel_size
+
+             if draft_model_config.hf_config.model_type == "mlp_speculator":
+                 proposer_worker = MLPSpeculatorWorker(**draft_worker_kwargs)
+             elif draft_model_config.hf_config.model_type == "medusa":
+                 proposer_worker = MedusaWorker(**draft_worker_kwargs)
+             else:
+                 if draft_tp == 1:
+                     if current_platform.is_cuda_alike():
+                         draft_worker_kwargs[
+                             "model_runner_cls"] = TP1DraftModelRunner
+                 else:
+                     if draft_model_config.hf_config.model_type == "eagle":
+                         raise NotImplementedError(
+                             f"{draft_model_config.hf_config.model_type} "
+                             "does not support TP > 1 yet")
+
+                     allow_zero_draft_token_step = False
+
+                 # Load lm_head weight for eagle in init_device
+                 if draft_model_config.hf_config.model_type == "eagle":
+                     enable_lm_head_weight_load = True
+
+                 proposer_worker = MultiStepWorker(**draft_worker_kwargs)
+                 if draft_model_config.hf_config.model_type == "deepseek_mtp":
+                     num_spec_prefill_steps = \
+                         draft_model_config.hf_config.n_predict
+
+             proposer_worker = SmallerTpProposerWorker.maybe_wrap_worker(
+                 proposer_worker, draft_tp, target_tp)
+
+         logger.info("Configuring SpecDecodeWorker with proposer=%s",
+                     type(proposer_worker))
+
+         spec_decode_sampler: SpecDecodeBaseSampler = None
+         if draft_token_acceptance_method == "rejection_sampler":
+             spec_decode_sampler = RejectionSampler()
+         elif draft_token_acceptance_method == "typical_acceptance_sampler":
+             spec_decode_sampler = TypicalAcceptanceSampler(
+                 posterior_threshold=\
+                     typical_acceptance_sampler_posterior_threshold,
+                 posterior_alpha=typical_acceptance_sampler_posterior_alpha,
+             )
+         logger.info(
+             "[Speculative Decoding] Configuring"
+             " SpecDecodeWorker with sampler=%s", type(spec_decode_sampler))
+
+         if not disable_mqa_scorer:
+             if scorer_worker.model_runner.attn_backend.get_name(
+             ) != "FLASH_ATTN":
+                 disable_mqa_scorer = True
+                 logger.info(
+                     "[Speculative Decoding] Disabling MQA scorer as the "
+                     "MQA is only available with flash attn backend.")
+
+             if draft_model_config and \
+                 draft_model_config.max_model_len < \
+                 scorer_worker.model_config.max_model_len:
+                 disable_mqa_scorer = True
+                 logger.info(
+                     "[Speculative Decoding] Disabling MQA scorer as the "
+                     "draft model max_model_len is smaller than the target "
+                     "model max_model_len.")
+
+             if not scorer_worker.model_runner.model_config.enforce_eager:
+                 disable_mqa_scorer = True
+                 logger.info(
+                     "[Speculative Decoding] Disabling MQA scorer as the "
+                     "target model is not running in eager mode.")
+
+         return SpecDecodeWorker(
+             proposer_worker,
+             scorer_worker,
+             disable_mqa_scorer=disable_mqa_scorer,
+             disable_logprobs=disable_logprobs,
+             disable_log_stats=disable_log_stats,
+             disable_by_batch_size=disable_by_batch_size,
+             spec_decode_sampler=spec_decode_sampler,
+             allow_zero_draft_token_step=allow_zero_draft_token_step,
+             enable_lm_head_weight_load=enable_lm_head_weight_load,
+             num_spec_prefill_steps=num_spec_prefill_steps)
+
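The three fallback checks at the end of create_worker reduce to a single predicate. A plain-Python paraphrase, with illustrative scalar arguments standing in for the worker and config objects used above:

# Condensed paraphrase of the MQA-scorer fallback conditions in create_worker.
def mqa_scorer_usable(attn_backend_name: str,
                      draft_max_model_len: int,
                      target_max_model_len: int,
                      target_enforce_eager: bool) -> bool:
    # MQA scoring is only wired up for the flash attention backend.
    if attn_backend_name != "FLASH_ATTN":
        return False
    # The draft model must cover at least the target's context window.
    if draft_max_model_len < target_max_model_len:
        return False
    # The target model must run eagerly (no CUDA graphs).
    if not target_enforce_eager:
        return False
    return True

If any check fails, the worker silently falls back to BatchExpansionTop1Scorer, which is chosen in init_device below.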
+     def __init__(
+         self,
+         proposer_worker: ProposerWorkerBase,
+         scorer_worker: WorkerBase,
+         spec_decode_sampler: SpecDecodeBaseSampler,
+         disable_mqa_scorer: bool = False,
+         disable_logprobs: bool = False,
+         disable_log_stats: bool = False,
+         metrics_collector: Optional[AsyncMetricsCollector] = None,
+         disable_by_batch_size: Optional[int] = None,
+         allow_zero_draft_token_step: Optional[bool] = True,
+         enable_lm_head_weight_load: Optional[bool] = False,
+         num_spec_prefill_steps: int = 1,
+     ):
+         """
+         Create a SpecDecodeWorker.
+
+         Args:
+             proposer_worker: A worker that can produce speculative tokens for
+                 sequences.
+             scorer_worker: A worker that produces probabilities of speculative
+                 tokens according to some base model. Typically a vanilla vLLM
+                 Worker.
+             spec_decode_sampler: A Torch module used to perform acceptance
+                 sampling of the draft tokens in the verification step of
+                 speculative decoding. Currently we support two different
+                 types of sampler namely RejectionSampler and
+                 TypicalAcceptanceSampler. 'spec_decode_sampler' is either an
+                 instance of RejectionSampler or TypicalAcceptanceSampler.
+             disable_mqa_scorer: If set to True, disable the MQA scorer and use
+                 the BatchExpansionTop1Scorer instead.
+             disable_logprobs: If set to True, token log probabilities will
+                 not be output in both the draft worker and the target worker.
+                 If set to False, log probabilities will be output by both.
+             disable_log_stats: If set to True, disable periodic printing of
+                 speculative stage times.
+             disable_by_batch_size: If the batch size is larger than this,
+                 disable speculative decoding for new incoming requests.
+             metrics_collector: Helper class for collecting metrics; can be set
+                 for testing purposes.
+             allow_zero_draft_token_step: whether to allow a step where the draft
+                 model generates no draft token; should disallow when the tp of
+                 draft model is larger than 1 (TODO: #5814)
+             enable_lm_head_weight_load: whether to load lm_head weight for
+                 draft models like eagle.
+             num_spec_prefill_steps: number of speculative prefill steps to run
+                 before the speculative decoding starts. This is only used when
+                 the draft model is a deepseek_mtp model that requires prefill
+                 kv cache separately for each MTP layer.
+         """
+         self.proposer_worker = proposer_worker
+         self.scorer_worker = scorer_worker
+         scorer_runner = getattr(self.scorer_worker, "model_runner", None)
+         self.generators = scorer_runner.get_generators(
+         ) if scorer_runner else None
+         self.disable_by_batch_size = disable_by_batch_size or float("inf")
+         self.spec_decode_sampler = spec_decode_sampler
+         self._allow_zero_draft_token_step = allow_zero_draft_token_step
+         self._enable_lm_head_weight_load = enable_lm_head_weight_load
+         self._metrics = AsyncMetricsCollector(
+             self.spec_decode_sampler
+         ) if metrics_collector is None else metrics_collector
+         # Tracks the sequence IDs that received a bonus token ID in
+         # their last forward pass. Needed only if KV cache is being
+         # used for token generation such as in the case of MultiStepWorker.
+         self._seq_with_bonus_token_in_last_step: Set[int] = set()
+         # Tracks the currently active request ids and the sequence IDs
+         # corresponding to them
+         self._request_id_seq_id_mapping: Dict[str, Set[int]] = defaultdict(set)
+         # Tracks if the proposer worker uses the KV cache or not.
+
+         self.probs_dtype = self.spec_decode_sampler.probs_dtype
+         self.token_id_dtype = self.spec_decode_sampler.token_id_dtype
+         # Lazy initialization.
+         self.scorer: SpeculativeScorer
+         self.disable_mqa_scorer = disable_mqa_scorer
+
+         # Hidden states from target model to pass to proposer
+         # in the subsequent step.
+         self.previous_hidden_states: Optional[HiddenStates] = None
+         self._disable_logprobs = disable_logprobs
+         self._disable_log_stats = disable_log_stats
+         self._num_spec_prefill_steps = num_spec_prefill_steps
+
+     def init_device(self) -> None:
+         """Initialize both scorer and proposer models.
+         """
+         # The scorer worker model is initialized first in case the proposer
+         # model has a smaller TP degree than the target worker.
+         self.scorer_worker.init_device()
+         self.proposer_worker.init_device()
+
+         # NOTE(cade): load_model is not part of the WorkerBase interface.
+         self.scorer_worker.load_model()
+         self.proposer_worker.load_model()
+
+         if self._enable_lm_head_weight_load:
+             # NOTE(Shangming): gather lm_head weight when tp enabled
+             target_lm_head_weight: torch.Tensor = tensor_model_parallel_gather(
+                 self.scorer_worker.model_runner.model_runner.model.lm_head.\
+                     weight.data,
+                 dim=0,
+             )
+
+             self.proposer_worker.maybe_load_lm_head_weight(
+                 target_lm_head_weight)
+
+         self._metrics.init_tensors(self.rank, device_type=self.device)
+         if model_parallel_is_initialized():
+             self.spec_decode_sampler.init_tensors(get_tp_group().local_rank,
+                                                   device_type=self.device)
+         else:
+             self.spec_decode_sampler.init_tensors(self.rank,
+                                                   device_type=self.device)
+
+         scorer_cls: Type[SpeculativeScorer]
+         if self.disable_mqa_scorer:
+             scorer_cls = BatchExpansionTop1Scorer
+             logger.info("[Speculative Decoding] Use batch "
+                         "expansion for scoring proposals.")
+         else:
+             scorer_cls = MQAScorer
+             logger.info(
+                 "[Speculative Decoding] Use MQA scorer for scoring proposals.")
+
+         self.scorer = scorer_cls(scorer_worker=self.scorer_worker,
+                                  device=self.device,
+                                  vocab_size=self._vocab_size)
+
+         self._configure_model_sampler_for_spec_decode()
+
+     def load_model(self, *args, **kwargs):
+         pass
+
+     def _configure_model_sampler_for_spec_decode(self):
+         """Configure model sampler to emit GPU tensors. This allows spec decode
+         to keep data on device without transferring to CPU and serializing,
+         which significantly reduces overhead of sampling during verification.
+
+         NOTE(cade): This breaks abstraction boundaries pretty badly. The better
+         design is to have the "move to CPU and serialize" sampling decision be
+         done outside of the model/sampler; this way the "last-mile" worker
+         object which interfaces with the scheduler can serialize and incur the
+         performance hit as necessary. This allows us to run the worker several
+         iterations in a row without incurring the "move to CPU and serialize"
+         performance penalty.
+
+         Since this requires a large change to vLLM, we defer it to later and
+         temporarily accept this broken abstraction boundary.
+
+         NOTE(cade): This will require a special check if the proposer worker
+         does not have a sampler (e.g. ngram speculation).
+         """
+         (self.scorer_worker.model_runner.sampler.include_gpu_probs_tensor
+          ) = True
+         (self.scorer_worker.model_runner.sampler.
+          should_modify_greedy_probs_inplace) = True
+         self.proposer_worker.set_include_gpu_probs_tensor()
+         self.proposer_worker.set_should_modify_greedy_probs_inplace()
+
+     def determine_num_available_blocks(self) -> Tuple[int, int]:
+         """Determine the number of cache blocks to use.
+
+         This is done by profiling the scorer model (which is typically the
+         larger of the two). Then the total memory which would be used by the
+         scorer cache is divided evenly between the proposer and scorer model KV,
+         such that the number of blocks is equal in both KV caches.
+         """
+         num_gpu_blocks, num_cpu_blocks = (
+             self.scorer_worker.determine_num_available_blocks())
+
+         scorer_cache_block_size_bytes = (
+             self.scorer_worker.get_cache_block_size_bytes())
+         proposer_cache_block_size_bytes = (
+             self.proposer_worker.get_cache_block_size_bytes())
+
+         new_num_gpu_blocks = split_num_cache_blocks_evenly(
+             scorer_cache_block_size_bytes, proposer_cache_block_size_bytes,
+             num_gpu_blocks)
+         return new_num_gpu_blocks, num_cpu_blocks
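split_num_cache_blocks_evenly is defined further down this file, outside the excerpt shown here. Going only by the docstring above, it must scale the profiled block count down so that equal block counts in both KV caches fit within the memory the scorer cache alone was sized for; a sketch of that arithmetic (the function name and exact rounding below are assumptions, not the upstream helper):

# Sketch of the even split described above: keep block counts equal in both
# KV caches while staying within the bytes the scorer cache alone would use.
def split_blocks_evenly_sketch(scorer_block_bytes: int,
                               proposer_block_bytes: int,
                               total_num_gpu_blocks: int) -> int:
    total_bytes = total_num_gpu_blocks * scorer_block_bytes
    return int(total_bytes / (scorer_block_bytes + proposer_block_bytes))

# E.g. with equal block sizes the usable count halves:
assert split_blocks_evenly_sketch(64, 64, 1000) == 500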
+
441
+ def initialize_cache(self, num_gpu_blocks: int,
442
+ num_cpu_blocks: int) -> None:
443
+ """Initialize the cache engine of the scorer and proposer workers.
444
+ """
445
+ self.scorer_worker.initialize_cache(num_gpu_blocks=num_gpu_blocks,
446
+ num_cpu_blocks=num_cpu_blocks)
447
+ self.proposer_worker.initialize_cache(num_gpu_blocks=num_gpu_blocks,
448
+ num_cpu_blocks=num_cpu_blocks)
449
+
450
+ def get_model(self) -> nn.Module:
451
+ return self.scorer_worker.get_model()
452
+
+    @torch.inference_mode()
+    def execute_model(
+        self,
+        execute_model_req: Optional[ExecuteModelRequest] = None
+    ) -> List[SamplerOutput]:
+        """Perform speculative decoding on the input batch."""
+        if self.rank != self._driver_rank:
+            self._run_non_driver_rank()
+            return []
+
+        if execute_model_req is None:
+            # This signals that there are no more requests to process for
+            # now. All workers are running an infinite loop with
+            # broadcast_tensor_dict, and they stop the loop when the driver
+            # broadcasts an empty input. Send an empty input to notify all
+            # other workers to stop their execution loop.
+            broadcast_tensor_dict({}, src=0)
+            return []
+
+        self._track_finished_requests(execute_model_req)
+        disable_all_speculation = self._should_disable_all_speculation(
+            execute_model_req)
+        num_lookahead_slots = execute_model_req.num_lookahead_slots
+        all_prompt = True
+        atleast_one_prompt = False
+        all_zero_spec_tokens = True
+        for sgm in execute_model_req.seq_group_metadata_list:
+            all_prompt = all_prompt and sgm.is_prompt
+            atleast_one_prompt = atleast_one_prompt or sgm.is_prompt
+            all_zero_spec_tokens = all_zero_spec_tokens and (
+                sgm.num_speculative_tokens == 0)
+
+        if all_prompt and execute_model_req.seq_group_metadata_list:
+            assert num_lookahead_slots == 0, (
+                "Prompt-only runs should have num_lookahead_slots equal to 0. "
+                "This should never happen, please file a bug at "
+                "https://github.com/vllm-project/vllm/issues")
+        # Speculative decoding is disabled in the following cases:
+        # 1. Prefill phase: Speculative decoding is not
+        #    used during the prefill phase.
+        # 2. Auto-disable enabled: The running queue size exceeds
+        #    the specified threshold.
+        # 3. No request: There are no requests in the batch, or
+        #    none of the requests in the batch have spec decoding enabled.
+        # In any of these cases, the proposer and scorer workers
+        # are called normally.
+        # We expect `num_speculative_tokens` to be None for prefills.
+        no_spec = (num_lookahead_slots == 0 or disable_all_speculation
+                   or all_zero_spec_tokens)
+
+        # Broadcast how many lookahead slots are scheduled for this step, and
+        # whether all speculation is disabled, to all non-driver workers.
+
+        # This is required because if the number of draft model runs changes
+        # dynamically, the non-driver workers won't know unless we perform a
+        # communication to inform them.
+
+        # no_spec is used to signal the non-driver workers about the prefill
+        # vs decode stage. This is needed to ensure that the order of
+        # execution of the proposer and scorer is the same in both driver
+        # and non-driver workers (i.e., scorer -> proposer for prefill and
+        # proposer -> scorer in decode). This order is needed to support
+        # models like EAGLE that take scorer states as inputs.
+        broadcast_dict = dict(
+            num_lookahead_slots=num_lookahead_slots,
+            no_spec=no_spec,
+            disable_all_speculation=disable_all_speculation,
+            # When both chunked prefill and speculative decoding are enabled,
+            # it is possible that the same batch contains both prefills
+            # and decodes. If that happens, the scorer runs the batch
+            # as one single forward pass. However, the proposer runs
+            # them as 2 different batches - one for prefills and
+            # the other for decodes. The variable indicates to the non-driver
+            # worker that there are prefills as part of the speculative batch
+            # and hence it needs to run an extra prefill forward pass.
+            run_spec_proposer_for_prefill=atleast_one_prompt,
+        )
+        broadcast_tensor_dict(broadcast_dict, src=self._driver_rank)
+
+        assert execute_model_req.seq_group_metadata_list is not None, (
+            "speculative decoding requires non-None seq_group_metadata_list")
+
+        self._maybe_disable_speculative_tokens(
+            disable_all_speculation, execute_model_req.seq_group_metadata_list)
+
+        if no_spec:
+            return self._run_no_spec(execute_model_req,
+                                     skip_proposer=disable_all_speculation)
+        return self._run_speculative_decoding_step(execute_model_req,
+                                                   num_lookahead_slots)
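+
+    # NOTE (editor): illustrative sketch, not part of the package source.
+    # The `no_spec` predicate in execute_model collapses three disable
+    # conditions into one flag. Restated standalone, with the same inputs:
+    #
+    #   def is_no_spec(num_lookahead_slots: int,
+    #                  disable_all_speculation: bool,
+    #                  all_zero_spec_tokens: bool) -> bool:
+    #       return (num_lookahead_slots == 0 or disable_all_speculation
+    #               or all_zero_spec_tokens)
+    #
+    #   assert is_no_spec(0, False, False)      # prefill step
+    #   assert is_no_spec(4, True, False)       # auto-disabled by batch size
+    #   assert not is_no_spec(4, False, False)  # normal speculative step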
+
+    @torch.inference_mode()
+    def start_worker_execution_loop(self) -> None:
+        """Execute the model loop to perform speculative decoding
+        in parallel workers."""
+        while self._run_non_driver_rank():
+            pass
+
+    def _should_disable_all_speculation(
+            self, execute_model_req: ExecuteModelRequest) -> bool:
+        # When the batch size is too large, disable speculative decoding
+        # to stop trading off throughput for latency.
+        return (execute_model_req.running_queue_size
+                >= self.disable_by_batch_size)
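+
+    # NOTE (editor): illustrative sketch, not part of the package source;
+    # the worker instance and request below are hypothetical.
+    #
+    #   >>> worker.disable_by_batch_size = 32
+    #   >>> req.running_queue_size = 40
+    #   >>> worker._should_disable_all_speculation(req)
+    #   True
+    #   >>> req.running_queue_size = 16
+    #   >>> worker._should_disable_all_speculation(req)
+    #   False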
+
+    def _maybe_disable_speculative_tokens(
+            self, disable_all_speculation: bool,
+            seq_group_metadata_list: List[SequenceGroupMetadata]) -> None:
+        if not disable_all_speculation:
+            return
+
+        for seq_group_metadata in seq_group_metadata_list:
+            # Once num_speculative_tokens is set to 0, spec decoding
+            # of this request is disabled forever.
+            # TODO(comaniac): We currently store spec-decoding-specific
+            # state in the global data structure, but we should maintain
+            # this state within the spec decode worker.
+            seq_group_metadata.num_speculative_tokens = 0
+
+    def _serialize_sampler_output_no_logprobs(
+            self, execute_model_req: ExecuteModelRequest,
+            sampler_output: SamplerOutput) -> List[SamplerOutput]:
+        """
+        Creates and returns a `SamplerOutput` with only the token IDs being
+        serialized to CPU and populated in `CompletionSequenceGroupOutput`.
+        All other parameters in `CompletionSequenceGroupOutput` related to log
+        probabilities are skipped.
+
+        Args:
+            execute_model_req (ExecuteModelRequest): The model request that
+                was executed.
+            sampler_output (SamplerOutput): The output from the sampler with
+                only GPU tensors populated.
+
+        Returns:
+            SamplerOutput: A new `SamplerOutput` instance containing a list of
+                `CompletionSequenceGroupOutput` objects with only token IDs
+                populated.
+        """
+        seq_output_prompt_logprobs = [
+            seq.is_prompt and seq.sampling_params.prompt_logprobs is not None
+            and seq.sampling_params.prompt_logprobs > 0
+            for seq in execute_model_req.seq_group_metadata_list
+        ]
+        # Ignore slots for prompt tokens that are filled with
+        # VLLM_INVALID_TOKEN_ID.
+        sampled_token_ids_list = (sampler_output.sampled_token_ids[torch.where(
+            # subtracting is faster than testing for equality
+            sampler_output.sampled_token_ids - VLLM_INVALID_TOKEN_ID)[0]] \
+            if any(seq_output_prompt_logprobs) else \
+            sampler_output.sampled_token_ids).tolist()
+
+        seq_data_entries = [
+            (seq_id, seq_data) for sg in \
+            execute_model_req.seq_group_metadata_list \
+            for seq_id, seq_data in sg.seq_data.items()
+        ]
+        completion_seq_group_output_list: List[
+            CompletionSequenceGroupOutput] = []
+        output_index = 0
+        # Make sure the non-terminal prefill chunks are still aligned with
+        # their own empty output.
+        for idx, seq_group_meta in enumerate(
+                execute_model_req.seq_group_metadata_list):
+            needs_prompt_logprobs = seq_output_prompt_logprobs[idx]
+            seq_id, seq_data = seq_data_entries[idx]
+            if needs_prompt_logprobs:
+                prompt_token_ids = seq_data.get_prompt_token_ids()
+
+                # Some of these sequences may belong to non-terminal chunks,
+                # which may still have to report logprobs for prompts.
+                start = 1 if seq_data._num_computed_tokens == 0 \
+                    else seq_data._num_computed_tokens
+                end = (seq_data._num_computed_tokens + \
+                       seq_group_meta.token_chunk_size)
+                prompt_token_ids = prompt_token_ids[start:end]
+                prompt_logprobs = [
+                    create_logprobs_output(
+                        token_id=p_token_id,
+                        token_id_logprob_rank=-1,
+                        token_id_logprob=0.0,
+                        topk_token_ids=[],
+                        topk_logprobs=[],
+                    ) for p_token_id in prompt_token_ids
+                ]
+            else:
+                prompt_logprobs = None
+
+            # Since we can get chunks here, we don't always have a sampled
+            # token (only on the last chunk), but we still have to provide
+            # an output.
+            if not seq_group_meta.do_sample:
+                completion_seq_group_output_list.append(
+                    CompletionSequenceGroupOutput(
+                        samples=[], prompt_logprobs=prompt_logprobs))
+                continue
+
+            # Sequence with output.
+            completion_seq_group_output_list.append(
+                create_sequence_group_output(
+                    token_id=sampled_token_ids_list[output_index][0],
+                    token_id_logprob_rank=-1,
+                    token_id_logprob=0.0,
+                    seq_id=seq_id,
+                    topk_token_ids=[],
+                    topk_logprobs=[],
+                    prompt_logprobs=prompt_logprobs))
+            output_index += 1
+
+        return [SamplerOutput(outputs=completion_seq_group_output_list)]
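+
+    # NOTE (editor): illustrative sketch, not part of the package source.
+    # With logprobs disabled, each emitted sample carries only the token id
+    # plus fixed placeholder logprob fields (hypothetical values shown):
+    #
+    #   create_sequence_group_output(token_id=1576, seq_id=7,
+    #                                token_id_logprob_rank=-1,
+    #                                token_id_logprob=0.0,
+    #                                topk_token_ids=[], topk_logprobs=[])
+    #
+    # so only the sampled ids cross the GPU->CPU boundary.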
+
+    @nvtx_range("spec_decode_worker._run_no_spec")
+    def _run_no_spec(self, execute_model_req: ExecuteModelRequest,
+                     skip_proposer: bool) -> List[SamplerOutput]:
+        """Run a single generation step without any speculation. The input is
+        sent to both the proposer and scorer model so that the KV cache is
+        consistent between the two. When skip_proposer is True, the proposer
+        model is not called, meaning that the proposer's KV cache is not
+        updated for these requests, so they cannot enable spec decoding for
+        the rest of the decode.
+        """
+
+        sampler_output = self.scorer_worker.execute_model(execute_model_req)
+        assert len(sampler_output) == 1
+        sampler_output = sampler_output[0]
+
+        # Store hidden states from target model execution, BxD.
+        hidden_states = sampler_output.hidden_states
+        if hidden_states is not None:
+            # Only decodes and prefill terminal chunks need a hidden state.
+            seq_group_meta_with_hidden = [
+                sg for sg in execute_model_req.seq_group_metadata_list
+                if sg.do_sample
+            ]
+            if any(seq.is_prompt for seq in seq_group_meta_with_hidden):
+                # Drop hidden_states with no prediction (e.g. non-terminal
+                # chunks).
+                hidden_states = hidden_states[
+                    torch.where(sampler_output.sampled_token_ids -
+                                VLLM_INVALID_TOKEN_ID)[0]]
+            if self.previous_hidden_states is None and len(
+                    seq_group_meta_with_hidden):
+                self.previous_hidden_states = HiddenStates(
+                    hidden_states, seq_group_meta_with_hidden)
+            elif self.previous_hidden_states and len(
+                    seq_group_meta_with_hidden):
+                self.previous_hidden_states.update(hidden_states,
+                                                   seq_group_meta_with_hidden)
+
+        if not skip_proposer:
+            # We prepare the prefill hidden states here so that there is no
+            # additional complexity in the worker for the spec_decode vs
+            # non_spec_decode flow, and execute_model doesn't need additional
+            # modifications.
+            execute_model_req.previous_hidden_states = \
+                prepare_prefill_hidden_states(
+                    sampler_output.prefill_hidden_states)
+            for i in range(self._num_spec_prefill_steps):
+                execute_model_req.spec_step_idx = i
+                self.proposer_worker.execute_model(execute_model_req)
+
+        sampler_output_to_return = (self._serialize_sampler_output_no_logprobs(
+            execute_model_req=execute_model_req, sampler_output=sampler_output)
+                                    if self._disable_logprobs else
+                                    [sampler_output])
+
+        # Clear device tensors from the sampler output. This reduces
+        # communication overhead when the engine runs in a different process
+        # than the workers.
+        sampler_output.sampled_token_probs = None
+        sampler_output.sampled_token_ids = None
+        sampler_output.logprobs = None
+        return sampler_output_to_return
+
+    def _run_non_driver_rank(self) -> bool:
+        """Run the proposer and verifier models in non-driver workers. This
+        is used for both speculation cases (num_lookahead_slots > 0) and
+        non-speculation cases (e.g. prefill).
+
+        Returns True if there are remaining sequences to process.
+        """
+        assert self.rank != self._driver_rank
+
+        data = broadcast_tensor_dict(src=self._driver_rank)
+        if not data:
+            return False
+        num_lookahead_slots = data["num_lookahead_slots"]
+
+        # In the case of prefill, scorer_worker has to run before the
+        # proposer so that the hidden states can be propagated to the
+        # proposer when needed.
+        if data["no_spec"]:
+            self.scorer_worker.execute_model()
+
+        if not data["disable_all_speculation"]:
+            # Even if num_lookahead_slots is zero, we want to run the
+            # proposer model as it may still need to update its KV cache.
+            #
+            # We run the proposer once per lookahead slot. In the future we
+            # should delegate how many times it runs to the proposer.
+            for _ in range(max(num_lookahead_slots, 1)):
+                self.proposer_worker.execute_model()
+
+        if not data["no_spec"]:
+            self.scorer_worker.execute_model()
+            if data["run_spec_proposer_for_prefill"]:
+                self.proposer_worker.execute_model()
+
+        return True
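+
+    # NOTE (editor): illustrative sketch, not part of the package source.
+    # A non-driver step is fully determined by the broadcast dict; e.g. a
+    # speculative decode step with 4 lookahead slots (hypothetical values):
+    #
+    #   data = dict(num_lookahead_slots=4, no_spec=False,
+    #               disable_all_speculation=False,
+    #               run_spec_proposer_for_prefill=False)
+    #
+    # drives proposer x4 -> scorer, while a prefill step (no_spec=True,
+    # num_lookahead_slots=0) drives scorer -> proposer x1, matching the
+    # ordering contract described in execute_model.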
+
+    @nvtx_range("spec_decode_worker._run_speculative_decoding_step")
+    def _run_speculative_decoding_step(
+            self, execute_model_req: ExecuteModelRequest,
+            num_lookahead_slots: int) -> List[SamplerOutput]:
+        """Execute a single step of speculative decoding.
+
+        This invokes the proposer worker to get k speculative tokens for each
+        sequence, then scores each speculative token using the scoring worker.
+
+        When `enable_chunked_prefill` is set, the scorer will batch decodes
+        and prefills, while the proposer will sync its KV cache by running an
+        extra forward pass on the prefills.
+
+        Returns a list of SamplerOutput, each containing a single token per
+        sequence.
+        """
+        # With prefill chunking, expect requests to have prompts first
+        # so that the backend gets prefill|decode.
+        assert num_lookahead_slots == execute_model_req.num_lookahead_slots
+
+        # Pass the last hidden states from the target model to the proposer.
+        execute_model_req.previous_hidden_states = self.previous_hidden_states
+        self.previous_hidden_states = None
+
+        with Timer() as proposal_timer:
+            # Generate proposals using the draft worker.
+            proposals = self.proposer_worker.get_spec_proposals(
+                execute_model_req, self._seq_with_bonus_token_in_last_step)
+
+        if not self._allow_zero_draft_token_step and proposals.no_proposals:
+            # TODO: Fix it #5814
+            raise RuntimeError("Cannot handle cases where distributed draft "
+                               "workers generate no tokens")
+
+        execute_model_req.previous_hidden_states = None
+
+        with Timer() as scoring_timer:
+            proposal_scores = self.scorer.score_proposals(
+                execute_model_req,
+                proposals,
+            )
+
+        _, (non_spec_seqs, non_spec_indices) = split_batch_by_proposal_len(
+            execute_model_req.seq_group_metadata_list, proposals.proposal_lens)
+        # With prefill chunking enabled, `non_spec_seqs` contains prefills
+        # too: discard decodes that have already been processed by the
+        # proposer.
+        non_spec_indices = [
+            idx for idx in non_spec_indices
+            if execute_model_req.seq_group_metadata_list[idx].is_prompt
+        ]
+        if len(non_spec_indices):
+            all_hidden_states = proposal_scores.hidden_states
+            if all_hidden_states is not None:
+                prefill_hidden_states = all_hidden_states[non_spec_indices]
+                execute_model_req.previous_hidden_states = \
+                    prepare_prefill_hidden_states(prefill_hidden_states)
+            # Sync the proposer KV cache for prefills.
+            prefill_req = execute_model_req.clone(non_spec_seqs)
+            # TODO: avoid sampling here?
+            self.proposer_worker.execute_model(prefill_req)
+
+        with Timer() as verification_timer:
+            accepted_token_ids, target_logprobs = self._verify_tokens(
+                execute_model_req.seq_group_metadata_list, proposal_scores,
+                proposals, execute_model_req.num_lookahead_slots)
+
+        stage_times = (proposal_timer.elapsed_time_ms / num_lookahead_slots,
+                       scoring_timer.elapsed_time_ms,
+                       verification_timer.elapsed_time_ms)
+
+        return self._create_output_sampler_list(
+            execute_model_req.seq_group_metadata_list,
+            accepted_token_ids,
+            target_logprobs=target_logprobs,
+            prompt_logprobs=proposal_scores.prompt_logprobs
+            if not self._disable_logprobs else None,
+            k=execute_model_req.num_lookahead_slots,
+            stage_times=stage_times)
+
+    @nvtx_range("spec_decode_worker._verify_tokens")
+    def _verify_tokens(
+        self,
+        seq_group_metadata_list: List[SequenceGroupMetadata],
+        proposal_scores: SpeculativeScores,
+        proposals: SpeculativeProposals,
+        max_proposal_len: int,
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        """Determine which speculative tokens are accepted using the
+        probabilities of each token according to the proposer and scorer
+        models.
+
+        Returns a tuple of Tensors, one for the accepted token ids and one
+        for the logprobs according to the scoring model.
+        """
+        proposal_lens_list = proposals.proposal_lens.tolist()
+
+        # vLLM currently only supports proposal lens equal to zero or the
+        # batch proposal len. This adds some complexity (splitting the batch
+        # into spec and non-spec sequences) and should be removed in the
+        # future. It can be done by supporting per-sequence proposal lens.
+        (_, spec_indices), (_, non_spec_indices) = split_batch_by_proposal_len(
+            seq_group_metadata_list, proposal_lens_list)
+        original_indices = spec_indices + non_spec_indices
+
+        # Get probabilities of the target model, including bonus tokens.
+        proposal_verifier_probs = proposal_scores.probs[spec_indices]
+
+        # Get non-speculative sampled tokens from the target model.
+        non_spec_token_ids = proposal_scores.token_ids[non_spec_indices]
+
+        # Get bonus tokens from the target model.
+        bonus_token_ids = proposal_scores.token_ids[spec_indices, -1:]
+
+        # Get probabilities according to the proposal method.
+        proposal_probs = proposals.proposal_probs[spec_indices]
+
+        # Get proposed tokens.
+        proposal_token_ids = proposals.proposal_token_ids[spec_indices]
+
+        # Sampler arguments.
+        sampler_extra_kwargs: Dict[str, Any] = {}
+        if self.generators and isinstance(self.spec_decode_sampler,
+                                          SpecDecodeStochasticBaseSampler):
+            sampler_extra_kwargs["seeded_seqs"] = {
+                idx: self.generators[sgm.request_id]
+                for idx, sgm in enumerate(seq_group_metadata_list)
+                if sgm.sampling_params.seed is not None
+            }
+
+        accepted_token_ids = self.spec_decode_sampler(
+            target_with_bonus_probs=proposal_verifier_probs,
+            bonus_token_ids=bonus_token_ids,
+            draft_probs=proposal_probs,
+            draft_token_ids=proposal_token_ids,
+            **sampler_extra_kwargs,
+        )
+        # Append output tokens from non-speculative sequences to
+        # the accepted token ids tensor.
+        non_spec_token_ids = non_spec_token_ids.expand(-1, max_proposal_len +
+                                                       1).clone()
+        non_spec_token_ids[:, 1:] = -1
+        accepted_token_ids = torch.cat(
+            [accepted_token_ids, non_spec_token_ids])
+        logprobs = proposal_scores.logprobs
+        # Rearrange so that results are in the order of the original sequence
+        # group metadata.
+        accepted_token_ids[original_indices] = accepted_token_ids.clone()
+
+        # B x K+1 x D
+        hidden_states = proposal_scores.hidden_states
+        if hidden_states is not None:
+            # Only get terminal hidden states for the next step.
+            terminal_metadata = [
+                sg for sg in seq_group_metadata_list if sg.do_sample
+            ]
+
+            # Contract hidden states based on accepted tokens.
+            hs_size = hidden_states.shape[-1]
+            accepted_index = accepted_token_ids + 1  # Convert -1 to 0
+            accepted_index = accepted_index.count_nonzero(dim=1).add_(-1)  # b
+            # Drop hidden states of non-terminal prefill chunks.
+            hidden_states = hidden_states[accepted_index !=
+                                          VLLM_INVALID_TOKEN_ID]
+            accepted_index = accepted_index[accepted_index !=
+                                            VLLM_INVALID_TOKEN_ID]
+            assert len(accepted_index) == hidden_states.shape[0] == len(
+                terminal_metadata)
+            index = accepted_index[:, None, None].expand(-1, 1,
+                                                         hs_size)  # b x 1 x d
+            second_last_token_hidden_states = hidden_states[:, -2]  # b x d
+            hidden_states = hidden_states.gather(1, index).squeeze(1)  # b x d
+            # Store hidden states from the target model for the subsequent
+            # decode step.
+            self.previous_hidden_states = HiddenStates(
+                hidden_states, terminal_metadata,
+                second_last_token_hidden_states)
+        return accepted_token_ids, logprobs
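+
+    # NOTE (editor): illustrative sketch, not part of the package source.
+    # The expand/mask step above pads non-speculative rows out to k+1
+    # columns; for k = 2 and two non-spec sequences that sampled tokens 5
+    # and 7 (hypothetical values):
+    #
+    #   >>> import torch
+    #   >>> non_spec = torch.tensor([[5], [7]])
+    #   >>> non_spec = non_spec.expand(-1, 3).clone()  # k + 1 = 3
+    #   >>> non_spec[:, 1:] = -1
+    #   >>> non_spec
+    #   tensor([[ 5, -1, -1],
+    #           [ 7, -1, -1]])
+    #
+    # so the rows concatenate cleanly with the (batch x k+1) accepted tokens
+    # from the rejection sampler, with -1 marking "no token at this step".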
+
+    def _create_output_sampler_list(
+        self,
+        seq_group_metadata_list: List[SequenceGroupMetadata],
+        accepted_token_ids: torch.Tensor,  # shape: [batch_size, k+1]
+        target_logprobs: torch.Tensor,  # shape: [batch_size, k+1, vocab_size]
+        prompt_logprobs: Optional[
+            torch.Tensor],  # shape: [nprompt_tokens, vocab_size]
+        k: int,
+        stage_times: Tuple[float, float, float],
+    ) -> List[SamplerOutput]:
+        """Given the accepted token ids, create a list of SamplerOutput.
+
+        The output is padded with -1 tokens such that each sequence has
+        the same number of outputs.
+        """
+        batch_size, num_steps = accepted_token_ids.shape
+        accepted_token_ids_by_step = accepted_token_ids.transpose(0, 1)
+        if self._disable_logprobs:
+            # We are skipping the logprobs. Hence don't serialize the
+            # logprobs-related tensors from the GPU. Instead create
+            # empty/dummy lists.
+            (accepted_token_id_ranks_by_step,
+             accepted_token_id_logprobs_by_step,
+             topk_logprobs_by_step, topk_indices_by_step) =\
+             self._create_dummy_logprob_lists(
+                 batch_size, num_steps,
+                 self.scorer_worker.model_config.max_logprobs)
+        else:
+            # Organize input tensors by step instead of by sequence.
+            target_logprobs_by_step = target_logprobs.transpose(0, 1)
+            # Serialize all tensors into Python lists.
+            (accepted_token_id_ranks_by_step,
+             accepted_token_id_logprobs_by_step,
+             topk_logprobs_by_step, topk_indices_by_step) =\
+             self._create_logprob_lists_from_tensors(
+                 target_logprobs_by_step, accepted_token_ids_by_step,
+                 self.scorer_worker.model_config.max_logprobs)
+
+        # Get the sequence ids and num_logprobs (sampling parameter) in the
+        # batch.
+        seq_ids, request_ids_seq_ids_mapping = get_all_seq_ids_and_request_ids(
+            seq_group_metadata_list)
+
+        num_logprobs_per_seq = get_all_num_logprobs(seq_group_metadata_list)
+
+        # Serialize tensor to CPU Python list.
+        accepted_token_ids_by_step = accepted_token_ids_by_step.tolist()
+
+        # Construct the output on a per-step, per-sequence basis.
+        # Non-terminal prefill chunks will end up here as rows with just -1s,
+        # i.e. mixed-batch [[-1, 1576], [-1, 29884], [-1, -1], [-1, -1]],
+        # while terminal chunks will only have one generated token at step 0.
+        sampler_output_list: List[SamplerOutput] = []
+
+        # Prefills are not multi-step (they return at most 1 token), so to
+        # avoid padding or repetition to fit decodes, we handle them
+        # separately.
+        for i, sg in enumerate(seq_group_metadata_list):
+            if not sg.is_prompt:
+                # Requests are ordered as prefills|decodes => no more
+                # prefills.
+                break
+            num_logprobs = num_logprobs_per_seq[i]
+            seq_kwargs = dict(token_id=-1,
+                              token_id_logprob_rank=0,
+                              token_id_logprob=-float('inf'),
+                              topk_token_ids=[-1] * num_logprobs,
+                              topk_logprobs=[-float('inf')] * num_logprobs,
+                              seq_id=seq_ids[i])
+            # Terminal chunk, has token.
+            if sg.do_sample:
+                seq_kwargs.update(
+                    dict(
+                        token_id=accepted_token_ids[i][0].item(),
+                        token_id_logprob_rank=accepted_token_id_ranks_by_step[
+                            0][i],
+                        token_id_logprob=accepted_token_id_logprobs_by_step[0]
+                        [i],
+                        topk_token_ids=topk_indices_by_step[0][i]
+                        [:num_logprobs],
+                        # output only so step is 0
+                        topk_logprobs=topk_logprobs_by_step[0][i]
+                        [:num_logprobs],
+                    ))
+            needs_plogs = (sg.sampling_params.prompt_logprobs
+                           and sg.sampling_params.prompt_logprobs > 0)
+            plogs = None
+            if prompt_logprobs is not None:
+                # Even non-terminal prompt chunks can have logprobs here.
+                plogs = prompt_logprobs[i]
+            elif needs_plogs:
+                # Prompt logprobs are requested but `_disable_logprobs` is
+                # set.
+                seq_data = next(iter(sg.seq_data.values()))
+                # Get only the tokens in this chunk!
+                prompt_token_ids = seq_data.get_prompt_token_ids()
+                prompt_token_ids = prompt_token_ids[
+                    seq_data.
+                    _num_computed_tokens:seq_data._num_computed_tokens +
+                    sg.token_chunk_size]
+
+                is_first_chunk = seq_data._num_computed_tokens == 0
+                # There's no prob generated for the first token in a
+                # sequence.
+                if is_first_chunk:
+                    prompt_token_ids = prompt_token_ids[1:]
+                plogs = [
+                    create_logprobs_output(
+                        token_id=p_token_id,
+                        token_id_logprob_rank=-1,
+                        token_id_logprob=0.0,
+                        topk_token_ids=[],
+                        topk_logprobs=[],
+                    ) for p_token_id in prompt_token_ids
+                ]
+            seq_kwargs.update(dict(prompt_logprobs=plogs))
+
+            sampler_output_list.append(
+                SamplerOutput(
+                    outputs=[create_sequence_group_output(
+                        **seq_kwargs)]))  # type: ignore
+
+        # Decodes: create one SamplerOutput per step (at most K+1).
+        for step_index in range(num_steps):
+            if all(token_id == -1 for sg, token_id in zip(
+                    seq_group_metadata_list,
+                    accepted_token_ids_by_step[step_index])
+                   if not sg.is_prompt):
+                break
+
+            step_output_token_ids: List[CompletionSequenceGroupOutput] = []
+            for sequence_index in range(batch_size):
+                seq_meta = seq_group_metadata_list[sequence_index]
+                # Prompts were already processed above.
+                if seq_meta.is_prompt:
+                    continue
+
+                # Each sequence may have a different num_logprobs; retrieve
+                # it.
+                num_logprobs = num_logprobs_per_seq[sequence_index]
+                step_output_token_ids.append(
+                    create_sequence_group_output(
+                        token_id=accepted_token_ids_by_step[step_index]
+                        [sequence_index],
+                        token_id_logprob_rank=accepted_token_id_ranks_by_step[
+                            step_index][sequence_index],
+                        token_id_logprob=accepted_token_id_logprobs_by_step[
+                            step_index][sequence_index],
+                        seq_id=seq_ids[sequence_index],
+                        topk_token_ids=topk_indices_by_step[step_index]
+                        [sequence_index][:num_logprobs],
+                        topk_logprobs=topk_logprobs_by_step[step_index]
+                        [sequence_index][:num_logprobs],
+                        step_index=step_index))
+            sampler_output_list.append(
+                SamplerOutput(outputs=step_output_token_ids))
+
+        # Populate the data structures needed to keep track of sequences with
+        # bonus tokens.
+        self._track_sequences_with_bonus_tokens(seq_ids,
+                                                request_ids_seq_ids_mapping,
+                                                accepted_token_ids_by_step)
+        maybe_rejsample_metrics = (
+            self._metrics.maybe_collect_rejsample_metrics(k))
+        if maybe_rejsample_metrics is not None:
+            sampler_output_list[
+                0].spec_decode_worker_metrics = maybe_rejsample_metrics
+
+            # Log time spent in each stage periodically.
+            # This is periodic because the rejection sampler emits metrics
+            # periodically.
+            self._maybe_log_stage_times(*stage_times)
+        # The first `n_prefills` entries will contain prefill SamplerOutputs
+        # when chunked prefill is enabled; the rest are decodes in multi-step
+        # format.
+        return sampler_output_list
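+
+    # NOTE (editor): illustrative sketch, not part of the package source.
+    # How the padded accepted-token matrix becomes per-step outputs, for a
+    # decode-only batch of two sequences and k = 2 (hypothetical values):
+    #
+    #   accepted_token_ids            by-step view (transpose)
+    #   [[10, 11, -1],                step 0: [10, 20]
+    #    [20, -1, -1]]                step 1: [11, -1]
+    #                                 step 2: [-1, -1] -> all -1, stop
+    #
+    # Two SamplerOutputs are emitted; the -1 at step 1 for the second
+    # sequence is the padding mentioned in the docstring.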
+
+    def _maybe_log_stage_times(self, average_time_per_proposal_tok_ms: float,
+                               scoring_time_ms: float,
+                               verification_time_ms: float) -> None:
+        """Log the speculative stage times. If stat logging is disabled, do
+        nothing.
+        """
+        if self._disable_log_stats:
+            return
+
+        logger.info(
+            "SpecDecodeWorker stage times: "
+            "average_time_per_proposal_tok_ms=%.02f "
+            "scoring_time_ms=%.02f verification_time_ms=%.02f",
+            average_time_per_proposal_tok_ms, scoring_time_ms,
+            verification_time_ms)
+
+    def _create_dummy_logprob_lists(
+        self,
+        batch_size: int,
+        num_steps: int,
+        num_top_k: int,
+    ) -> Tuple[List[List[int]], List[List[float]],
+               List[List[List[Optional[float]]]],
+               List[List[List[Optional[int]]]]]:
+        """
+        Creates and returns four dummy lists representing token probabilities
+        and their ranks.
+
+        This method initializes and returns:
+        - The ranks of the accepted tokens, shaped (num_steps, batch_size)
+        - The log probabilities of the accepted tokens,
+          shaped (num_steps, batch_size)
+        - The log probabilities of the top k tokens,
+          shaped (num_steps, batch_size, num_top_k)
+        - The token IDs of the top k tokens,
+          shaped (num_steps, batch_size, num_top_k)
+
+        Args:
+            batch_size (int): The size of the batch.
+            num_steps (int): The number of steps in the sequence.
+            num_top_k (int): The number of top-k token log probabilities to
+                return.
+
+        Returns:
+            A tuple containing four dummy lists as described above.
+        """
+        accepted_token_id_ranks_by_step = [[-1] * batch_size
+                                           for _ in range(num_steps)]
+        accepted_token_id_logprobs_by_step = [[0.0] * batch_size
+                                              for _ in range(num_steps)]
+        topk_logprobs_by_step: List[List[List[Optional[float]]]] = [[
+            [None] * num_top_k for _ in range(batch_size)
+        ] for _ in range(num_steps)]
+        topk_indices_by_step: List[List[List[Optional[int]]]] = [[
+            [None] * num_top_k for _ in range(batch_size)
+        ] for _ in range(num_steps)]
+        return (accepted_token_id_ranks_by_step,
+                accepted_token_id_logprobs_by_step, topk_logprobs_by_step,
+                topk_indices_by_step)
+
+    def _create_logprob_lists_from_tensors(
+        self,
+        target_logprobs_by_step: torch.Tensor,
+        accepted_token_ids_by_step: torch.Tensor,
+        num_top_k: int,
+    ) -> Tuple[List[List[int]], List[List[float]],
+               List[List[List[Optional[float]]]],
+               List[List[List[Optional[int]]]]]:
+        """
+        Creates and returns four lists representing token probabilities and
+        their ranks.
+
+        This method initializes and returns four lists containing:
+        - The ranks of the accepted tokens, shaped (num_steps, batch_size)
+        - The log probabilities of the accepted tokens,
+          shaped (num_steps, batch_size)
+        - The log probabilities of the top k tokens,
+          shaped (num_steps, batch_size, num_top_k)
+        - The token IDs of the top k tokens,
+          shaped (num_steps, batch_size, num_top_k)
+
+        Args:
+            target_logprobs_by_step (torch.Tensor): Tensor representing the
+                log probabilities of the target model,
+                shaped (num_steps, batch_size, vocab_size)
+            accepted_token_ids_by_step (torch.Tensor): Tensor representing
+                the accepted token_ids, shaped (num_steps, batch_size)
+            num_top_k (int): The number of top-k token log probabilities to
+                return.
+
+        Returns:
+            A tuple containing the lists as described above.
+        """
+        # Serialize all tensors to CPU Python lists.
+        # Get the logprobs/rank of the accepted tokens.
+        (accepted_token_id_ranks_by_step_tensor,
+         accepted_token_id_logprobs_by_step_tensor
+         ) = get_sampled_token_logprobs(
+             logprob_tensor=target_logprobs_by_step,
+             sampled_token_ids=accepted_token_ids_by_step,
+         )
+        # Get the top-k logprobs (which may or may not include the
+        # logprob of the accepted token).
+        (topk_logprobs_by_step_tensor,
+         topk_indices_by_step_tensor) = target_logprobs_by_step.topk(
+             k=num_top_k,
+             dim=-1,
+         )
+        accepted_token_id_ranks_by_step = (
+            accepted_token_id_ranks_by_step_tensor.tolist())
+        accepted_token_id_logprobs_by_step = (
+            accepted_token_id_logprobs_by_step_tensor.tolist())
+        topk_logprobs_by_step = topk_logprobs_by_step_tensor.tolist()
+        topk_indices_by_step = topk_indices_by_step_tensor.tolist()
+        return (accepted_token_id_ranks_by_step,
+                accepted_token_id_logprobs_by_step, topk_logprobs_by_step,
+                topk_indices_by_step)
+
+    def _track_finished_requests(self, execute_model_req: ExecuteModelRequest):
+        """
+        Removes the finished requests and their associated sequence ids from
+        internal bookkeeping data structures.
+        """
+        for finished_request in execute_model_req.finished_requests_ids:
+            for seq_id in self._request_id_seq_id_mapping[finished_request]:
+                self._seq_with_bonus_token_in_last_step.discard(seq_id)
+            del self._request_id_seq_id_mapping[finished_request]
+
+    def _track_sequences_with_bonus_tokens(
+            self, seq_ids: List[int],
+            request_ids_seq_ids_mapping: Dict[str, Set[int]],
+            accepted_token_ids_by_step: List[List[int]]):
+        """
+        Updates the internal data structures which keep track of sequences
+        that were assigned bonus tokens in their last forward pass.
+        """
+        for seq_index, seq_id in enumerate(seq_ids):
+            last_token_id = accepted_token_ids_by_step[-1][seq_index]
+            if last_token_id == -1:
+                self._seq_with_bonus_token_in_last_step.discard(seq_id)
+            else:
+                self._seq_with_bonus_token_in_last_step.add(seq_id)
+        for request_id, sequences in request_ids_seq_ids_mapping.items():
+            self._request_id_seq_id_mapping[request_id].update(sequences)
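+
+    # NOTE (editor): illustrative sketch, not part of the package source.
+    # A sequence keeps its bonus-token flag only if its last-step slot was
+    # filled. With hypothetical seq_ids [3, 4] and a final step row of
+    # [-1, 42] in accepted_token_ids_by_step:
+    #
+    #   seq 3 -> last token -1 -> discarded from
+    #            _seq_with_bonus_token_in_last_step
+    #   seq 4 -> last token 42 -> added, so the proposer can account for
+    #            the bonus token on the next call to get_spec_proposals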
+
+    @cached_property
+    def _vocab_size(self) -> int:
+        """Get the vocab size of the model and make sure it's consistent
+        between draft and target workers.
+        """
+        vocab_sizes = [
+            worker.vocab_size
+            for worker in [self.proposer_worker, self.scorer_worker]
+        ]
+        assert all(vocab_sizes[0] == vocab_size for vocab_size in vocab_sizes)
+        return vocab_sizes[0]
+
+    @property
+    def rank(self):
+        return self.scorer_worker.rank
+
+    @property
+    def device(self):
+        return self.scorer_worker.device
+
+    @property
+    def _driver_rank(self) -> int:
+        return 0
+
+    def get_cache_block_size_bytes(self):
+        """Return the size of a cache block in bytes.
+
+        This function is only used to compose workers within a
+        SpecDecodeWorker. We leave composing a SpecDecodeWorker within a
+        SpecDecodeWorker undefined for now, although it could be implemented
+        in the future. See https://arxiv.org/abs/2308.04623.
+        """
+        raise NotImplementedError
+
+    def start_profile(self):
+        if isinstance(self.scorer_worker, WorkerBase):
+            self.scorer_worker.start_profile()
+
+    def stop_profile(self):
+        if isinstance(self.scorer_worker, WorkerBase):
+            self.scorer_worker.stop_profile()
+
+
+def split_num_cache_blocks_evenly(scorer_cache_block_size_bytes: int,
+                                  proposer_cache_block_size_bytes: int,
+                                  total_num_gpu_blocks: int) -> int:
+    """Given total_num_gpu_blocks, the number of GPU blocks that could be
+    allocated to the target model, this function calculates how many blocks
+    should be given to the draft and target model.
+
+    Note that usually the block size, in bytes, of each model is different,
+    as it's a function of the number of layers, the number of KV heads, and
+    the hidden dimension size.
+
+    Since the target and draft models allocate the same number of blocks, we
+    simply calculate the number of blocks where, if allocated by both models,
+    the total memory usage from the KV cache is no larger than the memory of
+    the blocks allocatable by the target model alone.
+    """
+    new_num_gpu_blocks = int(
+        total_num_gpu_blocks * scorer_cache_block_size_bytes /
+        (proposer_cache_block_size_bytes + scorer_cache_block_size_bytes))
+
+    return new_num_gpu_blocks
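+
+# NOTE (editor): illustrative worked example, not part of the package source.
+# With 1000 target-model blocks and hypothetical block sizes of 2 MiB
+# (scorer) and 0.5 MiB (proposer):
+#
+#   >>> split_num_cache_blocks_evenly(
+#   ...     scorer_cache_block_size_bytes=2 * 1024 * 1024,
+#   ...     proposer_cache_block_size_bytes=512 * 1024,
+#   ...     total_num_gpu_blocks=1000)
+#   800
+#
+# Each model then allocates 800 blocks: 800 * (2 + 0.5) MiB equals the
+# 2000 MiB the scorer alone would have used for its 1000 blocks.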
+
+
+def prepare_prefill_hidden_states(
+        prefill_hidden_states: torch.Tensor) -> HiddenStates:
+    # For the prefill step in the proposer, we run the model for N-1 tokens
+    # because the Nth token will be processed in the first decode step. For
+    # the N-1 tokens, the input should be the 0:N-1 hidden states, which
+    # should be concatenated with the 1:N tokens (since the output of the
+    # scorer has to be the input for the proposer). Therefore, we shift the
+    # hidden states to align the (n-1)th hidden state with the nth token.
+    return HiddenStates(prefill_hidden_states.roll(
+        shifts=1, dims=0)) if prefill_hidden_states is not None else None
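+
+# NOTE (editor): illustrative sketch, not part of the package source.
+# roll(shifts=1, dims=0) moves every row down by one, wrapping the last row
+# to the front; 1-D values stand in for D-dim hidden states:
+#
+#   >>> import torch
+#   >>> h = torch.tensor([[0.], [1.], [2.]])  # h0, h1, h2
+#   >>> h.roll(shifts=1, dims=0)
+#   tensor([[2.],
+#           [0.],
+#           [1.]])
+#
+# After the shift, position n holds h_{n-1}, so token n is paired with the
+# scorer state produced for token n-1; the wrapped row at position 0 is a
+# placeholder the proposer's prefill does not rely on.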