vllm-cpu 0.9.2.post2 cp311-cp311-manylinux_2_17_aarch64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (1236)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +214 -0
  3. vllm/_custom_ops.py +1915 -0
  4. vllm/_ipex_ops.py +350 -0
  5. vllm/_version.py +34 -0
  6. vllm/adapter_commons/__init__.py +0 -0
  7. vllm/adapter_commons/layers.py +16 -0
  8. vllm/adapter_commons/models.py +106 -0
  9. vllm/adapter_commons/request.py +26 -0
  10. vllm/adapter_commons/utils.py +93 -0
  11. vllm/adapter_commons/worker_manager.py +39 -0
  12. vllm/assets/__init__.py +0 -0
  13. vllm/assets/audio.py +45 -0
  14. vllm/assets/base.py +41 -0
  15. vllm/assets/image.py +34 -0
  16. vllm/assets/video.py +139 -0
  17. vllm/attention/__init__.py +20 -0
  18. vllm/attention/backends/__init__.py +0 -0
  19. vllm/attention/backends/abstract.py +325 -0
  20. vllm/attention/backends/blocksparse_attn.py +465 -0
  21. vllm/attention/backends/cpu_mla.py +307 -0
  22. vllm/attention/backends/dual_chunk_flash_attn.py +1506 -0
  23. vllm/attention/backends/flash_attn.py +1008 -0
  24. vllm/attention/backends/flashinfer.py +1107 -0
  25. vllm/attention/backends/flashmla.py +244 -0
  26. vllm/attention/backends/hpu_attn.py +318 -0
  27. vllm/attention/backends/ipex_attn.py +403 -0
  28. vllm/attention/backends/mla/__init__.py +0 -0
  29. vllm/attention/backends/mla/common.py +1391 -0
  30. vllm/attention/backends/pallas.py +356 -0
  31. vllm/attention/backends/placeholder_attn.py +400 -0
  32. vllm/attention/backends/rocm_aiter_mla.py +435 -0
  33. vllm/attention/backends/rocm_flash_attn.py +1015 -0
  34. vllm/attention/backends/torch_sdpa.py +707 -0
  35. vllm/attention/backends/triton_mla.py +115 -0
  36. vllm/attention/backends/utils.py +610 -0
  37. vllm/attention/backends/xformers.py +807 -0
  38. vllm/attention/layer.py +481 -0
  39. vllm/attention/ops/__init__.py +0 -0
  40. vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
  41. vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +433 -0
  42. vllm/attention/ops/blocksparse_attention/interface.py +239 -0
  43. vllm/attention/ops/blocksparse_attention/utils.py +246 -0
  44. vllm/attention/ops/chunked_prefill_paged_decode.py +368 -0
  45. vllm/attention/ops/flashmla.py +116 -0
  46. vllm/attention/ops/hpu_paged_attn.py +88 -0
  47. vllm/attention/ops/ipex_attn.py +195 -0
  48. vllm/attention/ops/merge_attn_states.py +43 -0
  49. vllm/attention/ops/nki_flash_attn.py +903 -0
  50. vllm/attention/ops/paged_attn.py +256 -0
  51. vllm/attention/ops/pallas_kv_cache_update.py +120 -0
  52. vllm/attention/ops/prefix_prefill.py +902 -0
  53. vllm/attention/ops/rocm_aiter_mla.py +100 -0
  54. vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
  55. vllm/attention/ops/triton_decode_attention.py +674 -0
  56. vllm/attention/ops/triton_flash_attention.py +984 -0
  57. vllm/attention/ops/triton_merge_attn_states.py +97 -0
  58. vllm/attention/ops/triton_unified_attention.py +738 -0
  59. vllm/attention/selector.py +214 -0
  60. vllm/attention/utils/fa_utils.py +72 -0
  61. vllm/beam_search.py +87 -0
  62. vllm/benchmarks/__init__.py +0 -0
  63. vllm/benchmarks/datasets.py +1441 -0
  64. vllm/benchmarks/endpoint_request_func.py +393 -0
  65. vllm/benchmarks/latency.py +168 -0
  66. vllm/benchmarks/serve.py +1063 -0
  67. vllm/benchmarks/throughput.py +609 -0
  68. vllm/benchmarks/utils.py +70 -0
  69. vllm/collect_env.py +820 -0
  70. vllm/compilation/__init__.py +0 -0
  71. vllm/compilation/activation_quant_fusion.py +89 -0
  72. vllm/compilation/backends.py +610 -0
  73. vllm/compilation/base_piecewise_backend.py +72 -0
  74. vllm/compilation/collective_fusion.py +127 -0
  75. vllm/compilation/compiler_interface.py +564 -0
  76. vllm/compilation/counter.py +41 -0
  77. vllm/compilation/cuda_piecewise_backend.py +218 -0
  78. vllm/compilation/decorators.py +250 -0
  79. vllm/compilation/fix_functionalization.py +191 -0
  80. vllm/compilation/fusion.py +645 -0
  81. vllm/compilation/fusion_attn.py +166 -0
  82. vllm/compilation/fx_utils.py +84 -0
  83. vllm/compilation/inductor_pass.py +115 -0
  84. vllm/compilation/monitor.py +39 -0
  85. vllm/compilation/multi_output_match.py +109 -0
  86. vllm/compilation/noop_elimination.py +165 -0
  87. vllm/compilation/pass_manager.py +82 -0
  88. vllm/compilation/sequence_parallelism.py +482 -0
  89. vllm/compilation/torch25_custom_graph_pass.py +42 -0
  90. vllm/compilation/vllm_inductor_pass.py +70 -0
  91. vllm/compilation/wrapper.py +135 -0
  92. vllm/config.py +4913 -0
  93. vllm/connections.py +174 -0
  94. vllm/core/__init__.py +0 -0
  95. vllm/core/block/__init__.py +0 -0
  96. vllm/core/block/block_table.py +399 -0
  97. vllm/core/block/common.py +371 -0
  98. vllm/core/block/cpu_gpu_block_allocator.py +441 -0
  99. vllm/core/block/interfaces.py +319 -0
  100. vllm/core/block/naive_block.py +466 -0
  101. vllm/core/block/prefix_caching_block.py +1135 -0
  102. vllm/core/block/utils.py +28 -0
  103. vllm/core/block_manager.py +525 -0
  104. vllm/core/evictor.py +157 -0
  105. vllm/core/interfaces.py +139 -0
  106. vllm/core/placeholder_block_space_manager.py +103 -0
  107. vllm/core/scheduler.py +2126 -0
  108. vllm/device_allocator/__init__.py +0 -0
  109. vllm/device_allocator/cumem.py +281 -0
  110. vllm/distributed/__init__.py +6 -0
  111. vllm/distributed/communication_op.py +41 -0
  112. vllm/distributed/device_communicators/__init__.py +0 -0
  113. vllm/distributed/device_communicators/all2all.py +264 -0
  114. vllm/distributed/device_communicators/base_device_communicator.py +260 -0
  115. vllm/distributed/device_communicators/cpu_communicator.py +145 -0
  116. vllm/distributed/device_communicators/cuda_communicator.py +194 -0
  117. vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
  118. vllm/distributed/device_communicators/custom_all_reduce.py +304 -0
  119. vllm/distributed/device_communicators/custom_all_reduce_utils.py +259 -0
  120. vllm/distributed/device_communicators/hpu_communicator.py +46 -0
  121. vllm/distributed/device_communicators/neuron_communicator.py +20 -0
  122. vllm/distributed/device_communicators/pynccl.py +218 -0
  123. vllm/distributed/device_communicators/pynccl_wrapper.py +349 -0
  124. vllm/distributed/device_communicators/quick_all_reduce.py +278 -0
  125. vllm/distributed/device_communicators/shm_broadcast.py +585 -0
  126. vllm/distributed/device_communicators/tpu_communicator.py +103 -0
  127. vllm/distributed/device_communicators/xpu_communicator.py +55 -0
  128. vllm/distributed/eplb/__init__.py +8 -0
  129. vllm/distributed/eplb/eplb_state.py +432 -0
  130. vllm/distributed/eplb/rebalance_algo.py +234 -0
  131. vllm/distributed/eplb/rebalance_execute.py +307 -0
  132. vllm/distributed/kv_events.py +356 -0
  133. vllm/distributed/kv_transfer/README.md +29 -0
  134. vllm/distributed/kv_transfer/__init__.py +12 -0
  135. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  136. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  137. vllm/distributed/kv_transfer/kv_connector/base.py +128 -0
  138. vllm/distributed/kv_transfer/kv_connector/factory.py +133 -0
  139. vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +99 -0
  140. vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +203 -0
  141. vllm/distributed/kv_transfer/kv_connector/simple_connector.py +329 -0
  142. vllm/distributed/kv_transfer/kv_connector/utils.py +109 -0
  143. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
  144. vllm/distributed/kv_transfer/kv_connector/v1/base.py +283 -0
  145. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +167 -0
  146. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +201 -0
  147. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1103 -0
  148. vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
  149. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +485 -0
  150. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +533 -0
  151. vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +265 -0
  152. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +389 -0
  153. vllm/distributed/kv_transfer/kv_connector_agent.py +77 -0
  154. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  155. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
  156. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
  157. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
  158. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  159. vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
  160. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +290 -0
  161. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
  162. vllm/distributed/kv_transfer/kv_transfer_state.py +71 -0
  163. vllm/distributed/parallel_state.py +1385 -0
  164. vllm/distributed/tpu_distributed_utils.py +178 -0
  165. vllm/distributed/utils.py +536 -0
  166. vllm/engine/__init__.py +0 -0
  167. vllm/engine/arg_utils.py +1801 -0
  168. vllm/engine/async_llm_engine.py +1200 -0
  169. vllm/engine/async_timeout.py +173 -0
  170. vllm/engine/llm_engine.py +2101 -0
  171. vllm/engine/metrics.py +629 -0
  172. vllm/engine/metrics_types.py +94 -0
  173. vllm/engine/multiprocessing/__init__.py +148 -0
  174. vllm/engine/multiprocessing/client.py +681 -0
  175. vllm/engine/multiprocessing/engine.py +460 -0
  176. vllm/engine/output_processor/__init__.py +0 -0
  177. vllm/engine/output_processor/interfaces.py +75 -0
  178. vllm/engine/output_processor/multi_step.py +216 -0
  179. vllm/engine/output_processor/single_step.py +145 -0
  180. vllm/engine/output_processor/stop_checker.py +131 -0
  181. vllm/engine/output_processor/util.py +28 -0
  182. vllm/engine/protocol.py +326 -0
  183. vllm/entrypoints/__init__.py +0 -0
  184. vllm/entrypoints/api_server.py +178 -0
  185. vllm/entrypoints/chat_utils.py +1278 -0
  186. vllm/entrypoints/cli/__init__.py +12 -0
  187. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  188. vllm/entrypoints/cli/benchmark/base.py +25 -0
  189. vllm/entrypoints/cli/benchmark/latency.py +21 -0
  190. vllm/entrypoints/cli/benchmark/main.py +58 -0
  191. vllm/entrypoints/cli/benchmark/serve.py +21 -0
  192. vllm/entrypoints/cli/benchmark/throughput.py +21 -0
  193. vllm/entrypoints/cli/collect_env.py +36 -0
  194. vllm/entrypoints/cli/main.py +71 -0
  195. vllm/entrypoints/cli/openai.py +201 -0
  196. vllm/entrypoints/cli/run_batch.py +69 -0
  197. vllm/entrypoints/cli/serve.py +265 -0
  198. vllm/entrypoints/cli/types.py +29 -0
  199. vllm/entrypoints/launcher.py +147 -0
  200. vllm/entrypoints/llm.py +1599 -0
  201. vllm/entrypoints/logger.py +50 -0
  202. vllm/entrypoints/openai/__init__.py +0 -0
  203. vllm/entrypoints/openai/api_server.py +1495 -0
  204. vllm/entrypoints/openai/cli_args.py +331 -0
  205. vllm/entrypoints/openai/logits_processors.py +90 -0
  206. vllm/entrypoints/openai/protocol.py +2096 -0
  207. vllm/entrypoints/openai/run_batch.py +473 -0
  208. vllm/entrypoints/openai/serving_chat.py +1258 -0
  209. vllm/entrypoints/openai/serving_classification.py +160 -0
  210. vllm/entrypoints/openai/serving_completion.py +618 -0
  211. vllm/entrypoints/openai/serving_embedding.py +201 -0
  212. vllm/entrypoints/openai/serving_engine.py +988 -0
  213. vllm/entrypoints/openai/serving_models.py +315 -0
  214. vllm/entrypoints/openai/serving_pooling.py +234 -0
  215. vllm/entrypoints/openai/serving_score.py +431 -0
  216. vllm/entrypoints/openai/serving_tokenization.py +157 -0
  217. vllm/entrypoints/openai/serving_transcription.py +132 -0
  218. vllm/entrypoints/openai/speech_to_text.py +395 -0
  219. vllm/entrypoints/openai/tool_parsers/__init__.py +25 -0
  220. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
  221. vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
  222. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
  223. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
  224. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +371 -0
  225. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
  226. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
  227. vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
  228. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +267 -0
  229. vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +369 -0
  230. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
  231. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
  232. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
  233. vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
  234. vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +466 -0
  235. vllm/entrypoints/score_utils.py +50 -0
  236. vllm/entrypoints/ssl.py +75 -0
  237. vllm/entrypoints/utils.py +262 -0
  238. vllm/env_override.py +41 -0
  239. vllm/envs.py +1029 -0
  240. vllm/executor/__init__.py +0 -0
  241. vllm/executor/executor_base.py +401 -0
  242. vllm/executor/mp_distributed_executor.py +244 -0
  243. vllm/executor/msgspec_utils.py +30 -0
  244. vllm/executor/multiproc_worker_utils.py +313 -0
  245. vllm/executor/ray_distributed_executor.py +701 -0
  246. vllm/executor/ray_utils.py +399 -0
  247. vllm/executor/uniproc_executor.py +139 -0
  248. vllm/forward_context.py +185 -0
  249. vllm/inputs/__init__.py +41 -0
  250. vllm/inputs/data.py +331 -0
  251. vllm/inputs/parse.py +151 -0
  252. vllm/inputs/preprocess.py +924 -0
  253. vllm/inputs/registry.py +245 -0
  254. vllm/jsontree.py +80 -0
  255. vllm/logger.py +212 -0
  256. vllm/logging_utils/__init__.py +8 -0
  257. vllm/logging_utils/dump_input.py +81 -0
  258. vllm/logging_utils/formatter.py +18 -0
  259. vllm/logits_process.py +119 -0
  260. vllm/lora/__init__.py +0 -0
  261. vllm/lora/fully_sharded_layers.py +355 -0
  262. vllm/lora/layers.py +1285 -0
  263. vllm/lora/lora.py +199 -0
  264. vllm/lora/models.py +818 -0
  265. vllm/lora/ops/__init__.py +0 -0
  266. vllm/lora/ops/torch_ops/__init__.py +16 -0
  267. vllm/lora/ops/torch_ops/lora_ops.py +119 -0
  268. vllm/lora/ops/triton_ops/__init__.py +12 -0
  269. vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
  270. vllm/lora/ops/triton_ops/lora_expand_op.py +290 -0
  271. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
  272. vllm/lora/ops/triton_ops/lora_shrink_op.py +244 -0
  273. vllm/lora/ops/triton_ops/utils.py +120 -0
  274. vllm/lora/ops/xla_ops/__init__.py +7 -0
  275. vllm/lora/ops/xla_ops/lora_ops.py +145 -0
  276. vllm/lora/peft_helper.py +136 -0
  277. vllm/lora/punica_wrapper/__init__.py +10 -0
  278. vllm/lora/punica_wrapper/punica_base.py +485 -0
  279. vllm/lora/punica_wrapper/punica_cpu.py +349 -0
  280. vllm/lora/punica_wrapper/punica_gpu.py +290 -0
  281. vllm/lora/punica_wrapper/punica_hpu.py +145 -0
  282. vllm/lora/punica_wrapper/punica_selector.py +20 -0
  283. vllm/lora/punica_wrapper/punica_tpu.py +405 -0
  284. vllm/lora/punica_wrapper/utils.py +164 -0
  285. vllm/lora/request.py +99 -0
  286. vllm/lora/resolver.py +85 -0
  287. vllm/lora/utils.py +240 -0
  288. vllm/lora/worker_manager.py +256 -0
  289. vllm/model_executor/__init__.py +16 -0
  290. vllm/model_executor/custom_op.py +208 -0
  291. vllm/model_executor/guided_decoding/__init__.py +181 -0
  292. vllm/model_executor/guided_decoding/guidance_decoding.py +63 -0
  293. vllm/model_executor/guided_decoding/guidance_logits_processors.py +104 -0
  294. vllm/model_executor/guided_decoding/guided_fields.py +41 -0
  295. vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +67 -0
  296. vllm/model_executor/guided_decoding/outlines_decoding.py +155 -0
  297. vllm/model_executor/guided_decoding/outlines_logits_processors.py +284 -0
  298. vllm/model_executor/guided_decoding/utils.py +242 -0
  299. vllm/model_executor/guided_decoding/xgrammar_decoding.py +426 -0
  300. vllm/model_executor/layers/__init__.py +0 -0
  301. vllm/model_executor/layers/activation.py +420 -0
  302. vllm/model_executor/layers/fused_moe/__init__.py +78 -0
  303. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +298 -0
  304. vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +140 -0
  305. vllm/model_executor/layers/fused_moe/config.py +456 -0
  306. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  307. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  308. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  309. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  310. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  311. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  312. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  313. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  314. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  315. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  316. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  317. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  318. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  319. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  320. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  321. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  322. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  323. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  324. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  325. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  326. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  327. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  328. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  329. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  330. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  331. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  332. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  333. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  334. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  335. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  336. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  337. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  338. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  339. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  454. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  455. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  456. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  457. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  458. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  459. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  460. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  461. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  462. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  463. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  464. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  465. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  466. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  467. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  468. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  469. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  470. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  471. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  472. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  473. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  474. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  475. vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +215 -0
  476. vllm/model_executor/layers/fused_moe/cutlass_moe.py +645 -0
  477. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +250 -0
  478. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +231 -0
  479. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +183 -0
  480. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1021 -0
  481. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +234 -0
  482. vllm/model_executor/layers/fused_moe/fused_moe.py +1734 -0
  483. vllm/model_executor/layers/fused_moe/layer.py +1528 -0
  484. vllm/model_executor/layers/fused_moe/modular_kernel.py +598 -0
  485. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +224 -0
  486. vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
  487. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +190 -0
  488. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  489. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +233 -0
  490. vllm/model_executor/layers/fused_moe/prepare_finalize.py +66 -0
  491. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +429 -0
  492. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +136 -0
  493. vllm/model_executor/layers/fused_moe/utils.py +144 -0
  494. vllm/model_executor/layers/layernorm.py +287 -0
  495. vllm/model_executor/layers/lightning_attn.py +652 -0
  496. vllm/model_executor/layers/linear.py +1547 -0
  497. vllm/model_executor/layers/logits_processor.py +197 -0
  498. vllm/model_executor/layers/mamba/__init__.py +0 -0
  499. vllm/model_executor/layers/mamba/mamba2_metadata.py +125 -0
  500. vllm/model_executor/layers/mamba/mamba_mixer.py +245 -0
  501. vllm/model_executor/layers/mamba/mamba_mixer2.py +731 -0
  502. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  503. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +105 -0
  504. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
  505. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +262 -0
  506. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +589 -0
  507. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +751 -0
  508. vllm/model_executor/layers/mamba/ops/ssd_combined.py +232 -0
  509. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +206 -0
  510. vllm/model_executor/layers/pooler.py +473 -0
  511. vllm/model_executor/layers/quantization/__init__.py +160 -0
  512. vllm/model_executor/layers/quantization/aqlm.py +376 -0
  513. vllm/model_executor/layers/quantization/auto_round.py +310 -0
  514. vllm/model_executor/layers/quantization/awq.py +228 -0
  515. vllm/model_executor/layers/quantization/awq_marlin.py +523 -0
  516. vllm/model_executor/layers/quantization/awq_triton.py +320 -0
  517. vllm/model_executor/layers/quantization/base_config.py +164 -0
  518. vllm/model_executor/layers/quantization/bitblas.py +462 -0
  519. vllm/model_executor/layers/quantization/bitsandbytes.py +396 -0
  520. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  521. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +694 -0
  522. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1613 -0
  523. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +24 -0
  524. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +358 -0
  525. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  526. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
  527. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +105 -0
  528. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +149 -0
  529. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
  530. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +150 -0
  531. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
  532. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
  533. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
  534. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  535. vllm/model_executor/layers/quantization/deepgemm.py +83 -0
  536. vllm/model_executor/layers/quantization/deepspeedfp.py +195 -0
  537. vllm/model_executor/layers/quantization/experts_int8.py +204 -0
  538. vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
  539. vllm/model_executor/layers/quantization/fp8.py +950 -0
  540. vllm/model_executor/layers/quantization/gguf.py +577 -0
  541. vllm/model_executor/layers/quantization/gptq.py +278 -0
  542. vllm/model_executor/layers/quantization/gptq_bitblas.py +446 -0
  543. vllm/model_executor/layers/quantization/gptq_marlin.py +679 -0
  544. vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
  545. vllm/model_executor/layers/quantization/hqq_marlin.py +332 -0
  546. vllm/model_executor/layers/quantization/ipex_quant.py +250 -0
  547. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  548. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +90 -0
  549. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +83 -0
  550. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
  551. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +300 -0
  552. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
  553. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +132 -0
  554. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +131 -0
  555. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
  556. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +87 -0
  557. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +120 -0
  558. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
  559. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
  560. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +105 -0
  561. vllm/model_executor/layers/quantization/kv_cache.py +139 -0
  562. vllm/model_executor/layers/quantization/marlin.py +263 -0
  563. vllm/model_executor/layers/quantization/modelopt.py +747 -0
  564. vllm/model_executor/layers/quantization/moe_wna16.py +457 -0
  565. vllm/model_executor/layers/quantization/neuron_quant.py +76 -0
  566. vllm/model_executor/layers/quantization/ptpc_fp8.py +127 -0
  567. vllm/model_executor/layers/quantization/qqq.py +275 -0
  568. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  569. vllm/model_executor/layers/quantization/quark/quark.py +437 -0
  570. vllm/model_executor/layers/quantization/quark/quark_moe.py +245 -0
  571. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  572. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  573. vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +126 -0
  574. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +157 -0
  575. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
  576. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  577. vllm/model_executor/layers/quantization/rtn.py +289 -0
  578. vllm/model_executor/layers/quantization/schema.py +86 -0
  579. vllm/model_executor/layers/quantization/torchao.py +212 -0
  580. vllm/model_executor/layers/quantization/tpu_int8.py +121 -0
  581. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  582. vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
  583. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +208 -0
  584. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  585. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  586. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  587. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  588. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  589. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  590. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  591. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  592. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  593. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  594. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  595. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  596. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  597. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  598. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  599. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  600. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  601. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  602. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  603. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  604. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  605. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  606. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  607. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  608. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  609. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  610. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  611. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  612. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  613. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  614. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  615. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  616. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  617. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  618. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  619. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  620. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  621. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  622. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  623. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  624. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  625. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  626. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  627. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  628. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  629. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  630. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  631. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  632. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  633. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  634. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  635. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  636. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  637. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  638. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  639. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  640. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  641. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  642. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  643. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  644. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  645. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  646. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  647. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  648. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  649. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  650. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  651. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  652. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  653. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  654. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  655. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  656. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  657. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  658. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  659. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  660. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  661. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  662. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  663. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  664. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  665. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  666. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  667. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  668. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  669. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  670. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  671. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  672. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  673. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  674. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  675. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  676. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  677. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  678. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  679. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  680. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  681. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  682. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  683. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  684. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  685. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  686. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  687. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  688. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  689. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  690. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  691. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  722. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  723. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  724. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  725. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  726. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  727. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  728. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  729. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  730. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  731. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  732. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  733. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  734. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  735. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  736. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  737. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  738. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  739. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  740. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  741. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  742. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  743. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  744. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  745. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  746. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  747. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  748. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  749. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  750. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  751. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  752. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  753. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  754. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  755. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  756. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  757. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  758. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  759. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  760. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  761. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  762. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  763. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  764. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  765. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  766. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  767. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  768. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  769. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  770. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  771. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  772. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  773. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  774. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  775. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  776. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  777. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  778. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  779. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  780. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  781. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  782. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  783. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  784. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  785. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  786. vllm/model_executor/layers/quantization/utils/fp8_utils.py +653 -0
  787. vllm/model_executor/layers/quantization/utils/gptq_utils.py +95 -0
  788. vllm/model_executor/layers/quantization/utils/int8_utils.py +485 -0
  789. vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
  790. vllm/model_executor/layers/quantization/utils/machete_utils.py +50 -0
  791. vllm/model_executor/layers/quantization/utils/marlin_utils.py +476 -0
  792. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +283 -0
  793. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +325 -0
  794. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
  795. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
  796. vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +126 -0
  797. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +45 -0
  798. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +146 -0
  799. vllm/model_executor/layers/quantization/utils/quant_utils.py +573 -0
  800. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +405 -0
  801. vllm/model_executor/layers/rejection_sampler.py +406 -0
  802. vllm/model_executor/layers/resampler.py +270 -0
  803. vllm/model_executor/layers/rotary_embedding.py +2025 -0
  804. vllm/model_executor/layers/sampler.py +1204 -0
  805. vllm/model_executor/layers/spec_decode_base_sampler.py +259 -0
  806. vllm/model_executor/layers/typical_acceptance_sampler.py +166 -0
  807. vllm/model_executor/layers/utils.py +116 -0
  808. vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
  809. vllm/model_executor/model_loader/__init__.py +77 -0
  810. vllm/model_executor/model_loader/base_loader.py +43 -0
  811. vllm/model_executor/model_loader/bitsandbytes_loader.py +613 -0
  812. vllm/model_executor/model_loader/default_loader.py +282 -0
  813. vllm/model_executor/model_loader/dummy_loader.py +27 -0
  814. vllm/model_executor/model_loader/gguf_loader.py +120 -0
  815. vllm/model_executor/model_loader/neuron.py +476 -0
  816. vllm/model_executor/model_loader/neuronx_distributed.py +685 -0
  817. vllm/model_executor/model_loader/runai_streamer_loader.py +109 -0
  818. vllm/model_executor/model_loader/sharded_state_loader.py +201 -0
  819. vllm/model_executor/model_loader/tensorizer.py +602 -0
  820. vllm/model_executor/model_loader/tensorizer_loader.py +127 -0
  821. vllm/model_executor/model_loader/tpu.py +113 -0
  822. vllm/model_executor/model_loader/utils.py +315 -0
  823. vllm/model_executor/model_loader/weight_utils.py +782 -0
  824. vllm/model_executor/models/__init__.py +30 -0
  825. vllm/model_executor/models/adapters.py +375 -0
  826. vllm/model_executor/models/aimv2.py +246 -0
  827. vllm/model_executor/models/arctic.py +559 -0
  828. vllm/model_executor/models/aria.py +670 -0
  829. vllm/model_executor/models/aya_vision.py +486 -0
  830. vllm/model_executor/models/baichuan.py +474 -0
  831. vllm/model_executor/models/bamba.py +558 -0
  832. vllm/model_executor/models/bart.py +938 -0
  833. vllm/model_executor/models/bert.py +513 -0
  834. vllm/model_executor/models/bert_with_rope.py +617 -0
  835. vllm/model_executor/models/blip.py +339 -0
  836. vllm/model_executor/models/blip2.py +728 -0
  837. vllm/model_executor/models/bloom.py +373 -0
  838. vllm/model_executor/models/chameleon.py +1146 -0
  839. vllm/model_executor/models/chatglm.py +478 -0
  840. vllm/model_executor/models/clip.py +407 -0
  841. vllm/model_executor/models/commandr.py +471 -0
  842. vllm/model_executor/models/config.py +200 -0
  843. vllm/model_executor/models/constant_size_cache.py +137 -0
  844. vllm/model_executor/models/dbrx.py +472 -0
  845. vllm/model_executor/models/deepseek.py +486 -0
  846. vllm/model_executor/models/deepseek_mtp.py +281 -0
  847. vllm/model_executor/models/deepseek_v2.py +935 -0
  848. vllm/model_executor/models/deepseek_vl2.py +660 -0
  849. vllm/model_executor/models/dots1.py +536 -0
  850. vllm/model_executor/models/eagle.py +261 -0
  851. vllm/model_executor/models/ernie45.py +43 -0
  852. vllm/model_executor/models/ernie45_moe.py +583 -0
  853. vllm/model_executor/models/exaone.py +551 -0
  854. vllm/model_executor/models/fairseq2_llama.py +154 -0
  855. vllm/model_executor/models/falcon.py +510 -0
  856. vllm/model_executor/models/falcon_h1.py +708 -0
  857. vllm/model_executor/models/florence2.py +1113 -0
  858. vllm/model_executor/models/fuyu.py +406 -0
  859. vllm/model_executor/models/gemma.py +427 -0
  860. vllm/model_executor/models/gemma2.py +427 -0
  861. vllm/model_executor/models/gemma3.py +535 -0
  862. vllm/model_executor/models/gemma3_mm.py +729 -0
  863. vllm/model_executor/models/gemma3n.py +811 -0
  864. vllm/model_executor/models/glm.py +23 -0
  865. vllm/model_executor/models/glm4.py +305 -0
  866. vllm/model_executor/models/glm4_1v.py +1590 -0
  867. vllm/model_executor/models/glm4v.py +657 -0
  868. vllm/model_executor/models/gpt2.py +382 -0
  869. vllm/model_executor/models/gpt_bigcode.py +335 -0
  870. vllm/model_executor/models/gpt_j.py +339 -0
  871. vllm/model_executor/models/gpt_neox.py +332 -0
  872. vllm/model_executor/models/granite.py +493 -0
  873. vllm/model_executor/models/granite_speech.py +790 -0
  874. vllm/model_executor/models/granitemoe.py +437 -0
  875. vllm/model_executor/models/granitemoehybrid.py +653 -0
  876. vllm/model_executor/models/granitemoeshared.py +341 -0
  877. vllm/model_executor/models/gritlm.py +224 -0
  878. vllm/model_executor/models/grok1.py +546 -0
  879. vllm/model_executor/models/h2ovl.py +549 -0
  880. vllm/model_executor/models/hunyuan_v1_moe.py +897 -0
  881. vllm/model_executor/models/idefics2_vision_model.py +389 -0
  882. vllm/model_executor/models/idefics3.py +786 -0
  883. vllm/model_executor/models/interfaces.py +681 -0
  884. vllm/model_executor/models/interfaces_base.py +164 -0
  885. vllm/model_executor/models/intern_vit.py +480 -0
  886. vllm/model_executor/models/internlm2.py +455 -0
  887. vllm/model_executor/models/internlm2_ve.py +147 -0
  888. vllm/model_executor/models/internvl.py +1432 -0
  889. vllm/model_executor/models/jais.py +373 -0
  890. vllm/model_executor/models/jamba.py +592 -0
  891. vllm/model_executor/models/keye.py +1736 -0
  892. vllm/model_executor/models/kimi_vl.py +585 -0
  893. vllm/model_executor/models/llama.py +644 -0
  894. vllm/model_executor/models/llama4.py +531 -0
  895. vllm/model_executor/models/llama_eagle.py +165 -0
  896. vllm/model_executor/models/llama_eagle3.py +263 -0
  897. vllm/model_executor/models/llava.py +887 -0
  898. vllm/model_executor/models/llava_next.py +604 -0
  899. vllm/model_executor/models/llava_next_video.py +492 -0
  900. vllm/model_executor/models/llava_onevision.py +985 -0
  901. vllm/model_executor/models/mamba.py +273 -0
  902. vllm/model_executor/models/mamba2.py +320 -0
  903. vllm/model_executor/models/mamba_cache.py +76 -0
  904. vllm/model_executor/models/medusa.py +219 -0
  905. vllm/model_executor/models/mimo.py +192 -0
  906. vllm/model_executor/models/mimo_mtp.py +285 -0
  907. vllm/model_executor/models/minicpm.py +592 -0
  908. vllm/model_executor/models/minicpm3.py +230 -0
  909. vllm/model_executor/models/minicpm_eagle.py +391 -0
  910. vllm/model_executor/models/minicpmo.py +772 -0
  911. vllm/model_executor/models/minicpmv.py +1307 -0
  912. vllm/model_executor/models/minimax_cache.py +36 -0
  913. vllm/model_executor/models/minimax_text_01.py +1301 -0
  914. vllm/model_executor/models/minimax_vl_01.py +374 -0
  915. vllm/model_executor/models/mistral3.py +624 -0
  916. vllm/model_executor/models/mixtral.py +488 -0
  917. vllm/model_executor/models/mixtral_quant.py +453 -0
  918. vllm/model_executor/models/mllama.py +1682 -0
  919. vllm/model_executor/models/mllama4.py +947 -0
  920. vllm/model_executor/models/mlp_speculator.py +206 -0
  921. vllm/model_executor/models/modernbert.py +339 -0
  922. vllm/model_executor/models/module_mapping.py +72 -0
  923. vllm/model_executor/models/molmo.py +1576 -0
  924. vllm/model_executor/models/moonvit.py +630 -0
  925. vllm/model_executor/models/mpt.py +331 -0
  926. vllm/model_executor/models/nemotron.py +508 -0
  927. vllm/model_executor/models/nemotron_h.py +588 -0
  928. vllm/model_executor/models/nemotron_nas.py +484 -0
  929. vllm/model_executor/models/nvlm_d.py +216 -0
  930. vllm/model_executor/models/olmo.py +389 -0
  931. vllm/model_executor/models/olmo2.py +414 -0
  932. vllm/model_executor/models/olmoe.py +468 -0
  933. vllm/model_executor/models/opt.py +412 -0
  934. vllm/model_executor/models/orion.py +349 -0
  935. vllm/model_executor/models/ovis.py +577 -0
  936. vllm/model_executor/models/paligemma.py +419 -0
  937. vllm/model_executor/models/persimmon.py +344 -0
  938. vllm/model_executor/models/phi.py +356 -0
  939. vllm/model_executor/models/phi3.py +19 -0
  940. vllm/model_executor/models/phi3_small.py +465 -0
  941. vllm/model_executor/models/phi3v.py +733 -0
  942. vllm/model_executor/models/phi4mm.py +1258 -0
  943. vllm/model_executor/models/phi4mm_audio.py +1233 -0
  944. vllm/model_executor/models/phi4mm_utils.py +1884 -0
  945. vllm/model_executor/models/phimoe.py +674 -0
  946. vllm/model_executor/models/pixtral.py +1329 -0
  947. vllm/model_executor/models/plamo2.py +738 -0
  948. vllm/model_executor/models/prithvi_geospatial_mae.py +240 -0
  949. vllm/model_executor/models/qwen.py +362 -0
  950. vllm/model_executor/models/qwen2.py +501 -0
  951. vllm/model_executor/models/qwen2_5_omni_thinker.py +923 -0
  952. vllm/model_executor/models/qwen2_5_vl.py +1175 -0
  953. vllm/model_executor/models/qwen2_audio.py +420 -0
  954. vllm/model_executor/models/qwen2_moe.py +540 -0
  955. vllm/model_executor/models/qwen2_rm.py +122 -0
  956. vllm/model_executor/models/qwen2_vl.py +1513 -0
  957. vllm/model_executor/models/qwen3.py +325 -0
  958. vllm/model_executor/models/qwen3_moe.py +541 -0
  959. vllm/model_executor/models/qwen_vl.py +796 -0
  960. vllm/model_executor/models/registry.py +634 -0
  961. vllm/model_executor/models/roberta.py +271 -0
  962. vllm/model_executor/models/siglip.py +524 -0
  963. vllm/model_executor/models/skyworkr1v.py +961 -0
  964. vllm/model_executor/models/smolvlm.py +52 -0
  965. vllm/model_executor/models/solar.py +506 -0
  966. vllm/model_executor/models/stablelm.py +343 -0
  967. vllm/model_executor/models/starcoder2.py +356 -0
  968. vllm/model_executor/models/tarsier.py +652 -0
  969. vllm/model_executor/models/telechat2.py +140 -0
  970. vllm/model_executor/models/teleflm.py +79 -0
  971. vllm/model_executor/models/transformers.py +509 -0
  972. vllm/model_executor/models/ultravox.py +670 -0
  973. vllm/model_executor/models/utils.py +744 -0
  974. vllm/model_executor/models/vision.py +147 -0
  975. vllm/model_executor/models/whisper.py +886 -0
  976. vllm/model_executor/models/zamba2.py +1036 -0
  977. vllm/model_executor/parameter.py +459 -0
  978. vllm/model_executor/pooling_metadata.py +72 -0
  979. vllm/model_executor/sampling_metadata.py +597 -0
  980. vllm/model_executor/utils.py +80 -0
  981. vllm/multimodal/__init__.py +33 -0
  982. vllm/multimodal/audio.py +116 -0
  983. vllm/multimodal/base.py +219 -0
  984. vllm/multimodal/hasher.py +91 -0
  985. vllm/multimodal/image.py +103 -0
  986. vllm/multimodal/inputs.py +878 -0
  987. vllm/multimodal/parse.py +499 -0
  988. vllm/multimodal/processing.py +1948 -0
  989. vllm/multimodal/profiling.py +283 -0
  990. vllm/multimodal/registry.py +331 -0
  991. vllm/multimodal/utils.py +492 -0
  992. vllm/multimodal/video.py +227 -0
  993. vllm/outputs.py +516 -0
  994. vllm/platforms/__init__.py +291 -0
  995. vllm/platforms/cpu.py +281 -0
  996. vllm/platforms/cuda.py +568 -0
  997. vllm/platforms/hpu.py +106 -0
  998. vllm/platforms/interface.py +551 -0
  999. vllm/platforms/neuron.py +150 -0
  1000. vllm/platforms/rocm.py +453 -0
  1001. vllm/platforms/tpu.py +206 -0
  1002. vllm/platforms/xpu.py +192 -0
  1003. vllm/plugins/__init__.py +94 -0
  1004. vllm/plugins/lora_resolvers/README.md +15 -0
  1005. vllm/plugins/lora_resolvers/__init__.py +0 -0
  1006. vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
  1007. vllm/pooling_params.py +64 -0
  1008. vllm/profiler/__init__.py +0 -0
  1009. vllm/profiler/layerwise_profile.py +375 -0
  1010. vllm/profiler/utils.py +148 -0
  1011. vllm/prompt_adapter/__init__.py +0 -0
  1012. vllm/prompt_adapter/layers.py +83 -0
  1013. vllm/prompt_adapter/models.py +358 -0
  1014. vllm/prompt_adapter/request.py +37 -0
  1015. vllm/prompt_adapter/utils.py +98 -0
  1016. vllm/prompt_adapter/worker_manager.py +179 -0
  1017. vllm/py.typed +2 -0
  1018. vllm/reasoning/__init__.py +15 -0
  1019. vllm/reasoning/abs_reasoning_parsers.py +192 -0
  1020. vllm/reasoning/deepseek_r1_reasoning_parser.py +173 -0
  1021. vllm/reasoning/granite_reasoning_parser.py +363 -0
  1022. vllm/reasoning/qwen3_reasoning_parser.py +151 -0
  1023. vllm/sampling_params.py +602 -0
  1024. vllm/scalar_type.py +347 -0
  1025. vllm/scripts.py +15 -0
  1026. vllm/sequence.py +1568 -0
  1027. vllm/spec_decode/__init__.py +0 -0
  1028. vllm/spec_decode/batch_expansion.py +506 -0
  1029. vllm/spec_decode/draft_model_runner.py +349 -0
  1030. vllm/spec_decode/interfaces.py +99 -0
  1031. vllm/spec_decode/medusa_worker.py +138 -0
  1032. vllm/spec_decode/metrics.py +213 -0
  1033. vllm/spec_decode/mlp_speculator_worker.py +94 -0
  1034. vllm/spec_decode/mqa_scorer.py +160 -0
  1035. vllm/spec_decode/multi_step_worker.py +423 -0
  1036. vllm/spec_decode/ngram_worker.py +196 -0
  1037. vllm/spec_decode/proposer_worker_base.py +59 -0
  1038. vllm/spec_decode/smaller_tp_proposer_worker.py +196 -0
  1039. vllm/spec_decode/spec_decode_worker.py +1326 -0
  1040. vllm/spec_decode/target_model_runner.py +45 -0
  1041. vllm/spec_decode/top1_proposer.py +275 -0
  1042. vllm/spec_decode/util.py +277 -0
  1043. vllm/test_utils.py +130 -0
  1044. vllm/third_party/__init__.py +0 -0
  1045. vllm/third_party/pynvml.py +6140 -0
  1046. vllm/tracing.py +131 -0
  1047. vllm/transformers_utils/__init__.py +24 -0
  1048. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1049. vllm/transformers_utils/chat_templates/registry.py +60 -0
  1050. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1051. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1052. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1053. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1054. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1055. vllm/transformers_utils/config.py +922 -0
  1056. vllm/transformers_utils/configs/__init__.py +57 -0
  1057. vllm/transformers_utils/configs/arctic.py +207 -0
  1058. vllm/transformers_utils/configs/chatglm.py +72 -0
  1059. vllm/transformers_utils/configs/cohere2.py +195 -0
  1060. vllm/transformers_utils/configs/dbrx.py +280 -0
  1061. vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
  1062. vllm/transformers_utils/configs/eagle.py +85 -0
  1063. vllm/transformers_utils/configs/exaone.py +190 -0
  1064. vllm/transformers_utils/configs/falcon.py +90 -0
  1065. vllm/transformers_utils/configs/jais.py +238 -0
  1066. vllm/transformers_utils/configs/kimi_vl.py +37 -0
  1067. vllm/transformers_utils/configs/medusa.py +63 -0
  1068. vllm/transformers_utils/configs/minimax_text_01.py +70 -0
  1069. vllm/transformers_utils/configs/minimax_vl_01.py +71 -0
  1070. vllm/transformers_utils/configs/mllama.py +31 -0
  1071. vllm/transformers_utils/configs/mlp_speculator.py +68 -0
  1072. vllm/transformers_utils/configs/moonvit.py +33 -0
  1073. vllm/transformers_utils/configs/mpt.py +180 -0
  1074. vllm/transformers_utils/configs/nemotron.py +205 -0
  1075. vllm/transformers_utils/configs/nemotron_h.py +259 -0
  1076. vllm/transformers_utils/configs/nvlm_d.py +31 -0
  1077. vllm/transformers_utils/configs/ovis.py +184 -0
  1078. vllm/transformers_utils/configs/skyworkr1v.py +54 -0
  1079. vllm/transformers_utils/configs/solar.py +247 -0
  1080. vllm/transformers_utils/configs/telechat2.py +64 -0
  1081. vllm/transformers_utils/configs/ultravox.py +108 -0
  1082. vllm/transformers_utils/detokenizer.py +168 -0
  1083. vllm/transformers_utils/detokenizer_utils.py +189 -0
  1084. vllm/transformers_utils/processor.py +221 -0
  1085. vllm/transformers_utils/processors/__init__.py +8 -0
  1086. vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
  1087. vllm/transformers_utils/processors/ovis.py +420 -0
  1088. vllm/transformers_utils/s3_utils.py +162 -0
  1089. vllm/transformers_utils/tokenizer.py +302 -0
  1090. vllm/transformers_utils/tokenizer_base.py +149 -0
  1091. vllm/transformers_utils/tokenizer_group.py +120 -0
  1092. vllm/transformers_utils/tokenizers/__init__.py +10 -0
  1093. vllm/transformers_utils/tokenizers/mistral.py +493 -0
  1094. vllm/transformers_utils/utils.py +99 -0
  1095. vllm/triton_utils/__init__.py +14 -0
  1096. vllm/triton_utils/importing.py +94 -0
  1097. vllm/usage/__init__.py +0 -0
  1098. vllm/usage/usage_lib.py +259 -0
  1099. vllm/utils/__init__.py +3008 -0
  1100. vllm/v1/__init__.py +0 -0
  1101. vllm/v1/attention/__init__.py +0 -0
  1102. vllm/v1/attention/backends/__init__.py +0 -0
  1103. vllm/v1/attention/backends/cpu_attn.py +184 -0
  1104. vllm/v1/attention/backends/flash_attn.py +757 -0
  1105. vllm/v1/attention/backends/flashinfer.py +680 -0
  1106. vllm/v1/attention/backends/flex_attention.py +491 -0
  1107. vllm/v1/attention/backends/mamba_attn.py +192 -0
  1108. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1109. vllm/v1/attention/backends/mla/common.py +978 -0
  1110. vllm/v1/attention/backends/mla/cutlass_mla.py +98 -0
  1111. vllm/v1/attention/backends/mla/flashmla.py +180 -0
  1112. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +241 -0
  1113. vllm/v1/attention/backends/mla/triton_mla.py +177 -0
  1114. vllm/v1/attention/backends/pallas.py +320 -0
  1115. vllm/v1/attention/backends/rocm_aiter_fa.py +609 -0
  1116. vllm/v1/attention/backends/triton_attn.py +449 -0
  1117. vllm/v1/attention/backends/utils.py +310 -0
  1118. vllm/v1/core/__init__.py +0 -0
  1119. vllm/v1/core/block_pool.py +349 -0
  1120. vllm/v1/core/encoder_cache_manager.py +254 -0
  1121. vllm/v1/core/kv_cache_coordinator.py +369 -0
  1122. vllm/v1/core/kv_cache_manager.py +398 -0
  1123. vllm/v1/core/kv_cache_utils.py +999 -0
  1124. vllm/v1/core/sched/__init__.py +0 -0
  1125. vllm/v1/core/sched/interface.py +150 -0
  1126. vllm/v1/core/sched/output.py +157 -0
  1127. vllm/v1/core/sched/request_queue.py +224 -0
  1128. vllm/v1/core/sched/scheduler.py +1115 -0
  1129. vllm/v1/core/sched/utils.py +36 -0
  1130. vllm/v1/core/single_type_kv_cache_manager.py +444 -0
  1131. vllm/v1/engine/__init__.py +179 -0
  1132. vllm/v1/engine/async_llm.py +626 -0
  1133. vllm/v1/engine/coordinator.py +278 -0
  1134. vllm/v1/engine/core.py +1046 -0
  1135. vllm/v1/engine/core_client.py +1049 -0
  1136. vllm/v1/engine/detokenizer.py +292 -0
  1137. vllm/v1/engine/exceptions.py +17 -0
  1138. vllm/v1/engine/llm_engine.py +322 -0
  1139. vllm/v1/engine/logprobs.py +200 -0
  1140. vllm/v1/engine/mm_input_cache.py +91 -0
  1141. vllm/v1/engine/output_processor.py +477 -0
  1142. vllm/v1/engine/parallel_sampling.py +133 -0
  1143. vllm/v1/engine/processor.py +422 -0
  1144. vllm/v1/engine/utils.py +546 -0
  1145. vllm/v1/executor/__init__.py +0 -0
  1146. vllm/v1/executor/abstract.py +113 -0
  1147. vllm/v1/executor/multiproc_executor.py +532 -0
  1148. vllm/v1/executor/ray_distributed_executor.py +62 -0
  1149. vllm/v1/kv_cache_interface.py +223 -0
  1150. vllm/v1/metrics/__init__.py +0 -0
  1151. vllm/v1/metrics/loggers.py +557 -0
  1152. vllm/v1/metrics/prometheus.py +82 -0
  1153. vllm/v1/metrics/ray_wrappers.py +131 -0
  1154. vllm/v1/metrics/reader.py +246 -0
  1155. vllm/v1/metrics/stats.py +240 -0
  1156. vllm/v1/outputs.py +124 -0
  1157. vllm/v1/pool/__init__.py +0 -0
  1158. vllm/v1/pool/metadata.py +17 -0
  1159. vllm/v1/request.py +229 -0
  1160. vllm/v1/sample/__init__.py +0 -0
  1161. vllm/v1/sample/logits_processor.py +517 -0
  1162. vllm/v1/sample/metadata.py +43 -0
  1163. vllm/v1/sample/ops/__init__.py +0 -0
  1164. vllm/v1/sample/ops/bad_words.py +39 -0
  1165. vllm/v1/sample/ops/penalties.py +43 -0
  1166. vllm/v1/sample/ops/topk_topp_sampler.py +296 -0
  1167. vllm/v1/sample/rejection_sampler.py +631 -0
  1168. vllm/v1/sample/sampler.py +226 -0
  1169. vllm/v1/sample/tpu/__init__.py +0 -0
  1170. vllm/v1/sample/tpu/metadata.py +124 -0
  1171. vllm/v1/sample/tpu/sampler.py +145 -0
  1172. vllm/v1/serial_utils.py +315 -0
  1173. vllm/v1/spec_decode/__init__.py +0 -0
  1174. vllm/v1/spec_decode/eagle.py +441 -0
  1175. vllm/v1/spec_decode/medusa.py +64 -0
  1176. vllm/v1/spec_decode/metadata.py +62 -0
  1177. vllm/v1/spec_decode/metrics.py +178 -0
  1178. vllm/v1/spec_decode/ngram_proposer.py +132 -0
  1179. vllm/v1/spec_decode/utils.py +41 -0
  1180. vllm/v1/structured_output/__init__.py +227 -0
  1181. vllm/v1/structured_output/backend_guidance.py +245 -0
  1182. vllm/v1/structured_output/backend_types.py +134 -0
  1183. vllm/v1/structured_output/backend_xgrammar.py +318 -0
  1184. vllm/v1/structured_output/request.py +86 -0
  1185. vllm/v1/structured_output/utils.py +175 -0
  1186. vllm/v1/utils.py +377 -0
  1187. vllm/v1/worker/__init__.py +0 -0
  1188. vllm/v1/worker/block_table.py +142 -0
  1189. vllm/v1/worker/cpu_model_runner.py +91 -0
  1190. vllm/v1/worker/cpu_worker.py +153 -0
  1191. vllm/v1/worker/gpu_input_batch.py +757 -0
  1192. vllm/v1/worker/gpu_model_runner.py +2739 -0
  1193. vllm/v1/worker/gpu_worker.py +408 -0
  1194. vllm/v1/worker/lora_model_runner_mixin.py +177 -0
  1195. vllm/v1/worker/tpu_input_batch.py +585 -0
  1196. vllm/v1/worker/tpu_model_runner.py +1849 -0
  1197. vllm/v1/worker/tpu_worker.py +315 -0
  1198. vllm/v1/worker/utils.py +112 -0
  1199. vllm/v1/worker/worker_base.py +65 -0
  1200. vllm/v1/worker/xpu_model_runner.py +33 -0
  1201. vllm/v1/worker/xpu_worker.py +165 -0
  1202. vllm/version.py +41 -0
  1203. vllm/vllm_flash_attn/.gitkeep +0 -0
  1204. vllm/worker/__init__.py +0 -0
  1205. vllm/worker/cache_engine.py +145 -0
  1206. vllm/worker/cpu_enc_dec_model_runner.py +326 -0
  1207. vllm/worker/cpu_model_runner.py +671 -0
  1208. vllm/worker/cpu_pooling_model_runner.py +125 -0
  1209. vllm/worker/cpu_worker.py +452 -0
  1210. vllm/worker/enc_dec_model_runner.py +555 -0
  1211. vllm/worker/hpu_model_runner.py +2320 -0
  1212. vllm/worker/hpu_worker.py +484 -0
  1213. vllm/worker/model_runner.py +2178 -0
  1214. vllm/worker/model_runner_base.py +282 -0
  1215. vllm/worker/multi_step_hpu_worker.py +123 -0
  1216. vllm/worker/multi_step_model_runner.py +911 -0
  1217. vllm/worker/multi_step_neuron_model_runner.py +84 -0
  1218. vllm/worker/multi_step_neuronx_distributed_model_runner.py +63 -0
  1219. vllm/worker/multi_step_tpu_worker.py +108 -0
  1220. vllm/worker/multi_step_worker.py +197 -0
  1221. vllm/worker/neuron_model_runner.py +460 -0
  1222. vllm/worker/neuron_worker.py +193 -0
  1223. vllm/worker/neuronx_distributed_model_runner.py +294 -0
  1224. vllm/worker/pooling_model_runner.py +211 -0
  1225. vllm/worker/tpu_model_runner.py +909 -0
  1226. vllm/worker/tpu_worker.py +337 -0
  1227. vllm/worker/utils.py +53 -0
  1228. vllm/worker/worker.py +577 -0
  1229. vllm/worker/worker_base.py +646 -0
  1230. vllm/worker/xpu_model_runner.py +606 -0
  1231. vllm/worker/xpu_worker.py +186 -0
  1232. vllm_cpu-0.9.2.post2.dist-info/METADATA +339 -0
  1233. vllm_cpu-0.9.2.post2.dist-info/RECORD +1236 -0
  1234. vllm_cpu-0.9.2.post2.dist-info/WHEEL +5 -0
  1235. vllm_cpu-0.9.2.post2.dist-info/entry_points.txt +5 -0
  1236. vllm_cpu-0.9.2.post2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1599 @@
+ # SPDX-License-Identifier: Apache-2.0
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+
+ import itertools
+ import warnings
+ from collections.abc import Sequence
+ from contextlib import contextmanager
+ from typing import (TYPE_CHECKING, Any, Callable, ClassVar, Optional, Union,
+                     cast, overload)
+
+ import cloudpickle
+ import torch.nn as nn
+ from pydantic import ValidationError
+ from tqdm.auto import tqdm
+ from typing_extensions import TypeVar, deprecated
+
+ from vllm.beam_search import (BeamSearchInstance, BeamSearchOutput,
+                               BeamSearchSequence,
+                               create_sort_beams_key_function)
+ from vllm.config import (CompilationConfig, ModelDType, TokenizerMode,
+                          is_init_field)
+ from vllm.engine.arg_utils import (EngineArgs, HfOverrides, PoolerConfig,
+                                    TaskOption)
+ from vllm.engine.llm_engine import LLMEngine
+ from vllm.entrypoints.chat_utils import (ChatCompletionMessageParam,
+                                          ChatTemplateContentFormatOption,
+                                          apply_hf_chat_template,
+                                          apply_mistral_chat_template,
+                                          parse_chat_messages,
+                                          resolve_chat_template_content_format)
+ from vllm.entrypoints.score_utils import (_cosine_similarity,
+                                           _validate_score_input_lens)
+ from vllm.entrypoints.utils import _validate_truncation_size
+ from vllm.inputs import PromptType, SingletonPrompt, TextPrompt, TokensPrompt
+ from vllm.inputs.parse import parse_and_batch_prompt
+ from vllm.logger import init_logger
+ from vllm.lora.request import LoRARequest
+ from vllm.model_executor.guided_decoding.guided_fields import (
+     GuidedDecodingRequest, LLMGuidedOptions)
+ from vllm.model_executor.layers.quantization import QuantizationMethods
+ from vllm.outputs import (ClassificationRequestOutput, EmbeddingRequestOutput,
+                           PoolingRequestOutput, RequestOutput,
+                           ScoringRequestOutput)
+ from vllm.pooling_params import PoolingParams
+ from vllm.prompt_adapter.request import PromptAdapterRequest
+ from vllm.sampling_params import (BeamSearchParams, GuidedDecodingParams,
+                                   RequestOutputKind, SamplingParams)
+ from vllm.transformers_utils.tokenizer import (AnyTokenizer, MistralTokenizer,
+                                                get_cached_tokenizer)
+ from vllm.usage.usage_lib import UsageContext
+ from vllm.utils import Counter, Device, deprecate_kwargs, is_list_of
+
+ if TYPE_CHECKING:
+     from vllm.v1.metrics.reader import Metric
+
+ logger = init_logger(__name__)
+
+ _R = TypeVar("_R", default=Any)
+
+
+ class LLM:
+     """An LLM for generating texts from given prompts and sampling parameters.
+
+     This class includes a tokenizer, a language model (possibly distributed
+     across multiple GPUs), and GPU memory space allocated for intermediate
+     states (aka KV cache). Given a batch of prompts and sampling parameters,
+     this class generates texts from the model, using an intelligent batching
+     mechanism and efficient memory management.
+
+     Args:
+         model: The name or path of a HuggingFace Transformers model.
+         tokenizer: The name or path of a HuggingFace Transformers tokenizer.
+         tokenizer_mode: The tokenizer mode. "auto" will use the fast tokenizer
+             if available, and "slow" will always use the slow tokenizer.
+         skip_tokenizer_init: If true, skip initialization of tokenizer and
+             detokenizer. Expect valid prompt_token_ids and None for prompt
+             from the input.
+         trust_remote_code: Trust remote code (e.g., from HuggingFace) when
+             downloading the model and tokenizer.
+         allowed_local_media_path: Allowing API requests to read local images
+             or videos from directories specified by the server file system.
+             This is a security risk. Should only be enabled in trusted
+             environments.
+         tensor_parallel_size: The number of GPUs to use for distributed
+             execution with tensor parallelism.
+         dtype: The data type for the model weights and activations. Currently,
+             we support `float32`, `float16`, and `bfloat16`. If `auto`, we use
+             the `torch_dtype` attribute specified in the model config file.
+             However, if the `torch_dtype` in the config is `float32`, we will
+             use `float16` instead.
+         quantization: The method used to quantize the model weights. Currently,
+             we support "awq", "gptq", and "fp8" (experimental).
+             If None, we first check the `quantization_config` attribute in the
+             model config file. If that is None, we assume the model weights are
+             not quantized and use `dtype` to determine the data type of
+             the weights.
+         revision: The specific model version to use. It can be a branch name,
+             a tag name, or a commit id.
+         tokenizer_revision: The specific tokenizer version to use. It can be a
+             branch name, a tag name, or a commit id.
+         seed: The seed to initialize the random number generator for sampling.
+         gpu_memory_utilization: The ratio (between 0 and 1) of GPU memory to
+             reserve for the model weights, activations, and KV cache. Higher
+             values will increase the KV cache size and thus improve the model's
+             throughput. However, if the value is too high, it may cause out-of-
+             memory (OOM) errors.
+         swap_space: The size (GiB) of CPU memory per GPU to use as swap space.
+             This can be used for temporarily storing the states of the requests
+             when their `best_of` sampling parameters are larger than 1. If all
+             requests will have `best_of=1`, you can safely set this to 0.
+             Noting that `best_of` is only supported in V0. Otherwise, too small
+             values may cause out-of-memory (OOM) errors.
+         cpu_offload_gb: The size (GiB) of CPU memory to use for offloading
+             the model weights. This virtually increases the GPU memory space
+             you can use to hold the model weights, at the cost of CPU-GPU data
+             transfer for every forward pass.
+         enforce_eager: Whether to enforce eager execution. If True, we will
+             disable CUDA graph and always execute the model in eager mode.
+             If False, we will use CUDA graph and eager execution in hybrid.
+         max_seq_len_to_capture: Maximum sequence len covered by CUDA graphs.
+             When a sequence has context length larger than this, we fall back
+             to eager mode. Additionally for encoder-decoder models, if the
+             sequence length of the encoder input is larger than this, we fall
+             back to the eager mode.
+         disable_custom_all_reduce: See
+             [ParallelConfig][vllm.config.ParallelConfig].
+         disable_async_output_proc: Disable async output processing.
+             This may result in lower performance.
+         hf_token: The token to use as HTTP bearer authorization for remote files
+             . If `True`, will use the token generated when running
+             `huggingface-cli login` (stored in `~/.huggingface`).
+         hf_overrides: If a dictionary, contains arguments to be forwarded to the
+             HuggingFace config. If a callable, it is called to update the
+             HuggingFace config.
+         mm_processor_kwargs: Arguments to be forwarded to the model's processor
+             for multi-modal data, e.g., image processor. Overrides for the
+             multi-modal processor obtained from `AutoProcessor.from_pretrained`.
+             The available overrides depend on the model that is being run.
+             For example, for Phi-3-Vision: `{"num_crops": 4}`.
+         override_pooler_config: Initialize non-default pooling config or
+             override default pooling config for the pooling model.
+             e.g. `PoolerConfig(pooling_type="mean", normalize=False)`.
+         compilation_config: Either an integer or a dictionary. If it is an
+             integer, it is used as the level of compilation optimization. If it
+             is a dictionary, it can specify the full compilation configuration.
+         **kwargs: Arguments for [`EngineArgs`][vllm.EngineArgs].
+
+     Note:
+         This class is intended to be used for offline inference. For online
+         serving, use the [AsyncLLMEngine][vllm.AsyncLLMEngine] class instead.
+     """
+
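The constructor arguments documented above map one-to-one onto `EngineArgs` fields, so the class is driven entirely through keyword arguments at construction time. A minimal usage sketch (not part of the diffed file; the model id "facebook/opt-125m" and the sampling values are illustrative assumptions) using only `LLM`, `SamplingParams`, and `generate` as exported by the package would look roughly like this:

    from vllm import LLM, SamplingParams

    # Build the engine; each keyword mirrors an EngineArgs field described above.
    # The model id is an assumption for illustration only.
    llm = LLM(model="facebook/opt-125m", dtype="auto")

    # Sampling parameters come from vllm.sampling_params.SamplingParams.
    params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=64)

    # generate() accepts a prompt or a list of prompts and returns RequestOutput objects.
    for output in llm.generate(["Hello, my name is"], params):
        print(output.outputs[0].text)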
+     DEPRECATE_LEGACY: ClassVar[bool] = True
+     """A flag to toggle whether to deprecate the legacy generate/encode API."""
+
+     @classmethod
+     @contextmanager
+     def deprecate_legacy_api(cls):
+         cls.DEPRECATE_LEGACY = True
+
+         yield
+
+         cls.DEPRECATE_LEGACY = False
+
+     def __init__(
+         self,
+         model: str,
+         *,
+         task: TaskOption = "auto",
+         tokenizer: Optional[str] = None,
+         tokenizer_mode: TokenizerMode = "auto",
+         skip_tokenizer_init: bool = False,
+         trust_remote_code: bool = False,
+         allowed_local_media_path: str = "",
+         tensor_parallel_size: int = 1,
+         dtype: ModelDType = "auto",
+         quantization: Optional[QuantizationMethods] = None,
+         revision: Optional[str] = None,
+         tokenizer_revision: Optional[str] = None,
+         seed: Optional[int] = None,
+         gpu_memory_utilization: float = 0.9,
+         swap_space: float = 4,
+         cpu_offload_gb: float = 0,
+         enforce_eager: bool = False,
+         max_seq_len_to_capture: int = 8192,
+         disable_custom_all_reduce: bool = False,
+         disable_async_output_proc: bool = False,
+         hf_token: Optional[Union[bool, str]] = None,
+         hf_overrides: Optional[HfOverrides] = None,
+         mm_processor_kwargs: Optional[dict[str, Any]] = None,
+         override_pooler_config: Optional[PoolerConfig] = None,
+         compilation_config: Optional[Union[int, dict[str, Any],
+                                            CompilationConfig]] = None,
+         **kwargs,
+     ) -> None:
+         """LLM constructor."""
+
+         if "disable_log_stats" not in kwargs:
+             kwargs["disable_log_stats"] = True
+
+         if "worker_cls" in kwargs:
+             worker_cls = kwargs["worker_cls"]
+             # if the worker_cls is not qualified string name,
+             # we serialize it using cloudpickle to avoid pickling issues
+             if isinstance(worker_cls, type):
+                 kwargs["worker_cls"] = cloudpickle.dumps(worker_cls)
+
+         if "kv_transfer_config" in kwargs and isinstance(
+                 kwargs["kv_transfer_config"], dict):
+             from vllm.config import KVTransferConfig
+             raw_config_dict = kwargs["kv_transfer_config"]
+             try:
+                 kwargs["kv_transfer_config"] = KVTransferConfig(
+                     **raw_config_dict)
+             except ValidationError as e:
+                 logger.error(
+                     "Failed to convert 'kv_transfer_config' dict to "
+                     "KVTransferConfig object. Dict: %s. Error: %s",
+                     raw_config_dict, e)
+                 # Consider re-raising a more specific vLLM error or ValueError
+                 # to provide better context to the user.
+                 raise ValueError(
+                     f"Invalid 'kv_transfer_config' provided: {e}") from e
+
+         if hf_overrides is None:
+             hf_overrides = {}
+
+         if compilation_config is not None:
+             if isinstance(compilation_config, int):
+                 compilation_config_instance = CompilationConfig(
+                     level=compilation_config)
+             elif isinstance(compilation_config, dict):
+                 predicate = lambda x: is_init_field(CompilationConfig, x[0])
+                 compilation_config_instance = CompilationConfig(
+                     **dict(filter(predicate, compilation_config.items())))
+             else:
+                 compilation_config_instance = compilation_config
+         else:
+             compilation_config_instance = CompilationConfig()
+
+         engine_args = EngineArgs(
+             model=model,
+             task=task,
+             tokenizer=tokenizer,
+             tokenizer_mode=tokenizer_mode,
+             skip_tokenizer_init=skip_tokenizer_init,
+             trust_remote_code=trust_remote_code,
+             allowed_local_media_path=allowed_local_media_path,
+             tensor_parallel_size=tensor_parallel_size,
+             dtype=dtype,
+             quantization=quantization,
+             revision=revision,
+             tokenizer_revision=tokenizer_revision,
+             seed=seed,
+             gpu_memory_utilization=gpu_memory_utilization,
+             swap_space=swap_space,
+             cpu_offload_gb=cpu_offload_gb,
+             enforce_eager=enforce_eager,
+             max_seq_len_to_capture=max_seq_len_to_capture,
+             disable_custom_all_reduce=disable_custom_all_reduce,
+             disable_async_output_proc=disable_async_output_proc,
+             hf_token=hf_token,
+             hf_overrides=hf_overrides,
+             mm_processor_kwargs=mm_processor_kwargs,
+             override_pooler_config=override_pooler_config,
+             compilation_config=compilation_config_instance,
+             **kwargs,
+         )
+
+         # Create the Engine (autoselects V0 vs V1)
+         self.llm_engine = LLMEngine.from_engine_args(
+             engine_args=engine_args, usage_context=UsageContext.LLM_CLASS)
+         self.engine_class = type(self.llm_engine)
+
+         self.request_counter = Counter()
+         self.default_sampling_params: Union[dict[str, Any], None] = None
+
+     def get_tokenizer(
+         self,
+         lora_request: Optional[LoRARequest] = None,
+     ) -> AnyTokenizer:
+         return self.llm_engine.get_tokenizer_group().get_lora_tokenizer(
+             lora_request)
+
+     def set_tokenizer(self, tokenizer: AnyTokenizer) -> None:
+         tokenizer_group = self.llm_engine.get_tokenizer_group()
+
+         # While CachedTokenizer is dynamic, have no choice but
+         # compare class name. Misjudgment will arise from
+         # user-defined tokenizer started with 'Cached'
+         if tokenizer.__class__.__name__.startswith("Cached"):
+             tokenizer_group.tokenizer = tokenizer
+         else:
+             tokenizer_group.tokenizer = get_cached_tokenizer(tokenizer)
+
+     def get_default_sampling_params(self) -> SamplingParams:
+         if self.default_sampling_params is None:
+             self.default_sampling_params = (
+                 self.llm_engine.model_config.get_diff_sampling_param())
+         if self.default_sampling_params:
+             return SamplingParams.from_optional(**self.default_sampling_params)
+         return SamplingParams()
+
+     @overload
+     def generate(
+         self,
+         prompts: Union[PromptType, Sequence[PromptType]],
+         /,
+         sampling_params: Optional[Union[SamplingParams,
+                                         Sequence[SamplingParams]]] = None,
+         *,
+         use_tqdm: Union[bool, Callable[..., tqdm]] = True,
+         lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+         prompt_adapter_request: Optional[PromptAdapterRequest] = None,
+         guided_options_request: Optional[Union[LLMGuidedOptions,
+                                                 GuidedDecodingRequest]] = None,
+     ) -> list[RequestOutput]:
+         ...
+
+     @overload  # LEGACY: single (prompt + optional token ids)
+     @deprecated("'prompt_token_ids' will become part of 'prompts'")
+     def generate(
+         self,
+         prompts: str,
+         sampling_params: Optional[Union[SamplingParams,
+                                         list[SamplingParams]]] = None,
+         prompt_token_ids: Optional[list[int]] = None,
+         use_tqdm: Union[bool, Callable[..., tqdm]] = True,
+         lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+         prompt_adapter_request: Optional[PromptAdapterRequest] = None,
+         guided_options_request: Optional[Union[LLMGuidedOptions,
+                                                 GuidedDecodingRequest]] = None,
+     ) -> list[RequestOutput]:
+         ...
+
+     @overload  # LEGACY: multi (prompt + optional token ids)
+     @deprecated("'prompt_token_ids' will become part of 'prompts'")
+     def generate(
+         self,
+         prompts: list[str],
+         sampling_params: Optional[Union[SamplingParams,
+                                         list[SamplingParams]]] = None,
+         prompt_token_ids: Optional[list[list[int]]] = None,
+         use_tqdm: Union[bool, Callable[..., tqdm]] = True,
+         lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+         prompt_adapter_request: Optional[PromptAdapterRequest] = None,
+         guided_options_request: Optional[Union[LLMGuidedOptions,
+                                                 GuidedDecodingRequest]] = None,
+     ) -> list[RequestOutput]:
+         ...
+
+     @overload  # LEGACY: single (token ids + optional prompt)
+     @deprecated("'prompt_token_ids' will become part of 'prompts'")
+     def generate(
+         self,
+         prompts: Optional[str] = None,
+         sampling_params: Optional[Union[SamplingParams,
+                                         list[SamplingParams]]] = None,
+         *,
+         prompt_token_ids: list[int],
+         use_tqdm: Union[bool, Callable[..., tqdm]] = True,
+         lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+         prompt_adapter_request: Optional[PromptAdapterRequest] = None,
+         guided_options_request: Optional[Union[LLMGuidedOptions,
+                                                 GuidedDecodingRequest]] = None,
+     ) -> list[RequestOutput]:
+         ...
+
+     @overload  # LEGACY: multi (token ids + optional prompt)
+     @deprecated("'prompt_token_ids' will become part of 'prompts'")
+     def generate(
+         self,
+         prompts: Optional[list[str]] = None,
+         sampling_params: Optional[Union[SamplingParams,
+                                         list[SamplingParams]]] = None,
+         *,
+         prompt_token_ids: list[list[int]],
+         use_tqdm: Union[bool, Callable[..., tqdm]] = True,
+         lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+         prompt_adapter_request: Optional[PromptAdapterRequest] = None,
+         guided_options_request: Optional[Union[LLMGuidedOptions,
+                                                 GuidedDecodingRequest]] = None,
+     ) -> list[RequestOutput]:
+         ...
+
+     @overload  # LEGACY: single or multi token ids [pos-only]
+     @deprecated("'prompt_token_ids' will become part of 'prompts'")
+     def generate(
+         self,
+         prompts: None,
+         sampling_params: None,
+         prompt_token_ids: Union[list[int], list[list[int]]],
+         use_tqdm: Union[bool, Callable[..., tqdm]] = True,
+         lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
395
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
396
+ guided_options_request: Optional[Union[LLMGuidedOptions,
397
+ GuidedDecodingRequest]] = None,
398
+ ) -> list[RequestOutput]:
399
+ ...
400
+
401
+ @deprecate_kwargs(
402
+ "prompt_token_ids",
403
+ is_deprecated=lambda: LLM.DEPRECATE_LEGACY,
404
+ additional_message="Please use the 'prompts' parameter instead.",
405
+ )
406
+ def generate(
407
+ self,
408
+ prompts: Union[Union[PromptType, Sequence[PromptType]],
409
+ Optional[Union[str, list[str]]]] = None,
410
+ sampling_params: Optional[Union[SamplingParams,
411
+ Sequence[SamplingParams]]] = None,
412
+ prompt_token_ids: Optional[Union[list[int], list[list[int]]]] = None,
413
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
414
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
415
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
416
+ guided_options_request: Optional[Union[LLMGuidedOptions,
417
+ GuidedDecodingRequest]] = None,
418
+ priority: Optional[list[int]] = None,
419
+ ) -> list[RequestOutput]:
420
+ """Generates the completions for the input prompts.
421
+
422
+ This class automatically batches the given prompts, considering
423
+ the memory constraint. For the best performance, put all of your prompts
424
+ into a single list and pass it to this method.
425
+
426
+ Args:
427
+ prompts: The prompts to the LLM. You may pass a sequence of prompts
428
+ for batch inference. See [PromptType][vllm.inputs.PromptType]
429
+ for more details about the format of each prompt.
430
+ sampling_params: The sampling parameters for text generation. If
431
+ None, we use the default sampling parameters.
432
+ When it is a single value, it is applied to every prompt.
433
+ When it is a list, the list must have the same length as the
434
+ prompts, and each element is paired with the corresponding prompt.
435
+ use_tqdm: If `True`, shows a tqdm progress bar.
436
+ If a callable (e.g., `functools.partial(tqdm, leave=False)`),
437
+ it is used to create the progress bar.
438
+ If `False`, no progress bar is created.
439
+ lora_request: LoRA request to use for generation, if any.
440
+ prompt_adapter_request: Prompt Adapter request to use for
441
+ generation, if any.
442
+ priority: The priority of the requests, if any.
443
+ Only applicable when priority scheduling policy is enabled.
444
+
445
+ Returns:
446
+ A list of `RequestOutput` objects containing the
447
+ generated completions in the same order as the input prompts.
448
+
449
+ Note:
450
+ Using `prompts` and `prompt_token_ids` as keyword parameters is
451
+ considered legacy and may be deprecated in the future. You should
452
+ instead pass them via the `prompts` parameter.
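+
+ Example (a minimal usage sketch; the model name, prompt, and
+ sampling values are illustrative assumptions, not part of this file):
+
+     from vllm import LLM, SamplingParams
+
+     llm = LLM(model="facebook/opt-125m")
+     params = SamplingParams(temperature=0.8, max_tokens=32)
+     outputs = llm.generate(["Hello, my name is"], params)
+     print(outputs[0].outputs[0].text)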
453
+ """
454
+ runner_type = self.llm_engine.model_config.runner_type
455
+ if runner_type not in ["generate", "transcription"]:
456
+ messages = [
457
+ "LLM.generate() is only supported for (conditional) generation "
458
+ "models (XForCausalLM, XForConditionalGeneration).",
459
+ ]
460
+
461
+ supported_runner_types = self.llm_engine.model_config \
462
+ .supported_runner_types
463
+ if "generate" in supported_runner_types:
464
+ messages.append(
465
+ "Your model supports the 'generate' runner, but is "
466
+ f"currently initialized for the '{runner_type}' runner. "
467
+ "Please initialize vLLM using `--task generate`.")
468
+
469
+ raise ValueError(" ".join(messages))
470
+
471
+ if prompt_token_ids is not None:
472
+ parsed_prompts = self._convert_v1_inputs(
473
+ prompts=cast(Optional[Union[str, list[str]]], prompts),
474
+ prompt_token_ids=prompt_token_ids,
475
+ )
476
+ else:
477
+ parsed_prompts = cast(Union[PromptType, Sequence[PromptType]],
478
+ prompts)
479
+
480
+ if isinstance(guided_options_request, dict):
481
+ if len(guided_options_request) > 1:
482
+ raise ValueError(
483
+ "You can only use one guided decoding but multiple is "
484
+ f"specified: {guided_options_request}")
485
+ guided_options_request = GuidedDecodingRequest(
486
+ **guided_options_request)
487
+
488
+ if sampling_params is None:
489
+ # Use default sampling params.
490
+ sampling_params = self.get_default_sampling_params()
491
+
492
+ tokenization_kwargs: dict[str, Any] = {}
493
+ truncate_prompt_tokens = None
494
+ if isinstance(sampling_params, SamplingParams):
495
+ truncate_prompt_tokens = sampling_params.truncate_prompt_tokens
496
+ _validate_truncation_size(self.llm_engine.model_config.max_model_len,
497
+ truncate_prompt_tokens, tokenization_kwargs)
498
+
499
+ self._validate_and_add_requests(
500
+ prompts=parsed_prompts,
501
+ params=sampling_params,
502
+ use_tqdm=use_tqdm,
503
+ lora_request=lora_request,
504
+ prompt_adapter_request=prompt_adapter_request,
505
+ guided_options=guided_options_request,
506
+ tokenization_kwargs=tokenization_kwargs,
507
+ priority=priority,
508
+ )
509
+
510
+ outputs = self._run_engine(use_tqdm=use_tqdm)
511
+ return self.engine_class.validate_outputs(outputs, RequestOutput)
512
+
513
+ def collective_rpc(self,
514
+ method: Union[str, Callable[..., _R]],
515
+ timeout: Optional[float] = None,
516
+ args: tuple = (),
517
+ kwargs: Optional[dict[str, Any]] = None) -> list[_R]:
518
+ """
519
+ Execute an RPC call on all workers.
520
+
521
+ Args:
522
+ method: Name of the worker method to execute, or a callable that
523
+ is serialized and sent to all workers to execute.
524
+
525
+ If the method is a callable, it should accept an additional
526
+ `self` argument, in addition to the arguments passed in `args`
527
+ and `kwargs`. The `self` argument will be the worker object.
528
+ timeout: Maximum time in seconds to wait for execution. Raises a
529
+ [`TimeoutError`][] on timeout. `None` means wait indefinitely.
530
+ args: Positional arguments to pass to the worker method.
531
+ kwargs: Keyword arguments to pass to the worker method.
532
+
533
+ Returns:
534
+ A list containing the results from each worker.
535
+
536
+ Note:
537
+ It is recommended to use this API to only pass control messages,
538
+ and set up data-plane communication to pass data.
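+
+ Example (a hedged sketch of the callable form described above; the
+ helper function is hypothetical and relies only on the documented
+ contract that it receives the worker object as its first argument):
+
+     def worker_class_name(worker) -> str:
+         # `worker` is the per-rank worker object.
+         return type(worker).__name__
+
+     names = llm.collective_rpc(worker_class_name)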
539
+ """
540
+
541
+ return self.llm_engine.collective_rpc(method, timeout, args, kwargs)
542
+
543
+ def apply_model(self, func: Callable[[nn.Module], _R]) -> list[_R]:
544
+ """
545
+ Run a function directly on the model inside each worker,
546
+ returning the result for each of them.
547
+ """
548
+ executor = self.llm_engine.model_executor
549
+ return executor.apply_model(func)
550
+
551
+ def _get_beam_search_lora_requests(
552
+ self,
553
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]],
554
+ prompts: list[Union[TokensPrompt, TextPrompt]],
555
+ ) -> list[Optional[LoRARequest]]:
556
+ """Get the optional lora request corresponding to each prompt."""
557
+ if isinstance(lora_request,
558
+ Sequence) and len(lora_request) != len(prompts):
559
+ raise ValueError(
560
+ "Lora request list should be the same length as the prompts")
561
+
562
+ if lora_request is None or isinstance(lora_request, LoRARequest):
563
+ return [lora_request] * len(prompts)
564
+
565
+ raise TypeError(f"Invalid lora_request type {type(lora_request)}")
566
+
567
+ def beam_search(
568
+ self,
569
+ prompts: list[Union[TokensPrompt, TextPrompt]],
570
+ params: BeamSearchParams,
571
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
572
+ use_tqdm: bool = False,
573
+ ) -> list[BeamSearchOutput]:
574
+ """
575
+ Generate sequences using beam search.
576
+
577
+ Args:
578
+ prompts: A list of prompts. Each prompt can be a string or a list
579
+ of token IDs.
580
+ params: The beam search parameters.
581
+ lora_request: LoRA request to use for generation, if any.
582
+ use_tqdm: Whether to use tqdm to display the progress bar.
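+
+ Example (an illustrative sketch; `BeamSearchParams` is assumed to be
+ importable from vllm.sampling_params, and the prompt is a placeholder):
+
+     from vllm.sampling_params import BeamSearchParams
+
+     params = BeamSearchParams(beam_width=4, max_tokens=32)
+     outputs = llm.beam_search([{"prompt": "The capital of France is"}],
+                               params)
+     best = outputs[0].sequences[0]
+     print(best.text)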
583
+ """
584
+ # TODO: how does beam search work together with length penalty,
585
+ # frequency penalty, stopping criteria, etc.?
586
+ beam_width = params.beam_width
587
+ max_tokens = params.max_tokens
588
+ temperature = params.temperature
589
+ ignore_eos = params.ignore_eos
590
+ length_penalty = params.length_penalty
591
+
592
+ lora_requests = self._get_beam_search_lora_requests(
593
+ lora_request, prompts)
594
+
595
+ tokenizer = self.get_tokenizer()
596
+ sort_beams_key = create_sort_beams_key_function(
597
+ tokenizer.eos_token_id,
598
+ length_penalty,
599
+ )
600
+
601
+ def create_tokens_prompt_from_beam(
602
+ beam: BeamSearchSequence) -> TokensPrompt:
603
+ token_prompt_kwargs: TokensPrompt = {
604
+ "prompt_token_ids": beam.tokens
605
+ }
606
+ if beam.multi_modal_data is not None:
607
+ token_prompt_kwargs["multi_modal_data"] = beam.multi_modal_data
608
+
609
+ if beam.mm_processor_kwargs is not None:
610
+ token_prompt_kwargs[
611
+ "mm_processor_kwargs"] = beam.mm_processor_kwargs
612
+ return TokensPrompt(**token_prompt_kwargs)
613
+
614
+ # generate 2 * beam_width candidates at each step
615
+ # following the huggingface transformers implementation
616
+ # at https://github.com/huggingface/transformers/blob/e15687fffe5c9d20598a19aeab721ae0a7580f8a/src/transformers/generation/beam_search.py#L534 # noqa
617
+ beam_search_params = SamplingParams(logprobs=2 * beam_width,
618
+ max_tokens=1,
619
+ temperature=temperature)
620
+ instances: list[BeamSearchInstance] = []
621
+
622
+ for lora_req, prompt in zip(lora_requests, prompts):
623
+ # Add multimodal processor kwargs & data
624
+ mm_kwargs = {}
625
+ if "multi_modal_data" in prompt:
626
+ mm_kwargs["multi_modal_data"] = prompt["multi_modal_data"]
627
+ if "mm_processor_kwargs" in prompt:
628
+ mm_kwargs["mm_processor_kwargs"] = prompt[
629
+ "mm_processor_kwargs"]
630
+
631
+ if "prompt_token_ids" in prompt:
632
+ prompt = cast(TokensPrompt, prompt) # Needed for mypy
633
+ prompt_tokens = prompt["prompt_token_ids"]
634
+ else:
635
+ prompt_tokens = tokenizer.encode(prompt["prompt"])
636
+
637
+ instances.append(
638
+ BeamSearchInstance(
639
+ prompt_tokens,
640
+ lora_request=lora_req,
641
+ logprobs=None,
642
+ **mm_kwargs,
643
+ ), )
644
+
645
+ token_iter = range(max_tokens)
646
+ if use_tqdm:
647
+ token_iter = tqdm(token_iter,
648
+ desc="Beam search",
649
+ unit="token",
650
+ unit_scale=False)
651
+ logger.warning(
652
+ "The progress bar shows the upper bound on token steps and "
653
+ "may finish early due to stopping conditions. It does not "
654
+ "reflect instance-level progress.")
655
+
656
+ for _ in token_iter:
657
+ all_beams: list[BeamSearchSequence] = list(
658
+ sum((instance.beams for instance in instances), []))
659
+ pos = [0] + list(
660
+ itertools.accumulate(
661
+ len(instance.beams) for instance in instances))
662
+ instance_start_and_end: list[tuple[int, int]] = list(
663
+ zip(pos[:-1], pos[1:]))
664
+
665
+ if len(all_beams) == 0:
666
+ break
667
+
668
+ # create the corresponding batch entries for prompt & optional lora
669
+ prompts_batch, lora_req_batch = zip(
670
+ *[(create_tokens_prompt_from_beam(beam), beam.lora_request)
671
+ for beam in all_beams])
672
+
673
+ # only runs for one step
674
+ # we don't need to use tqdm here
675
+ output = self.generate(prompts_batch,
676
+ sampling_params=beam_search_params,
677
+ use_tqdm=False,
678
+ lora_request=lora_req_batch)
679
+
680
+ for (start, end), instance in zip(instance_start_and_end,
681
+ instances):
682
+ instance_new_beams = []
683
+ for i in range(start, end):
684
+ current_beam = all_beams[i]
685
+ result = output[i]
686
+
687
+ if result.outputs[0].logprobs is not None:
688
+ # If `result.outputs[0].logprobs` is None, the sequence
689
+ # finished because it hit max-model-len or was aborted,
690
+ # so we don't need to add it to the new beams.
691
+ logprobs = result.outputs[0].logprobs[0]
692
+ for token_id, logprob_obj in logprobs.items():
693
+ new_beam = BeamSearchSequence(
694
+ tokens=current_beam.tokens + [token_id],
695
+ logprobs=current_beam.logprobs + [logprobs],
696
+ lora_request=current_beam.lora_request,
697
+ cum_logprob=current_beam.cum_logprob +
698
+ logprob_obj.logprob,
699
+ multi_modal_data=current_beam.multi_modal_data,
700
+ mm_processor_kwargs=current_beam.
701
+ mm_processor_kwargs)
702
+
703
+ if token_id == tokenizer.eos_token_id and \
704
+ not ignore_eos:
705
+ instance.completed.append(new_beam)
706
+ else:
707
+ instance_new_beams.append(new_beam)
708
+ sorted_beams = sorted(instance_new_beams,
709
+ key=sort_beams_key,
710
+ reverse=True)
711
+ instance.beams = sorted_beams[:beam_width]
712
+
713
+ outputs = []
714
+ for instance in instances:
715
+ instance.completed.extend(instance.beams)
716
+ sorted_completed = sorted(instance.completed,
717
+ key=sort_beams_key,
718
+ reverse=True)
719
+ best_beams = sorted_completed[:beam_width]
720
+
721
+ for beam in best_beams:
722
+ beam.text = tokenizer.decode(beam.tokens)
723
+ outputs.append(BeamSearchOutput(sequences=best_beams))
724
+
725
+ return outputs
726
+
727
+ def chat(
728
+ self,
729
+ messages: Union[list[ChatCompletionMessageParam],
730
+ list[list[ChatCompletionMessageParam]]],
731
+ sampling_params: Optional[Union[SamplingParams,
732
+ list[SamplingParams]]] = None,
733
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
734
+ lora_request: Optional[LoRARequest] = None,
735
+ chat_template: Optional[str] = None,
736
+ chat_template_content_format: ChatTemplateContentFormatOption = "auto",
737
+ add_generation_prompt: bool = True,
738
+ continue_final_message: bool = False,
739
+ tools: Optional[list[dict[str, Any]]] = None,
740
+ chat_template_kwargs: Optional[dict[str, Any]] = None,
741
+ mm_processor_kwargs: Optional[dict[str, Any]] = None,
742
+ ) -> list[RequestOutput]:
743
+ """
744
+ Generate responses for a chat conversation.
745
+
746
+ The chat conversation is converted into a text prompt using the
747
+ tokenizer, and the [generate][] method is called to generate the
748
+ responses.
749
+
750
+ Multi-modal inputs can be passed in the same way you would pass them
751
+ to the OpenAI API.
752
+
753
+ Args:
754
+ messages: A list of conversations or a single conversation.
755
+
756
+ - Each conversation is represented as a list of messages.
757
+ - Each message is a dictionary with 'role' and 'content' keys.
758
+
759
+ sampling_params: The sampling parameters for text generation.
760
+ If None, we use the default sampling parameters. When it
761
+ is a single value, it is applied to every prompt. When it
762
+ is a list, the list must have the same length as the
763
+ prompts, and each element is paired with the corresponding prompt.
764
+ use_tqdm: If `True`, shows a tqdm progress bar.
765
+ If a callable (e.g., `functools.partial(tqdm, leave=False)`),
766
+ it is used to create the progress bar.
767
+ If `False`, no progress bar is created.
768
+ lora_request: LoRA request to use for generation, if any.
769
+ chat_template: The template to use for structuring the chat.
770
+ If not provided, the model's default chat template will be used.
771
+ chat_template_content_format: The format to render message content.
772
+
773
+ - "string" will render the content as a string.
774
+ Example: `"Who are you?"`
775
+ - "openai" will render the content as a list of dictionaries,
776
+ similar to OpenAI schema.
777
+ Example: `[{"type": "text", "text": "Who are you?"}]`
778
+
779
+ add_generation_prompt: If True, adds a generation prompt
780
+ to the end of each conversation.
781
+ continue_final_message: If True, continues the final message in
782
+ the conversation instead of starting a new one. Cannot be
783
+ `True` if `add_generation_prompt` is also `True`.
784
+ chat_template_kwargs: Additional kwargs to pass to the chat
785
+ template.
786
+ mm_processor_kwargs: Multimodal processor kwarg overrides for this
787
+ chat request. Only used for offline requests.
788
+
789
+ Returns:
790
+ A list of `RequestOutput` objects containing the generated
791
+ responses in the same order as the input messages.
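+
+ Example (an illustrative sketch; the model name and messages are
+ placeholder assumptions):
+
+     llm = LLM(model="Qwen/Qwen2-1.5B-Instruct")
+     messages = [
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "What is the capital of France?"},
+     ]
+     outputs = llm.chat(messages)
+     print(outputs[0].outputs[0].text)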
792
+ """
793
+ list_of_messages: list[list[ChatCompletionMessageParam]]
794
+
795
+ # Handle multi and single conversations
796
+ if is_list_of(messages, list):
797
+ # messages is list[list[...]]
798
+ list_of_messages = cast(list[list[ChatCompletionMessageParam]],
799
+ messages)
800
+ else:
801
+ # messages is list[...]
802
+ list_of_messages = [
803
+ cast(list[ChatCompletionMessageParam], messages)
804
+ ]
805
+
806
+ tokenizer = self.get_tokenizer(lora_request)
807
+ model_config = self.llm_engine.get_model_config()
808
+ resolved_content_format = resolve_chat_template_content_format(
809
+ chat_template,
810
+ tools,
811
+ chat_template_content_format,
812
+ tokenizer,
813
+ model_config=model_config,
814
+ )
815
+
816
+ _chat_template_kwargs: dict[str, Any] = dict(
817
+ chat_template=chat_template,
818
+ add_generation_prompt=add_generation_prompt,
819
+ continue_final_message=continue_final_message,
820
+ tools=tools,
821
+ )
822
+ _chat_template_kwargs.update(chat_template_kwargs or {})
823
+
824
+ prompts: list[Union[TokensPrompt, TextPrompt]] = []
825
+
826
+ for msgs in list_of_messages:
827
+ # NOTE: _parse_chat_message_content_parts() currently doesn't
828
+ # handle mm_processor_kwargs, since there is no implementation in
829
+ # the chat message parsing for it.
830
+ conversation, mm_data = parse_chat_messages(
831
+ msgs,
832
+ model_config,
833
+ tokenizer,
834
+ content_format=resolved_content_format,
835
+ )
836
+
837
+ if isinstance(tokenizer, MistralTokenizer):
838
+ prompt_token_ids = apply_mistral_chat_template(
839
+ tokenizer,
840
+ messages=msgs,
841
+ **_chat_template_kwargs,
842
+ )
843
+ else:
844
+ prompt_str = apply_hf_chat_template(
845
+ tokenizer=tokenizer,
846
+ conversation=conversation,
847
+ model_config=model_config,
848
+ **_chat_template_kwargs,
849
+ )
850
+ # Special tokens are already included in chat templates so
851
+ # should not be added by the tokenizer in this case.
852
+ prompt_token_ids = tokenizer.encode(prompt_str,
853
+ add_special_tokens=False)
854
+
855
+ prompt = TokensPrompt(prompt_token_ids=prompt_token_ids)
856
+
857
+ if mm_data is not None:
858
+ prompt["multi_modal_data"] = mm_data
859
+
860
+ if mm_processor_kwargs is not None:
861
+ prompt["mm_processor_kwargs"] = mm_processor_kwargs
862
+
863
+ prompts.append(prompt)
864
+
865
+ return self.generate(
866
+ prompts,
867
+ sampling_params=sampling_params,
868
+ use_tqdm=use_tqdm,
869
+ lora_request=lora_request,
870
+ )
871
+
872
+ @overload
873
+ def encode(
874
+ self,
875
+ prompts: Union[PromptType, Sequence[PromptType]],
876
+ /,
877
+ pooling_params: Optional[Union[PoolingParams,
878
+ Sequence[PoolingParams]]] = None,
879
+ *,
880
+ truncate_prompt_tokens: Optional[int] = None,
881
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
882
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
883
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
884
+ ) -> list[PoolingRequestOutput]:
885
+ ...
886
+
887
+ @overload # LEGACY: single (prompt + optional token ids)
888
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
889
+ def encode(
890
+ self,
891
+ prompts: str,
892
+ pooling_params: Optional[Union[PoolingParams,
893
+ Sequence[PoolingParams]]] = None,
894
+ prompt_token_ids: Optional[list[int]] = None,
895
+ truncate_prompt_tokens: Optional[int] = None,
896
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
897
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
898
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
899
+ ) -> list[PoolingRequestOutput]:
900
+ ...
901
+
902
+ @overload # LEGACY: multi (prompt + optional token ids)
903
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
904
+ def encode(
905
+ self,
906
+ prompts: list[str],
907
+ pooling_params: Optional[Union[PoolingParams,
908
+ Sequence[PoolingParams]]] = None,
909
+ prompt_token_ids: Optional[list[list[int]]] = None,
910
+ truncate_prompt_tokens: Optional[int] = None,
911
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
912
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
913
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
914
+ ) -> list[PoolingRequestOutput]:
915
+ ...
916
+
917
+ @overload # LEGACY: single (token ids + optional prompt)
918
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
919
+ def encode(
920
+ self,
921
+ prompts: Optional[str] = None,
922
+ pooling_params: Optional[Union[PoolingParams,
923
+ Sequence[PoolingParams]]] = None,
924
+ *,
925
+ prompt_token_ids: list[int],
926
+ truncate_prompt_tokens: Optional[int] = None,
927
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
928
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
929
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
930
+ ) -> list[PoolingRequestOutput]:
931
+ ...
932
+
933
+ @overload # LEGACY: multi (token ids + optional prompt)
934
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
935
+ def encode(
936
+ self,
937
+ prompts: Optional[list[str]] = None,
938
+ pooling_params: Optional[Union[PoolingParams,
939
+ Sequence[PoolingParams]]] = None,
940
+ *,
941
+ prompt_token_ids: list[list[int]],
942
+ truncate_prompt_tokens: Optional[int] = None,
943
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
944
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
945
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
946
+ ) -> list[PoolingRequestOutput]:
947
+ ...
948
+
949
+ @overload # LEGACY: single or multi token ids [pos-only]
950
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
951
+ def encode(
952
+ self,
953
+ prompts: None,
954
+ pooling_params: None,
955
+ prompt_token_ids: Union[list[int], list[list[int]]],
956
+ truncate_prompt_tokens: Optional[int] = None,
957
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
958
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
959
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
960
+ ) -> list[PoolingRequestOutput]:
961
+ ...
962
+
963
+ @deprecate_kwargs(
964
+ "prompt_token_ids",
965
+ is_deprecated=lambda: LLM.DEPRECATE_LEGACY,
966
+ additional_message="Please use the 'prompts' parameter instead.",
967
+ )
968
+ def encode(
969
+ self,
970
+ prompts: Union[Union[PromptType, Sequence[PromptType]],
971
+ Optional[Union[str, list[str]]]] = None,
972
+ pooling_params: Optional[Union[PoolingParams,
973
+ Sequence[PoolingParams]]] = None,
974
+ prompt_token_ids: Optional[Union[list[int], list[list[int]]]] = None,
975
+ truncate_prompt_tokens: Optional[int] = None,
976
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
977
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
978
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
979
+ ) -> list[PoolingRequestOutput]:
980
+ """Apply pooling to the hidden states corresponding to the input
981
+ prompts.
982
+
983
+ This class automatically batches the given prompts, considering
984
+ the memory constraint. For the best performance, put all of your prompts
985
+ into a single list and pass it to this method.
986
+
987
+ Args:
988
+ prompts: The prompts to the LLM. You may pass a sequence of prompts
989
+ for batch inference. See [PromptType][vllm.inputs.PromptType]
990
+ for more details about the format of each prompt.
991
+ pooling_params: The pooling parameters for pooling. If None, we
992
+ use the default pooling parameters.
993
+ use_tqdm: If `True`, shows a tqdm progress bar.
994
+ If a callable (e.g., `functools.partial(tqdm, leave=False)`),
995
+ it is used to create the progress bar.
996
+ If `False`, no progress bar is created.
997
+ lora_request: LoRA request to use for generation, if any.
998
+ prompt_adapter_request: Prompt Adapter request to use for
999
+ generation, if any.
1000
+
1001
+ Returns:
1002
+ A list of `PoolingRequestOutput` objects containing the
1003
+ pooled hidden states in the same order as the input prompts.
1004
+
1005
+ Note:
1006
+ Using `prompts` and `prompt_token_ids` as keyword parameters is
1007
+ considered legacy and may be deprecated in the future. You should
1008
+ instead pass them via the `prompts` parameter.
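+
+ Example (an illustrative sketch; the model name is a placeholder and
+ the `outputs.data` attribute is assumed from PoolingRequestOutput):
+
+     llm = LLM(model="intfloat/e5-small-v2", task="embed")
+     (output,) = llm.encode("Hello, world!")
+     pooled = output.outputs.data  # pooled hidden states for the prompt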
1009
+ """
1010
+ runner_type = self.llm_engine.model_config.runner_type
1011
+ if runner_type != "pooling":
1012
+ messages = ["LLM.encode() is only supported for pooling models."]
1013
+
1014
+ supported_runner_types = self.llm_engine.model_config \
1015
+ .supported_runner_types
1016
+ if "pooling" in supported_runner_types:
1017
+ messages.append(
1018
+ "Your model supports the 'pooling' runner, but is "
1019
+ f"currently initialized for the '{runner_type}' runner. "
1020
+ "Please initialize vLLM using `--task embed`, "
1021
+ "`--task classify`, `--task score` etc.")
1022
+
1023
+ raise ValueError(" ".join(messages))
1024
+
1025
+ if prompt_token_ids is not None:
1026
+ parsed_prompts = self._convert_v1_inputs(
1027
+ prompts=cast(Optional[Union[str, list[str]]], prompts),
1028
+ prompt_token_ids=prompt_token_ids,
1029
+ )
1030
+ else:
1031
+ parsed_prompts = cast(Union[PromptType, Sequence[PromptType]],
1032
+ prompts)
1033
+
1034
+ if pooling_params is None:
1035
+ # Use default pooling params.
1036
+ pooling_params = PoolingParams()
1037
+ elif isinstance(pooling_params, PoolingParams):
1038
+ pooling_params.verify(self.llm_engine.model_config)
1039
+ else:
1040
+ for pooling_param in pooling_params:
1041
+ pooling_param.verify(self.llm_engine.model_config)
1042
+
1043
+ tokenization_kwargs: dict[str, Any] = {}
1044
+ _validate_truncation_size(self.llm_engine.model_config.max_model_len,
1045
+ truncate_prompt_tokens, tokenization_kwargs)
1046
+
1047
+ self._validate_and_add_requests(
1048
+ prompts=parsed_prompts,
1049
+ params=pooling_params,
1050
+ use_tqdm=use_tqdm,
1051
+ lora_request=lora_request,
1052
+ tokenization_kwargs=tokenization_kwargs,
1053
+ prompt_adapter_request=prompt_adapter_request,
1054
+ )
1055
+
1056
+ outputs = self._run_engine(use_tqdm=use_tqdm)
1057
+ return self.engine_class.validate_outputs(outputs,
1058
+ PoolingRequestOutput)
1059
+
1060
+ def embed(
1061
+ self,
1062
+ prompts: Union[PromptType, Sequence[PromptType]],
1063
+ /,
1064
+ *,
1065
+ truncate_prompt_tokens: Optional[int] = None,
1066
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
1067
+ pooling_params: Optional[Union[PoolingParams,
1068
+ Sequence[PoolingParams]]] = None,
1069
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
1070
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
1071
+ ) -> list[EmbeddingRequestOutput]:
1072
+ """
1073
+ Generate an embedding vector for each prompt.
1074
+
1075
+ This class automatically batches the given prompts, considering
1076
+ the memory constraint. For the best performance, put all of your prompts
1077
+ into a single list and pass it to this method.
1078
+
1079
+ Args:
1080
+ prompts: The prompts to the LLM. You may pass a sequence of prompts
1081
+ for batch inference. See [PromptType][vllm.inputs.PromptType]
1082
+ for more details about the format of each prompt.
1083
+ pooling_params: The pooling parameters for pooling. If None, we
1084
+ use the default pooling parameters.
1085
+ use_tqdm: If `True`, shows a tqdm progress bar.
1086
+ If a callable (e.g., `functools.partial(tqdm, leave=False)`),
1087
+ it is used to create the progress bar.
1088
+ If `False`, no progress bar is created.
1089
+ lora_request: LoRA request to use for generation, if any.
1090
+ prompt_adapter_request: Prompt Adapter request to use for
1091
+ generation, if any.
1092
+
1093
+ Returns:
1094
+ A list of `EmbeddingRequestOutput` objects containing the
1095
+ embedding vectors in the same order as the input prompts.
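+
+ Example (an illustrative sketch; the model name is a placeholder and
+ the `outputs.embedding` attribute is assumed from
+ EmbeddingRequestOutput):
+
+     llm = LLM(model="intfloat/e5-small-v2", task="embed")
+     (output,) = llm.embed("Hello, world!")
+     vector = output.outputs.embedding  # list of floats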
1096
+ """
1097
+ if self.llm_engine.model_config.task != "embed":
1098
+ raise ValueError(
1099
+ "Embedding API is only enabled for `--task embed`")
1100
+
1101
+ items = self.encode(prompts,
1102
+ truncate_prompt_tokens=truncate_prompt_tokens,
1103
+ use_tqdm=use_tqdm,
1104
+ pooling_params=pooling_params,
1105
+ lora_request=lora_request,
1106
+ prompt_adapter_request=prompt_adapter_request)
1107
+
1108
+ return [EmbeddingRequestOutput.from_base(item) for item in items]
1109
+
1110
+ def classify(
1111
+ self,
1112
+ prompts: Union[PromptType, Sequence[PromptType]],
1113
+ /,
1114
+ *,
1115
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
1116
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
1117
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
1118
+ ) -> list[ClassificationRequestOutput]:
1119
+ """
1120
+ Generate class logits for each prompt.
1121
+
1122
+ This class automatically batches the given prompts, considering
1123
+ the memory constraint. For the best performance, put all of your prompts
1124
+ into a single list and pass it to this method.
1125
+
1126
+ Args:
1127
+ prompts: The prompts to the LLM. You may pass a sequence of prompts
1128
+ for batch inference. See [PromptType][vllm.inputs.PromptType]
1129
+ for more details about the format of each prompt.
1130
+ use_tqdm: If `True`, shows a tqdm progress bar.
1131
+ If a callable (e.g., `functools.partial(tqdm, leave=False)`),
1132
+ it is used to create the progress bar.
1133
+ If `False`, no progress bar is created.
1134
+ lora_request: LoRA request to use for generation, if any.
1135
+ prompt_adapter_request: Prompt Adapter request to use for
1136
+ generation, if any.
1137
+
1138
+ Returns:
1139
+ A list of `ClassificationRequestOutput` objects containing the
1140
+ class logits in the same order as the input prompts.
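+
+ Example (an illustrative sketch; the model name is a placeholder and
+ the `outputs.probs` attribute is assumed from
+ ClassificationRequestOutput):
+
+     llm = LLM(model="jason9693/Qwen2.5-1.5B-apeach", task="classify")
+     (output,) = llm.classify("vLLM is wonderful!")
+     probs = output.outputs.probs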
1141
+ """
1142
+ if self.llm_engine.model_config.task != "classify":
1143
+ raise ValueError(
1144
+ "Classification API is only enabled for `--task classify`")
1145
+
1146
+ items = self.encode(prompts,
1147
+ use_tqdm=use_tqdm,
1148
+ lora_request=lora_request,
1149
+ prompt_adapter_request=prompt_adapter_request)
1150
+
1151
+ return [ClassificationRequestOutput.from_base(item) for item in items]
1152
+
1153
+ def _embedding_score(
1154
+ self,
1155
+ tokenizer: AnyTokenizer,
1156
+ text_1: list[Union[str, TextPrompt, TokensPrompt]],
1157
+ text_2: list[Union[str, TextPrompt, TokensPrompt]],
1158
+ truncate_prompt_tokens: Optional[int] = None,
1159
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
1160
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
1161
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
1162
+ ) -> list[ScoringRequestOutput]:
1163
+
1164
+ encoded_output: list[PoolingRequestOutput] = self.encode(
1165
+ text_1 + text_2,
1166
+ truncate_prompt_tokens=truncate_prompt_tokens,
1167
+ use_tqdm=use_tqdm,
1168
+ lora_request=lora_request,
1169
+ prompt_adapter_request=prompt_adapter_request)
1170
+
1171
+ encoded_output_1: list[PoolingRequestOutput] = encoded_output[
1172
+ 0:len(text_1)]
1173
+ encoded_output_2: list[PoolingRequestOutput] = encoded_output[
1174
+ len(text_1):]
1175
+
1176
+ if len(encoded_output_1) == 1:
1177
+ encoded_output_1 = encoded_output_1 * len(encoded_output_2)
1178
+
1179
+ scores = _cosine_similarity(tokenizer=tokenizer,
1180
+ embed_1=encoded_output_1,
1181
+ embed_2=encoded_output_2)
1182
+
1183
+ items = self.engine_class.validate_outputs(scores,
1184
+ PoolingRequestOutput)
1185
+ return [ScoringRequestOutput.from_base(item) for item in items]
1186
+
1187
+ def _cross_encoding_score(
1188
+ self,
1189
+ tokenizer: AnyTokenizer,
1190
+ text_1: list[str],
1191
+ text_2: list[str],
1192
+ truncate_prompt_tokens: Optional[int] = None,
1193
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
1194
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
1195
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
1196
+ ) -> list[ScoringRequestOutput]:
1197
+
1198
+ if isinstance(tokenizer, MistralTokenizer):
1199
+ raise ValueError(
1200
+ "Score API is only enabled for `--task embed or score`")
1201
+
1202
+ if len(text_1) == 1:
1203
+ text_1 = text_1 * len(text_2)
1204
+
1205
+ input_pairs = [(t1, t2) for t1, t2 in zip(text_1, text_2)]
1206
+
1207
+ pooling_params = PoolingParams(use_cross_encoder=True)
1208
+
1209
+ tokenization_kwargs: dict[str, Any] = {}
1210
+ _validate_truncation_size(self.llm_engine.model_config.max_model_len,
1211
+ truncate_prompt_tokens, tokenization_kwargs)
1212
+
1213
+ parsed_prompts = []
1214
+
1215
+ for q, t in input_pairs:
1216
+ prompt_inputs = tokenizer(text=q,
1217
+ text_pair=t,
1218
+ **tokenization_kwargs)
1219
+ engine_prompt = TokensPrompt(
1220
+ prompt_token_ids=prompt_inputs["input_ids"],
1221
+ token_type_ids=prompt_inputs.get("token_type_ids"))
1222
+ parsed_prompts.append(engine_prompt)
1223
+
1224
+ self._validate_and_add_requests(
1225
+ prompts=parsed_prompts,
1226
+ params=pooling_params,
1227
+ use_tqdm=use_tqdm,
1228
+ lora_request=lora_request,
1229
+ prompt_adapter_request=prompt_adapter_request,
1230
+ )
1231
+
1232
+ outputs = self._run_engine(use_tqdm=use_tqdm)
1233
+ items = self.engine_class.validate_outputs(outputs,
1234
+ PoolingRequestOutput)
1235
+
1236
+ return [ScoringRequestOutput.from_base(item) for item in items]
1237
+
1238
+ def score(
1239
+ self,
1240
+ text_1: Union[SingletonPrompt, Sequence[SingletonPrompt]],
1241
+ text_2: Union[SingletonPrompt, Sequence[SingletonPrompt]],
1242
+ /,
1243
+ *,
1244
+ truncate_prompt_tokens: Optional[int] = None,
1245
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
1246
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
1247
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
1248
+ ) -> list[ScoringRequestOutput]:
1249
+ """Generate similarity scores for all pairs `<text,text_pair>`.
1250
+
1251
+ The inputs can be `1 -> 1`, `1 -> N` or `N -> N`.
1252
+ In the `1 -> N` case the `text_1` sentence will be replicated `N`
1253
+ times to pair with the `text_2` sentences.
1254
+ The input pairs are used to build a list of prompts for the
1255
+ cross encoder model. This class automatically batches the prompts,
1256
+ considering the memory constraint. For the best performance, put all
1257
+ of your texts into a single list and pass it to this method.
1258
+
1259
+ Args:
1260
+ text_1: Can be a single prompt or a list of prompts, in which
1261
+ case it must have the same length as the `text_2` list.
1262
+ text_2: The texts to pair with the query to form the input
1263
+ to the LLM. See [PromptType][vllm.inputs.PromptType] for
1264
+ more details about the format of each prompt.
1265
+ use_tqdm: If `True`, shows a tqdm progress bar.
1266
+ If a callable (e.g., `functools.partial(tqdm, leave=False)`),
1267
+ it is used to create the progress bar.
1268
+ If `False`, no progress bar is created.
1269
+ lora_request: LoRA request to use for generation, if any.
1270
+ prompt_adapter_request: Prompt Adapter request to use for
1271
+ generation, if any.
1272
+
1273
+ Returns:
1274
+ A list of `ScoringRequestOutput` objects containing the
1275
+ generated scores in the same order as the input prompts.
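+
+ Example (an illustrative sketch; the model name is a placeholder and
+ the `outputs.score` attribute is assumed from ScoringRequestOutput):
+
+     llm = LLM(model="cross-encoder/ms-marco-MiniLM-L-6-v2", task="score")
+     (output,) = llm.score("What is the capital of France?",
+                           "Paris is the capital of France.")
+     print(output.outputs.score)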
1276
+ """
1277
+ runner_type = self.llm_engine.model_config.runner_type
1278
+ if runner_type != "pooling":
1279
+ messages = ["LLM.score() is only supported for pooling models."]
1280
+
1281
+ supported_runner_types = self.llm_engine.model_config \
1282
+ .supported_runner_types
1283
+ if "pooling" in supported_runner_types:
1284
+ messages.append(
1285
+ "Your model supports the 'pooling' runner, but is "
1286
+ f"currently initialized for the '{runner_type}' runner. "
1287
+ "Please initialize vLLM using `--task embed`, "
1288
+ "`--task classify`, `--task score` etc.")
1289
+
1290
+ raise ValueError(" ".join(messages))
1291
+
1292
+ if self.llm_engine.model_config.task not in ("embed", "classify"):
1293
+ raise ValueError("Score API is only enabled for "
1294
+ "`--task embed or --task classify`.")
1295
+
1296
+ if (self.llm_engine.model_config.task == "classify"
1297
+ and self.llm_engine.model_config.hf_config.num_labels != 1):
1298
+ raise ValueError("Score API is only enabled for num_labels == 1.")
1299
+
1300
+ # the tokenizer for models such as
1301
+ # "cross-encoder/ms-marco-MiniLM-L-6-v2" doesn't support passing
1302
+ # lists of tokens to the `text` and `text_pair` kwargs
1303
+ tokenizer = self.get_tokenizer()
1304
+
1305
+ def ensure_str(prompt: SingletonPrompt):
1306
+ if isinstance(prompt, dict):
1307
+ if "multi_modal_data" in prompt:
1308
+ raise ValueError("Multi-modal prompt is not "
1309
+ "supported for scoring")
1310
+ elif "prompt_token_ids" in prompt:
1311
+ prompt = tokenizer.decode(
1312
+ cast(TokensPrompt, prompt)["prompt_token_ids"])
1313
+ elif "prompt" in prompt:
1314
+ prompt = cast(TextPrompt, prompt)["prompt"]
1315
+ assert type(prompt) is str
1316
+ return prompt
1317
+
1318
+ if isinstance(text_1, (str, dict)):
1319
+ # Convert a single prompt to a list.
1320
+ text_1 = [text_1]
1321
+ input_text_1: list[str] = [ensure_str(t) for t in text_1]
1322
+
1323
+ if isinstance(text_2, (str, dict)):
1324
+ # Convert a single prompt to a list.
1325
+ text_2 = [text_2]
1326
+ input_text_2: list[str] = [ensure_str(t) for t in text_2]
1327
+
1328
+ _validate_score_input_lens(input_text_1, input_text_2)
1329
+
1330
+ if self.llm_engine.model_config.is_cross_encoder:
1331
+ return self._cross_encoding_score(tokenizer, input_text_1,
1332
+ input_text_2,
1333
+ truncate_prompt_tokens, use_tqdm,
1334
+ lora_request,
1335
+ prompt_adapter_request)
1336
+ else:
1337
+ return self._embedding_score(
1338
+ tokenizer,
1339
+ input_text_1, # type: ignore[arg-type]
1340
+ input_text_2, # type: ignore[arg-type]
1341
+ truncate_prompt_tokens,
1342
+ use_tqdm,
1343
+ lora_request,
1344
+ prompt_adapter_request)
1345
+
1346
+ def start_profile(self) -> None:
1347
+ self.llm_engine.start_profile()
1348
+
1349
+ def stop_profile(self) -> None:
1350
+ self.llm_engine.stop_profile()
1351
+
1352
+ def reset_prefix_cache(self, device: Optional[Device] = None) -> bool:
1353
+ return self.llm_engine.reset_prefix_cache(device)
1354
+
1355
+ def sleep(self, level: int = 1):
1356
+ """
1357
+ Put the engine to sleep so that it does not process any requests.
1358
+ The caller must guarantee that no requests are being processed
1359
+ during the sleep period, before `wake_up` is called.
1360
+
1361
+ Args:
1362
+ level: The sleep level. Level 1 sleep will offload the model
1363
+ weights and discard the kv cache. The content of kv cache
1364
+ is forgotten. Level 1 sleep is good for sleeping and waking
1365
+ up the engine to run the same model again. The model weights
1366
+ are backed up in CPU memory. Please make sure there's enough
1367
+ CPU memory to store the model weights. Level 2 sleep will
1368
+ discard both the model weights and the kv cache. The content
1369
+ of both the model weights and kv cache is forgotten. Level 2
1370
+ sleep is good for sleeping and waking up the engine to run a
1371
+ different model or update the model, where previous model
1372
+ weights are not needed. It reduces CPU memory pressure.
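+
+ Example (an illustrative sketch; assumes the engine was constructed
+ with sleep mode enabled, which is a configuration assumption not
+ shown in this file):
+
+     llm.sleep(level=1)   # offload weights to CPU, discard the KV cache
+     # ... use the freed accelerator memory for something else ...
+     llm.wake_up()        # restore weights before generating again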
1373
+ """
1374
+ self.reset_prefix_cache()
1375
+ self.llm_engine.sleep(level=level)
1376
+
1377
+ def wake_up(self, tags: Optional[list[str]] = None):
1378
+ """
1379
+ Wake up the engine from sleep mode. See the [sleep][] method
1380
+ for more details.
1381
+
1382
+ Args:
1383
+ tags: An optional list of tags to reallocate the engine memory
1384
+ for specific memory allocations. Values must be in
1385
+ `("weights", "kv_cache")`. If None, all memory is reallocated.
1386
+ wake_up should be called with all tags (or None) before the
1387
+ engine is used again.
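+
+ Example (an illustrative sketch of tag-based wake-up; the tag values
+ follow the list above):
+
+     llm.sleep(level=1)
+     llm.wake_up(tags=["weights"])   # reload weights first
+     # ... e.g. update the weights in place ...
+     llm.wake_up(tags=["kv_cache"])  # then reallocate the KV cache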
1388
+ """
1389
+ self.llm_engine.wake_up(tags)
1390
+
1391
+ def get_metrics(self) -> list["Metric"]:
1392
+ """Return a snapshot of aggregated metrics from Prometheus.
1393
+
1394
+ Returns:
1395
+ A list of ``Metric`` instances capturing the current state
1396
+ of all aggregated metrics from Prometheus.
1397
+
1398
+ Note:
1399
+ This method is only available with the V1 LLM engine.
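+
+ Example (an illustrative sketch; requires the V1 engine as noted
+ above, and simply prints each returned metric object):
+
+     for metric in llm.get_metrics():
+         print(metric)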
1400
+ """
1401
+ from vllm.v1.engine.llm_engine import LLMEngine as V1LLMEngine
1402
+ assert isinstance(self.llm_engine, V1LLMEngine)
1403
+ return self.llm_engine.get_metrics()
1404
+
1405
+ # LEGACY
1406
+ def _convert_v1_inputs(
1407
+ self,
1408
+ prompts: Optional[Union[str, list[str]]],
1409
+ prompt_token_ids: Optional[Union[list[int], list[list[int]]]],
1410
+ ):
1411
+ # skip_tokenizer_init is now checked in engine
1412
+
1413
+ if prompts is None and prompt_token_ids is None:
1414
+ raise ValueError(
1415
+ "Either prompts or prompt_token_ids must be provided.")
1416
+ if prompts is not None and prompt_token_ids is not None \
1417
+ and len(prompts) != len(prompt_token_ids):
1418
+ raise ValueError(
1419
+ "The lengths of prompts and prompt_token_ids must be the same."
1420
+ )
1421
+
1422
+ if prompts is not None:
1423
+ prompts = [p["content"] for p in parse_and_batch_prompt(prompts)]
1424
+ if prompt_token_ids is not None:
1425
+ prompt_token_ids = [
1426
+ p["content"] for p in parse_and_batch_prompt(prompt_token_ids)
1427
+ ]
1428
+ if prompts is not None:
1429
+ num_requests = len(prompts)
1430
+ elif prompt_token_ids is not None:
1431
+ num_requests = len(prompt_token_ids)
1432
+ parsed_prompts: list[PromptType] = []
1433
+ for i in range(num_requests):
1434
+ item: PromptType
1435
+
1436
+ if prompts is not None:
1437
+ item = TextPrompt(prompt=prompts[i])
1438
+ elif prompt_token_ids is not None:
1439
+ item = TokensPrompt(prompt_token_ids=prompt_token_ids[i])
1440
+ else:
1441
+ raise AssertionError
1442
+
1443
+ parsed_prompts.append(item)
1444
+
1445
+ return parsed_prompts
1446
+
1447
+ def _validate_and_add_requests(
1448
+ self,
1449
+ prompts: Union[PromptType, Sequence[PromptType]],
1450
+ params: Union[SamplingParams, Sequence[SamplingParams], PoolingParams,
1451
+ Sequence[PoolingParams]],
1452
+ *,
1453
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
1454
+ lora_request: Optional[Union[Sequence[LoRARequest], LoRARequest]],
1455
+ prompt_adapter_request: Optional[PromptAdapterRequest],
1456
+ tokenization_kwargs: Optional[dict[str, Any]] = None,
1457
+ guided_options: Optional[GuidedDecodingRequest] = None,
1458
+ priority: Optional[list[int]] = None,
1459
+ ) -> None:
1460
+ if guided_options is not None:
1461
+ warnings.warn(
1462
+ "guided_options_request is deprecated, use "
1463
+ "SamplingParams.guided_decoding instead",
1464
+ DeprecationWarning,
1465
+ stacklevel=2,
1466
+ )
1467
+
1468
+ if isinstance(prompts, (str, dict)):
1469
+ # Convert a single prompt to a list.
1470
+ prompts = [prompts]
1471
+
1472
+ num_requests = len(prompts)
1473
+ if isinstance(params, Sequence) and len(params) != num_requests:
1474
+ raise ValueError("The lengths of prompts and params "
1475
+ "must be the same.")
1476
+ if isinstance(lora_request,
1477
+ Sequence) and len(lora_request) != num_requests:
1478
+ raise ValueError("The lengths of prompts and lora_request "
1479
+ "must be the same.")
1480
+
1481
+ for sp in params if isinstance(params, Sequence) else (params, ):
1482
+ if isinstance(sp, SamplingParams):
1483
+ self._add_guided_params(sp, guided_options)
1484
+
1485
+ # We only care about the final output
1486
+ sp.output_kind = RequestOutputKind.FINAL_ONLY
1487
+
1488
+ # Add requests to the engine.
1489
+ it = prompts
1490
+ if use_tqdm:
1491
+ tqdm_func = use_tqdm if callable(use_tqdm) else tqdm
1492
+ it = tqdm_func(it, desc="Adding requests")
1493
+
1494
+ for i, prompt in enumerate(it):
1495
+ self._add_request(
1496
+ prompt,
1497
+ params[i] if isinstance(params, Sequence) else params,
1498
+ tokenization_kwargs=tokenization_kwargs,
1499
+ lora_request=lora_request[i] if isinstance(
1500
+ lora_request, Sequence) else lora_request,
1501
+ prompt_adapter_request=prompt_adapter_request,
1502
+ priority=priority[i] if priority else 0,
1503
+ )
1504
+
1505
+ def _add_request(
1506
+ self,
1507
+ prompt: PromptType,
1508
+ params: Union[SamplingParams, PoolingParams],
1509
+ tokenization_kwargs: Optional[dict[str, Any]] = None,
1510
+ lora_request: Optional[LoRARequest] = None,
1511
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
1512
+ priority: int = 0,
1513
+ ) -> None:
1514
+ request_id = str(next(self.request_counter))
1515
+ self.llm_engine.add_request(
1516
+ request_id,
1517
+ prompt,
1518
+ params,
1519
+ lora_request=lora_request,
1520
+ tokenization_kwargs=tokenization_kwargs,
1521
+ prompt_adapter_request=prompt_adapter_request,
1522
+ priority=priority,
1523
+ )
1524
+
1525
+ def _add_guided_params(
1526
+ self,
1527
+ params: SamplingParams,
1528
+ guided_options: Optional[GuidedDecodingRequest] = None):
1529
+ if guided_options is None:
1530
+ return params
1531
+
1532
+ if params.guided_decoding is not None:
1533
+ raise ValueError("Cannot set both guided_options_request and "
1534
+ "params.guided_decoding.")
1535
+
1536
+ params.guided_decoding = GuidedDecodingParams(
1537
+ json=guided_options.guided_json,
1538
+ regex=guided_options.guided_regex,
1539
+ choice=guided_options.guided_choice,
1540
+ grammar=guided_options.guided_grammar,
1541
+ json_object=guided_options.guided_json_object,
1542
+ backend=guided_options.guided_decoding_backend,
1543
+ whitespace_pattern=guided_options.guided_whitespace_pattern,
1544
+ structural_tag=guided_options.structural_tag,
1545
+ )
1546
+ return params
1547
+
1548
+ def _run_engine(
1549
+ self,
1550
+ *,
1551
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True
1552
+ ) -> list[Union[RequestOutput, PoolingRequestOutput]]:
1553
+ # Initialize tqdm.
1554
+ if use_tqdm:
1555
+ num_requests = self.llm_engine.get_num_unfinished_requests()
1556
+ tqdm_func = use_tqdm if callable(use_tqdm) else tqdm
1557
+ pbar = tqdm_func(
1558
+ total=num_requests,
1559
+ desc="Processed prompts",
1560
+ dynamic_ncols=True,
1561
+ postfix=(f"est. speed input: {0:.2f} toks/s, "
1562
+ f"output: {0:.2f} toks/s"),
1563
+ )
1564
+
1565
+ # Run the engine.
1566
+ outputs: list[Union[RequestOutput, PoolingRequestOutput]] = []
1567
+ total_in_toks = 0
1568
+ total_out_toks = 0
1569
+ while self.llm_engine.has_unfinished_requests():
1570
+ step_outputs = self.llm_engine.step()
1571
+ for output in step_outputs:
1572
+ if output.finished:
1573
+ outputs.append(output)
1574
+ if use_tqdm:
1575
+ if isinstance(output, RequestOutput):
1576
+ # Calculate tokens only for RequestOutput
1577
+ n = len(output.outputs)
1578
+ assert output.prompt_token_ids is not None
1579
+ total_in_toks += len(output.prompt_token_ids) * n
1580
+ in_spd = total_in_toks / pbar.format_dict["elapsed"]
1581
+ total_out_toks += sum(
1582
+ len(stp.token_ids) for stp in output.outputs)
1583
+ out_spd = (total_out_toks /
1584
+ pbar.format_dict["elapsed"])
1585
+ pbar.postfix = (
1586
+ f"est. speed input: {in_spd:.2f} toks/s, "
1587
+ f"output: {out_spd:.2f} toks/s")
1588
+ pbar.update(n)
1589
+ else:
1590
+ pbar.update(1)
1591
+ if pbar.n == num_requests:
1592
+ pbar.refresh()
1593
+
1594
+ if use_tqdm:
1595
+ pbar.close()
1596
+ # Sort the outputs by request ID.
1597
+ # This is necessary because some requests may finish earlier than
1598
+ # requests that were submitted before them.
1599
+ return sorted(outputs, key=lambda x: int(x.request_id))