vllm-cpu-amxbf16 0.9.1 (cp312-cp312-manylinux_2_17_x86_64.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (1197)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +53 -0
  3. vllm/_custom_ops.py +1828 -0
  4. vllm/_ipex_ops.py +244 -0
  5. vllm/_version.py +34 -0
  6. vllm/adapter_commons/__init__.py +0 -0
  7. vllm/adapter_commons/layers.py +16 -0
  8. vllm/adapter_commons/models.py +106 -0
  9. vllm/adapter_commons/request.py +26 -0
  10. vllm/adapter_commons/utils.py +93 -0
  11. vllm/adapter_commons/worker_manager.py +39 -0
  12. vllm/assets/__init__.py +0 -0
  13. vllm/assets/audio.py +45 -0
  14. vllm/assets/base.py +41 -0
  15. vllm/assets/image.py +34 -0
  16. vllm/assets/video.py +115 -0
  17. vllm/attention/__init__.py +20 -0
  18. vllm/attention/backends/__init__.py +0 -0
  19. vllm/attention/backends/abstract.py +308 -0
  20. vllm/attention/backends/blocksparse_attn.py +461 -0
  21. vllm/attention/backends/cpu_mla.py +307 -0
  22. vllm/attention/backends/dual_chunk_flash_attn.py +1498 -0
  23. vllm/attention/backends/flash_attn.py +1003 -0
  24. vllm/attention/backends/flashinfer.py +1104 -0
  25. vllm/attention/backends/flashmla.py +244 -0
  26. vllm/attention/backends/hpu_attn.py +313 -0
  27. vllm/attention/backends/ipex_attn.py +398 -0
  28. vllm/attention/backends/mla/__init__.py +0 -0
  29. vllm/attention/backends/mla/common.py +1385 -0
  30. vllm/attention/backends/pallas.py +351 -0
  31. vllm/attention/backends/placeholder_attn.py +400 -0
  32. vllm/attention/backends/rocm_aiter_mla.py +435 -0
  33. vllm/attention/backends/rocm_flash_attn.py +975 -0
  34. vllm/attention/backends/torch_sdpa.py +703 -0
  35. vllm/attention/backends/triton_mla.py +115 -0
  36. vllm/attention/backends/utils.py +610 -0
  37. vllm/attention/backends/xformers.py +802 -0
  38. vllm/attention/layer.py +468 -0
  39. vllm/attention/ops/__init__.py +0 -0
  40. vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
  41. vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +433 -0
  42. vllm/attention/ops/blocksparse_attention/interface.py +239 -0
  43. vllm/attention/ops/blocksparse_attention/utils.py +246 -0
  44. vllm/attention/ops/chunked_prefill_paged_decode.py +368 -0
  45. vllm/attention/ops/flashmla.py +116 -0
  46. vllm/attention/ops/hpu_paged_attn.py +88 -0
  47. vllm/attention/ops/ipex_attn.py +195 -0
  48. vllm/attention/ops/merge_attn_states.py +43 -0
  49. vllm/attention/ops/nki_flash_attn.py +906 -0
  50. vllm/attention/ops/paged_attn.py +256 -0
  51. vllm/attention/ops/prefix_prefill.py +902 -0
  52. vllm/attention/ops/rocm_aiter_mla.py +100 -0
  53. vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
  54. vllm/attention/ops/triton_decode_attention.py +674 -0
  55. vllm/attention/ops/triton_flash_attention.py +979 -0
  56. vllm/attention/ops/triton_merge_attn_states.py +97 -0
  57. vllm/attention/ops/triton_unified_attention.py +334 -0
  58. vllm/attention/selector.py +187 -0
  59. vllm/attention/utils/fa_utils.py +55 -0
  60. vllm/beam_search.py +87 -0
  61. vllm/benchmarks/__init__.py +0 -0
  62. vllm/benchmarks/datasets.py +1185 -0
  63. vllm/benchmarks/endpoint_request_func.py +381 -0
  64. vllm/benchmarks/latency.py +168 -0
  65. vllm/benchmarks/serve.py +1135 -0
  66. vllm/benchmarks/throughput.py +609 -0
  67. vllm/benchmarks/utils.py +70 -0
  68. vllm/collect_env.py +820 -0
  69. vllm/compilation/__init__.py +0 -0
  70. vllm/compilation/activation_quant_fusion.py +89 -0
  71. vllm/compilation/backends.py +563 -0
  72. vllm/compilation/base_piecewise_backend.py +72 -0
  73. vllm/compilation/collective_fusion.py +127 -0
  74. vllm/compilation/compiler_interface.py +544 -0
  75. vllm/compilation/counter.py +38 -0
  76. vllm/compilation/cuda_piecewise_backend.py +214 -0
  77. vllm/compilation/decorators.py +250 -0
  78. vllm/compilation/fix_functionalization.py +191 -0
  79. vllm/compilation/fusion.py +618 -0
  80. vllm/compilation/fx_utils.py +62 -0
  81. vllm/compilation/inductor_pass.py +115 -0
  82. vllm/compilation/monitor.py +39 -0
  83. vllm/compilation/multi_output_match.py +109 -0
  84. vllm/compilation/noop_elimination.py +137 -0
  85. vllm/compilation/pass_manager.py +78 -0
  86. vllm/compilation/sequence_parallelism.py +268 -0
  87. vllm/compilation/torch25_custom_graph_pass.py +42 -0
  88. vllm/compilation/vllm_inductor_pass.py +67 -0
  89. vllm/compilation/wrapper.py +135 -0
  90. vllm/config.py +4746 -0
  91. vllm/connections.py +174 -0
  92. vllm/core/__init__.py +0 -0
  93. vllm/core/block/__init__.py +0 -0
  94. vllm/core/block/block_table.py +399 -0
  95. vllm/core/block/common.py +371 -0
  96. vllm/core/block/cpu_gpu_block_allocator.py +441 -0
  97. vllm/core/block/interfaces.py +319 -0
  98. vllm/core/block/naive_block.py +466 -0
  99. vllm/core/block/prefix_caching_block.py +1135 -0
  100. vllm/core/block/utils.py +28 -0
  101. vllm/core/block_manager.py +521 -0
  102. vllm/core/evictor.py +157 -0
  103. vllm/core/interfaces.py +135 -0
  104. vllm/core/placeholder_block_space_manager.py +100 -0
  105. vllm/core/scheduler.py +2093 -0
  106. vllm/device_allocator/__init__.py +0 -0
  107. vllm/device_allocator/cumem.py +281 -0
  108. vllm/distributed/__init__.py +6 -0
  109. vllm/distributed/communication_op.py +41 -0
  110. vllm/distributed/device_communicators/__init__.py +0 -0
  111. vllm/distributed/device_communicators/all2all.py +264 -0
  112. vllm/distributed/device_communicators/base_device_communicator.py +260 -0
  113. vllm/distributed/device_communicators/cpu_communicator.py +145 -0
  114. vllm/distributed/device_communicators/cuda_communicator.py +176 -0
  115. vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
  116. vllm/distributed/device_communicators/custom_all_reduce.py +304 -0
  117. vllm/distributed/device_communicators/custom_all_reduce_utils.py +259 -0
  118. vllm/distributed/device_communicators/hpu_communicator.py +46 -0
  119. vllm/distributed/device_communicators/neuron_communicator.py +20 -0
  120. vllm/distributed/device_communicators/pynccl.py +218 -0
  121. vllm/distributed/device_communicators/pynccl_wrapper.py +341 -0
  122. vllm/distributed/device_communicators/shm_broadcast.py +585 -0
  123. vllm/distributed/device_communicators/tpu_communicator.py +103 -0
  124. vllm/distributed/device_communicators/xpu_communicator.py +55 -0
  125. vllm/distributed/kv_events.py +356 -0
  126. vllm/distributed/kv_transfer/README.md +29 -0
  127. vllm/distributed/kv_transfer/__init__.py +12 -0
  128. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  129. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  130. vllm/distributed/kv_transfer/kv_connector/base.py +128 -0
  131. vllm/distributed/kv_transfer/kv_connector/factory.py +128 -0
  132. vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +99 -0
  133. vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +203 -0
  134. vllm/distributed/kv_transfer/kv_connector/simple_connector.py +329 -0
  135. vllm/distributed/kv_transfer/kv_connector/utils.py +108 -0
  136. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
  137. vllm/distributed/kv_transfer/kv_connector/v1/base.py +283 -0
  138. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +134 -0
  139. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +201 -0
  140. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1030 -0
  141. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +384 -0
  142. vllm/distributed/kv_transfer/kv_connector_agent.py +77 -0
  143. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  144. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
  145. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
  146. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
  147. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  148. vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
  149. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +280 -0
  150. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
  151. vllm/distributed/kv_transfer/kv_transfer_state.py +71 -0
  152. vllm/distributed/parallel_state.py +1296 -0
  153. vllm/distributed/tpu_distributed_utils.py +177 -0
  154. vllm/distributed/utils.py +536 -0
  155. vllm/engine/__init__.py +0 -0
  156. vllm/engine/arg_utils.py +1708 -0
  157. vllm/engine/async_llm_engine.py +1200 -0
  158. vllm/engine/async_timeout.py +173 -0
  159. vllm/engine/llm_engine.py +2097 -0
  160. vllm/engine/metrics.py +629 -0
  161. vllm/engine/metrics_types.py +94 -0
  162. vllm/engine/multiprocessing/__init__.py +148 -0
  163. vllm/engine/multiprocessing/client.py +681 -0
  164. vllm/engine/multiprocessing/engine.py +460 -0
  165. vllm/engine/output_processor/__init__.py +0 -0
  166. vllm/engine/output_processor/interfaces.py +75 -0
  167. vllm/engine/output_processor/multi_step.py +216 -0
  168. vllm/engine/output_processor/single_step.py +145 -0
  169. vllm/engine/output_processor/stop_checker.py +131 -0
  170. vllm/engine/output_processor/util.py +28 -0
  171. vllm/engine/protocol.py +317 -0
  172. vllm/entrypoints/__init__.py +0 -0
  173. vllm/entrypoints/api_server.py +178 -0
  174. vllm/entrypoints/chat_utils.py +1299 -0
  175. vllm/entrypoints/cli/__init__.py +0 -0
  176. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  177. vllm/entrypoints/cli/benchmark/base.py +39 -0
  178. vllm/entrypoints/cli/benchmark/latency.py +30 -0
  179. vllm/entrypoints/cli/benchmark/main.py +54 -0
  180. vllm/entrypoints/cli/benchmark/serve.py +30 -0
  181. vllm/entrypoints/cli/benchmark/throughput.py +30 -0
  182. vllm/entrypoints/cli/collect_env.py +35 -0
  183. vllm/entrypoints/cli/main.py +65 -0
  184. vllm/entrypoints/cli/openai.py +205 -0
  185. vllm/entrypoints/cli/run_batch.py +62 -0
  186. vllm/entrypoints/cli/serve.py +328 -0
  187. vllm/entrypoints/cli/types.py +25 -0
  188. vllm/entrypoints/launcher.py +147 -0
  189. vllm/entrypoints/llm.py +1544 -0
  190. vllm/entrypoints/logger.py +50 -0
  191. vllm/entrypoints/openai/__init__.py +0 -0
  192. vllm/entrypoints/openai/api_server.py +1387 -0
  193. vllm/entrypoints/openai/cli_args.py +315 -0
  194. vllm/entrypoints/openai/logits_processors.py +90 -0
  195. vllm/entrypoints/openai/protocol.py +1913 -0
  196. vllm/entrypoints/openai/run_batch.py +463 -0
  197. vllm/entrypoints/openai/serving_chat.py +1221 -0
  198. vllm/entrypoints/openai/serving_classification.py +160 -0
  199. vllm/entrypoints/openai/serving_completion.py +592 -0
  200. vllm/entrypoints/openai/serving_embedding.py +201 -0
  201. vllm/entrypoints/openai/serving_engine.py +986 -0
  202. vllm/entrypoints/openai/serving_models.py +315 -0
  203. vllm/entrypoints/openai/serving_pooling.py +232 -0
  204. vllm/entrypoints/openai/serving_score.py +433 -0
  205. vllm/entrypoints/openai/serving_tokenization.py +157 -0
  206. vllm/entrypoints/openai/serving_transcription.py +424 -0
  207. vllm/entrypoints/openai/tool_parsers/__init__.py +23 -0
  208. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
  209. vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
  210. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
  211. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
  212. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +371 -0
  213. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
  214. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
  215. vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
  216. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +267 -0
  217. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
  218. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
  219. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
  220. vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
  221. vllm/entrypoints/score_utils.py +50 -0
  222. vllm/entrypoints/ssl.py +75 -0
  223. vllm/entrypoints/utils.py +233 -0
  224. vllm/env_override.py +41 -0
  225. vllm/envs.py +944 -0
  226. vllm/executor/__init__.py +0 -0
  227. vllm/executor/executor_base.py +401 -0
  228. vllm/executor/mp_distributed_executor.py +244 -0
  229. vllm/executor/msgspec_utils.py +30 -0
  230. vllm/executor/multiproc_worker_utils.py +313 -0
  231. vllm/executor/ray_distributed_executor.py +701 -0
  232. vllm/executor/ray_utils.py +399 -0
  233. vllm/executor/uniproc_executor.py +139 -0
  234. vllm/forward_context.py +179 -0
  235. vllm/inputs/__init__.py +41 -0
  236. vllm/inputs/data.py +331 -0
  237. vllm/inputs/parse.py +151 -0
  238. vllm/inputs/preprocess.py +909 -0
  239. vllm/inputs/registry.py +237 -0
  240. vllm/jsontree.py +80 -0
  241. vllm/logger.py +212 -0
  242. vllm/logging_utils/__init__.py +8 -0
  243. vllm/logging_utils/dump_input.py +85 -0
  244. vllm/logging_utils/formatter.py +18 -0
  245. vllm/logits_process.py +119 -0
  246. vllm/lora/__init__.py +0 -0
  247. vllm/lora/fully_sharded_layers.py +355 -0
  248. vllm/lora/layers.py +1285 -0
  249. vllm/lora/lora.py +199 -0
  250. vllm/lora/models.py +818 -0
  251. vllm/lora/ops/__init__.py +0 -0
  252. vllm/lora/ops/torch_ops/__init__.py +16 -0
  253. vllm/lora/ops/torch_ops/lora_ops.py +119 -0
  254. vllm/lora/ops/triton_ops/__init__.py +12 -0
  255. vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
  256. vllm/lora/ops/triton_ops/lora_expand_op.py +290 -0
  257. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
  258. vllm/lora/ops/triton_ops/lora_shrink_op.py +244 -0
  259. vllm/lora/ops/triton_ops/utils.py +120 -0
  260. vllm/lora/ops/xla_ops/__init__.py +7 -0
  261. vllm/lora/ops/xla_ops/lora_ops.py +145 -0
  262. vllm/lora/peft_helper.py +136 -0
  263. vllm/lora/punica_wrapper/__init__.py +10 -0
  264. vllm/lora/punica_wrapper/punica_base.py +485 -0
  265. vllm/lora/punica_wrapper/punica_cpu.py +349 -0
  266. vllm/lora/punica_wrapper/punica_gpu.py +290 -0
  267. vllm/lora/punica_wrapper/punica_hpu.py +145 -0
  268. vllm/lora/punica_wrapper/punica_selector.py +20 -0
  269. vllm/lora/punica_wrapper/punica_tpu.py +405 -0
  270. vllm/lora/punica_wrapper/utils.py +164 -0
  271. vllm/lora/request.py +99 -0
  272. vllm/lora/resolver.py +85 -0
  273. vllm/lora/utils.py +240 -0
  274. vllm/lora/worker_manager.py +259 -0
  275. vllm/model_executor/__init__.py +16 -0
  276. vllm/model_executor/custom_op.py +152 -0
  277. vllm/model_executor/guided_decoding/__init__.py +181 -0
  278. vllm/model_executor/guided_decoding/guidance_decoding.py +63 -0
  279. vllm/model_executor/guided_decoding/guidance_logits_processors.py +104 -0
  280. vllm/model_executor/guided_decoding/guided_fields.py +41 -0
  281. vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +67 -0
  282. vllm/model_executor/guided_decoding/outlines_decoding.py +155 -0
  283. vllm/model_executor/guided_decoding/outlines_logits_processors.py +284 -0
  284. vllm/model_executor/guided_decoding/utils.py +242 -0
  285. vllm/model_executor/guided_decoding/xgrammar_decoding.py +426 -0
  286. vllm/model_executor/layers/__init__.py +0 -0
  287. vllm/model_executor/layers/activation.py +369 -0
  288. vllm/model_executor/layers/fused_moe/__init__.py +54 -0
  289. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +125 -0
  290. vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +117 -0
  291. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  292. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  293. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  294. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  295. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  296. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  297. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  298. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  299. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  300. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  301. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  302. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  303. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  304. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  305. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  306. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  307. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  308. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  309. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  310. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  311. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  312. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  313. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  314. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  315. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  316. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  317. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  318. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  319. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  320. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  321. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  322. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  323. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  324. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  325. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  326. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  327. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  328. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  329. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  330. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  331. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  332. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  333. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  334. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  335. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  336. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  337. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  338. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  339. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  454. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  455. vllm/model_executor/layers/fused_moe/cutlass_moe.py +461 -0
  456. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +240 -0
  457. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +240 -0
  458. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +186 -0
  459. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +775 -0
  460. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +232 -0
  461. vllm/model_executor/layers/fused_moe/fused_moe.py +1724 -0
  462. vllm/model_executor/layers/fused_moe/layer.py +1535 -0
  463. vllm/model_executor/layers/fused_moe/modular_kernel.py +446 -0
  464. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +243 -0
  465. vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
  466. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +190 -0
  467. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  468. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +159 -0
  469. vllm/model_executor/layers/fused_moe/prepare_finalize.py +69 -0
  470. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +421 -0
  471. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +117 -0
  472. vllm/model_executor/layers/fused_moe/utils.py +98 -0
  473. vllm/model_executor/layers/layernorm.py +288 -0
  474. vllm/model_executor/layers/lightning_attn.py +652 -0
  475. vllm/model_executor/layers/linear.py +1524 -0
  476. vllm/model_executor/layers/logits_processor.py +197 -0
  477. vllm/model_executor/layers/mamba/__init__.py +0 -0
  478. vllm/model_executor/layers/mamba/mamba2_metadata.py +125 -0
  479. vllm/model_executor/layers/mamba/mamba_mixer.py +245 -0
  480. vllm/model_executor/layers/mamba/mamba_mixer2.py +616 -0
  481. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  482. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +105 -0
  483. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
  484. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +262 -0
  485. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +589 -0
  486. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +751 -0
  487. vllm/model_executor/layers/mamba/ops/ssd_combined.py +232 -0
  488. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +206 -0
  489. vllm/model_executor/layers/pooler.py +350 -0
  490. vllm/model_executor/layers/quantization/__init__.py +157 -0
  491. vllm/model_executor/layers/quantization/aqlm.py +376 -0
  492. vllm/model_executor/layers/quantization/auto_round.py +310 -0
  493. vllm/model_executor/layers/quantization/awq.py +194 -0
  494. vllm/model_executor/layers/quantization/awq_marlin.py +519 -0
  495. vllm/model_executor/layers/quantization/awq_triton.py +320 -0
  496. vllm/model_executor/layers/quantization/base_config.py +151 -0
  497. vllm/model_executor/layers/quantization/bitblas.py +461 -0
  498. vllm/model_executor/layers/quantization/bitsandbytes.py +396 -0
  499. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  500. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +668 -0
  501. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1260 -0
  502. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +24 -0
  503. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +358 -0
  504. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  505. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
  506. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +93 -0
  507. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +178 -0
  508. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
  509. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +150 -0
  510. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
  511. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
  512. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
  513. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  514. vllm/model_executor/layers/quantization/deepspeedfp.py +195 -0
  515. vllm/model_executor/layers/quantization/experts_int8.py +196 -0
  516. vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
  517. vllm/model_executor/layers/quantization/fp8.py +906 -0
  518. vllm/model_executor/layers/quantization/gguf.py +565 -0
  519. vllm/model_executor/layers/quantization/gptq.py +278 -0
  520. vllm/model_executor/layers/quantization/gptq_bitblas.py +445 -0
  521. vllm/model_executor/layers/quantization/gptq_marlin.py +648 -0
  522. vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
  523. vllm/model_executor/layers/quantization/hqq_marlin.py +332 -0
  524. vllm/model_executor/layers/quantization/ipex_quant.py +250 -0
  525. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  526. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +90 -0
  527. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +83 -0
  528. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
  529. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +300 -0
  530. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
  531. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +120 -0
  532. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +131 -0
  533. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
  534. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +87 -0
  535. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +120 -0
  536. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
  537. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
  538. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +105 -0
  539. vllm/model_executor/layers/quantization/kv_cache.py +139 -0
  540. vllm/model_executor/layers/quantization/marlin.py +261 -0
  541. vllm/model_executor/layers/quantization/modelopt.py +737 -0
  542. vllm/model_executor/layers/quantization/moe_wna16.py +449 -0
  543. vllm/model_executor/layers/quantization/neuron_quant.py +76 -0
  544. vllm/model_executor/layers/quantization/ptpc_fp8.py +127 -0
  545. vllm/model_executor/layers/quantization/qqq.py +275 -0
  546. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  547. vllm/model_executor/layers/quantization/quark/quark.py +441 -0
  548. vllm/model_executor/layers/quantization/quark/quark_moe.py +237 -0
  549. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  550. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  551. vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +126 -0
  552. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +146 -0
  553. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
  554. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  555. vllm/model_executor/layers/quantization/schema.py +86 -0
  556. vllm/model_executor/layers/quantization/torchao.py +161 -0
  557. vllm/model_executor/layers/quantization/tpu_int8.py +121 -0
  558. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  559. vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
  560. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +208 -0
  561. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  562. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  563. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  564. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  565. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  566. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  567. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  568. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  569. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  570. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  571. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  572. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  573. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  574. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  575. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  576. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  577. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  578. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  579. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  580. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  581. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  582. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  583. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  584. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  585. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  586. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  587. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  588. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  589. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  590. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  591. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  592. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  593. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  594. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  595. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  596. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  597. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  598. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  599. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  600. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  601. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  602. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  603. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  604. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  605. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  606. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  607. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  608. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  609. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  610. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  611. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  612. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  613. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  614. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  615. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  616. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  617. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  618. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  619. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  620. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  621. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  622. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  623. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  624. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  625. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  626. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  627. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  628. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  629. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  630. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  631. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  632. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  633. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  634. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  635. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  636. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  637. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  638. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  639. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  640. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  641. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  642. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  643. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  644. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  645. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  646. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  647. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  648. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  649. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  650. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  651. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  652. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  653. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  654. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  655. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  656. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  657. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  658. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  659. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  660. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  661. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  662. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  663. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  664. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  665. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  666. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  667. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  668. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  669. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  670. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  671. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  672. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  673. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  674. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  675. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  676. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  677. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  678. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  679. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  680. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  681. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  682. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  683. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  684. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  685. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  686. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  687. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  688. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  689. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  690. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  691. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  722. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  723. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  724. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  725. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  726. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  727. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  728. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  729. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  730. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  731. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  732. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  733. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  734. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  735. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  736. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  737. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  738. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  739. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  740. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  741. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  742. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  743. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  744. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  745. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  746. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  747. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  748. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  749. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  750. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  751. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  752. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  753. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  754. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  755. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  756. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  757. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  758. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  759. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  760. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  761. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  762. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  763. vllm/model_executor/layers/quantization/utils/fp8_utils.py +618 -0
  764. vllm/model_executor/layers/quantization/utils/gptq_utils.py +95 -0
  765. vllm/model_executor/layers/quantization/utils/int8_utils.py +485 -0
  766. vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
  767. vllm/model_executor/layers/quantization/utils/machete_utils.py +33 -0
  768. vllm/model_executor/layers/quantization/utils/marlin_utils.py +476 -0
  769. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +283 -0
  770. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +325 -0
  771. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
  772. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
  773. vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +126 -0
  774. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +45 -0
  775. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +104 -0
  776. vllm/model_executor/layers/quantization/utils/quant_utils.py +573 -0
  777. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +405 -0
  778. vllm/model_executor/layers/rejection_sampler.py +406 -0
  779. vllm/model_executor/layers/resampler.py +270 -0
  780. vllm/model_executor/layers/rotary_embedding.py +1862 -0
  781. vllm/model_executor/layers/sampler.py +1204 -0
  782. vllm/model_executor/layers/spec_decode_base_sampler.py +259 -0
  783. vllm/model_executor/layers/typical_acceptance_sampler.py +166 -0
  784. vllm/model_executor/layers/utils.py +95 -0
  785. vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
  786. vllm/model_executor/model_loader/__init__.py +76 -0
  787. vllm/model_executor/model_loader/base_loader.py +43 -0
  788. vllm/model_executor/model_loader/bitsandbytes_loader.py +570 -0
  789. vllm/model_executor/model_loader/default_loader.py +282 -0
  790. vllm/model_executor/model_loader/dummy_loader.py +27 -0
  791. vllm/model_executor/model_loader/gguf_loader.py +120 -0
  792. vllm/model_executor/model_loader/neuron.py +476 -0
  793. vllm/model_executor/model_loader/neuronx_distributed.py +685 -0
  794. vllm/model_executor/model_loader/runai_streamer_loader.py +109 -0
  795. vllm/model_executor/model_loader/sharded_state_loader.py +201 -0
  796. vllm/model_executor/model_loader/tensorizer.py +600 -0
  797. vllm/model_executor/model_loader/tensorizer_loader.py +123 -0
  798. vllm/model_executor/model_loader/tpu.py +112 -0
  799. vllm/model_executor/model_loader/utils.py +302 -0
  800. vllm/model_executor/model_loader/weight_utils.py +782 -0
  801. vllm/model_executor/models/__init__.py +28 -0
  802. vllm/model_executor/models/adapters.py +248 -0
  803. vllm/model_executor/models/aimv2.py +246 -0
  804. vllm/model_executor/models/arctic.py +559 -0
  805. vllm/model_executor/models/aria.py +657 -0
  806. vllm/model_executor/models/aya_vision.py +466 -0
  807. vllm/model_executor/models/baichuan.py +474 -0
  808. vllm/model_executor/models/bamba.py +543 -0
  809. vllm/model_executor/models/bart.py +938 -0
  810. vllm/model_executor/models/bert.py +523 -0
  811. vllm/model_executor/models/bert_with_rope.py +769 -0
  812. vllm/model_executor/models/blip.py +339 -0
  813. vllm/model_executor/models/blip2.py +718 -0
  814. vllm/model_executor/models/bloom.py +373 -0
  815. vllm/model_executor/models/chameleon.py +1136 -0
  816. vllm/model_executor/models/chatglm.py +478 -0
  817. vllm/model_executor/models/clip.py +407 -0
  818. vllm/model_executor/models/commandr.py +472 -0
  819. vllm/model_executor/models/constant_size_cache.py +137 -0
  820. vllm/model_executor/models/dbrx.py +472 -0
  821. vllm/model_executor/models/deepseek.py +486 -0
  822. vllm/model_executor/models/deepseek_mtp.py +269 -0
  823. vllm/model_executor/models/deepseek_v2.py +843 -0
  824. vllm/model_executor/models/deepseek_vl2.py +648 -0
  825. vllm/model_executor/models/eagle.py +260 -0
  826. vllm/model_executor/models/exaone.py +551 -0
  827. vllm/model_executor/models/fairseq2_llama.py +154 -0
  828. vllm/model_executor/models/falcon.py +510 -0
  829. vllm/model_executor/models/falcon_h1.py +685 -0
  830. vllm/model_executor/models/florence2.py +1103 -0
  831. vllm/model_executor/models/fuyu.py +389 -0
  832. vllm/model_executor/models/gemma.py +425 -0
  833. vllm/model_executor/models/gemma2.py +425 -0
  834. vllm/model_executor/models/gemma3.py +533 -0
  835. vllm/model_executor/models/gemma3_mm.py +709 -0
  836. vllm/model_executor/models/glm.py +23 -0
  837. vllm/model_executor/models/glm4.py +305 -0
  838. vllm/model_executor/models/glm4v.py +648 -0
  839. vllm/model_executor/models/gpt2.py +328 -0
  840. vllm/model_executor/models/gpt_bigcode.py +335 -0
  841. vllm/model_executor/models/gpt_j.py +339 -0
  842. vllm/model_executor/models/gpt_neox.py +332 -0
  843. vllm/model_executor/models/granite.py +493 -0
  844. vllm/model_executor/models/granite_speech.py +779 -0
  845. vllm/model_executor/models/granitemoe.py +437 -0
  846. vllm/model_executor/models/granitemoehybrid.py +586 -0
  847. vllm/model_executor/models/granitemoeshared.py +341 -0
  848. vllm/model_executor/models/gritlm.py +224 -0
  849. vllm/model_executor/models/grok1.py +546 -0
  850. vllm/model_executor/models/h2ovl.py +546 -0
  851. vllm/model_executor/models/idefics2_vision_model.py +389 -0
  852. vllm/model_executor/models/idefics3.py +776 -0
  853. vllm/model_executor/models/interfaces.py +572 -0
  854. vllm/model_executor/models/interfaces_base.py +164 -0
  855. vllm/model_executor/models/intern_vit.py +480 -0
  856. vllm/model_executor/models/internlm2.py +455 -0
  857. vllm/model_executor/models/internlm2_ve.py +147 -0
  858. vllm/model_executor/models/internvl.py +1418 -0
  859. vllm/model_executor/models/jais.py +373 -0
  860. vllm/model_executor/models/jamba.py +592 -0
  861. vllm/model_executor/models/kimi_vl.py +577 -0
  862. vllm/model_executor/models/llama.py +644 -0
  863. vllm/model_executor/models/llama4.py +532 -0
  864. vllm/model_executor/models/llama_eagle.py +165 -0
  865. vllm/model_executor/models/llama_eagle3.py +263 -0
  866. vllm/model_executor/models/llava.py +866 -0
  867. vllm/model_executor/models/llava_next.py +586 -0
  868. vllm/model_executor/models/llava_next_video.py +471 -0
  869. vllm/model_executor/models/llava_onevision.py +956 -0
  870. vllm/model_executor/models/mamba.py +273 -0
  871. vllm/model_executor/models/mamba2.py +308 -0
  872. vllm/model_executor/models/mamba_cache.py +76 -0
  873. vllm/model_executor/models/medusa.py +219 -0
  874. vllm/model_executor/models/mimo.py +192 -0
  875. vllm/model_executor/models/mimo_mtp.py +285 -0
  876. vllm/model_executor/models/minicpm.py +592 -0
  877. vllm/model_executor/models/minicpm3.py +230 -0
  878. vllm/model_executor/models/minicpm_eagle.py +391 -0
  879. vllm/model_executor/models/minicpmo.py +759 -0
  880. vllm/model_executor/models/minicpmv.py +1287 -0
  881. vllm/model_executor/models/minimax_cache.py +36 -0
  882. vllm/model_executor/models/minimax_text_01.py +1301 -0
  883. vllm/model_executor/models/minimax_vl_01.py +364 -0
  884. vllm/model_executor/models/mistral3.py +604 -0
  885. vllm/model_executor/models/mixtral.py +488 -0
  886. vllm/model_executor/models/mixtral_quant.py +453 -0
  887. vllm/model_executor/models/mllama.py +1624 -0
  888. vllm/model_executor/models/mllama4.py +938 -0
  889. vllm/model_executor/models/mlp_speculator.py +206 -0
  890. vllm/model_executor/models/modernbert.py +331 -0
  891. vllm/model_executor/models/module_mapping.py +72 -0
  892. vllm/model_executor/models/molmo.py +1568 -0
  893. vllm/model_executor/models/moonvit.py +630 -0
  894. vllm/model_executor/models/mpt.py +331 -0
  895. vllm/model_executor/models/nemotron.py +508 -0
  896. vllm/model_executor/models/nemotron_h.py +573 -0
  897. vllm/model_executor/models/nemotron_nas.py +484 -0
  898. vllm/model_executor/models/nvlm_d.py +216 -0
  899. vllm/model_executor/models/olmo.py +389 -0
  900. vllm/model_executor/models/olmo2.py +414 -0
  901. vllm/model_executor/models/olmoe.py +468 -0
  902. vllm/model_executor/models/opt.py +412 -0
  903. vllm/model_executor/models/orion.py +349 -0
  904. vllm/model_executor/models/ovis.py +567 -0
  905. vllm/model_executor/models/paligemma.py +398 -0
  906. vllm/model_executor/models/persimmon.py +344 -0
  907. vllm/model_executor/models/phi.py +356 -0
  908. vllm/model_executor/models/phi3.py +19 -0
  909. vllm/model_executor/models/phi3_small.py +465 -0
  910. vllm/model_executor/models/phi3v.py +723 -0
  911. vllm/model_executor/models/phi4mm.py +1246 -0
  912. vllm/model_executor/models/phi4mm_audio.py +1233 -0
  913. vllm/model_executor/models/phi4mm_utils.py +1884 -0
  914. vllm/model_executor/models/phimoe.py +665 -0
  915. vllm/model_executor/models/pixtral.py +1316 -0
  916. vllm/model_executor/models/plamo2.py +738 -0
  917. vllm/model_executor/models/prithvi_geospatial_mae.py +232 -0
  918. vllm/model_executor/models/qwen.py +362 -0
  919. vllm/model_executor/models/qwen2.py +497 -0
  920. vllm/model_executor/models/qwen2_5_omni_thinker.py +904 -0
  921. vllm/model_executor/models/qwen2_5_vl.py +1166 -0
  922. vllm/model_executor/models/qwen2_audio.py +410 -0
  923. vllm/model_executor/models/qwen2_moe.py +540 -0
  924. vllm/model_executor/models/qwen2_rm.py +132 -0
  925. vllm/model_executor/models/qwen2_vl.py +1405 -0
  926. vllm/model_executor/models/qwen3.py +321 -0
  927. vllm/model_executor/models/qwen3_moe.py +535 -0
  928. vllm/model_executor/models/qwen_vl.py +785 -0
  929. vllm/model_executor/models/registry.py +622 -0
  930. vllm/model_executor/models/roberta.py +276 -0
  931. vllm/model_executor/models/siglip.py +524 -0
  932. vllm/model_executor/models/skyworkr1v.py +951 -0
  933. vllm/model_executor/models/smolvlm.py +52 -0
  934. vllm/model_executor/models/solar.py +506 -0
  935. vllm/model_executor/models/stablelm.py +343 -0
  936. vllm/model_executor/models/starcoder2.py +356 -0
  937. vllm/model_executor/models/tarsier.py +643 -0
  938. vllm/model_executor/models/telechat2.py +140 -0
  939. vllm/model_executor/models/teleflm.py +79 -0
  940. vllm/model_executor/models/transformers.py +508 -0
  941. vllm/model_executor/models/ultravox.py +656 -0
  942. vllm/model_executor/models/utils.py +731 -0
  943. vllm/model_executor/models/vision.py +147 -0
  944. vllm/model_executor/models/whisper.py +747 -0
  945. vllm/model_executor/models/zamba2.py +1009 -0
  946. vllm/model_executor/parameter.py +459 -0
  947. vllm/model_executor/pooling_metadata.py +72 -0
  948. vllm/model_executor/sampling_metadata.py +597 -0
  949. vllm/model_executor/utils.py +77 -0
  950. vllm/multimodal/__init__.py +33 -0
  951. vllm/multimodal/audio.py +106 -0
  952. vllm/multimodal/base.py +219 -0
  953. vllm/multimodal/hasher.py +118 -0
  954. vllm/multimodal/image.py +97 -0
  955. vllm/multimodal/inputs.py +876 -0
  956. vllm/multimodal/parse.py +461 -0
  957. vllm/multimodal/processing.py +1895 -0
  958. vllm/multimodal/profiling.py +258 -0
  959. vllm/multimodal/registry.py +331 -0
  960. vllm/multimodal/utils.py +436 -0
  961. vllm/multimodal/video.py +198 -0
  962. vllm/outputs.py +512 -0
  963. vllm/platforms/__init__.py +291 -0
  964. vllm/platforms/cpu.py +266 -0
  965. vllm/platforms/cuda.py +526 -0
  966. vllm/platforms/hpu.py +106 -0
  967. vllm/platforms/interface.py +538 -0
  968. vllm/platforms/neuron.py +150 -0
  969. vllm/platforms/rocm.py +435 -0
  970. vllm/platforms/tpu.py +216 -0
  971. vllm/platforms/xpu.py +156 -0
  972. vllm/plugins/__init__.py +94 -0
  973. vllm/plugins/lora_resolvers/README.md +15 -0
  974. vllm/plugins/lora_resolvers/__init__.py +0 -0
  975. vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
  976. vllm/pooling_params.py +54 -0
  977. vllm/profiler/__init__.py +0 -0
  978. vllm/profiler/layerwise_profile.py +375 -0
  979. vllm/profiler/utils.py +148 -0
  980. vllm/prompt_adapter/__init__.py +0 -0
  981. vllm/prompt_adapter/layers.py +83 -0
  982. vllm/prompt_adapter/models.py +358 -0
  983. vllm/prompt_adapter/request.py +37 -0
  984. vllm/prompt_adapter/utils.py +98 -0
  985. vllm/prompt_adapter/worker_manager.py +179 -0
  986. vllm/py.typed +2 -0
  987. vllm/reasoning/__init__.py +15 -0
  988. vllm/reasoning/abs_reasoning_parsers.py +192 -0
  989. vllm/reasoning/deepseek_r1_reasoning_parser.py +173 -0
  990. vllm/reasoning/granite_reasoning_parser.py +363 -0
  991. vllm/reasoning/qwen3_reasoning_parser.py +151 -0
  992. vllm/sampling_params.py +602 -0
  993. vllm/scalar_type.py +347 -0
  994. vllm/scripts.py +15 -0
  995. vllm/sequence.py +1568 -0
  996. vllm/spec_decode/__init__.py +0 -0
  997. vllm/spec_decode/batch_expansion.py +506 -0
  998. vllm/spec_decode/draft_model_runner.py +349 -0
  999. vllm/spec_decode/interfaces.py +99 -0
  1000. vllm/spec_decode/medusa_worker.py +138 -0
  1001. vllm/spec_decode/metrics.py +213 -0
  1002. vllm/spec_decode/mlp_speculator_worker.py +94 -0
  1003. vllm/spec_decode/mqa_scorer.py +160 -0
  1004. vllm/spec_decode/multi_step_worker.py +423 -0
  1005. vllm/spec_decode/ngram_worker.py +196 -0
  1006. vllm/spec_decode/proposer_worker_base.py +59 -0
  1007. vllm/spec_decode/smaller_tp_proposer_worker.py +196 -0
  1008. vllm/spec_decode/spec_decode_worker.py +1326 -0
  1009. vllm/spec_decode/target_model_runner.py +45 -0
  1010. vllm/spec_decode/top1_proposer.py +275 -0
  1011. vllm/spec_decode/util.py +277 -0
  1012. vllm/test_utils.py +130 -0
  1013. vllm/third_party/__init__.py +0 -0
  1014. vllm/third_party/pynvml.py +6140 -0
  1015. vllm/tracing.py +131 -0
  1016. vllm/transformers_utils/__init__.py +24 -0
  1017. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1018. vllm/transformers_utils/chat_templates/registry.py +60 -0
  1019. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1020. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1021. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1022. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1023. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1024. vllm/transformers_utils/config.py +887 -0
  1025. vllm/transformers_utils/configs/__init__.py +61 -0
  1026. vllm/transformers_utils/configs/arctic.py +207 -0
  1027. vllm/transformers_utils/configs/chatglm.py +72 -0
  1028. vllm/transformers_utils/configs/cohere2.py +195 -0
  1029. vllm/transformers_utils/configs/dbrx.py +280 -0
  1030. vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
  1031. vllm/transformers_utils/configs/eagle.py +85 -0
  1032. vllm/transformers_utils/configs/exaone.py +190 -0
  1033. vllm/transformers_utils/configs/falcon.py +90 -0
  1034. vllm/transformers_utils/configs/h2ovl.py +16 -0
  1035. vllm/transformers_utils/configs/internvl.py +54 -0
  1036. vllm/transformers_utils/configs/jais.py +238 -0
  1037. vllm/transformers_utils/configs/kimi_vl.py +37 -0
  1038. vllm/transformers_utils/configs/medusa.py +63 -0
  1039. vllm/transformers_utils/configs/minimax_text_01.py +70 -0
  1040. vllm/transformers_utils/configs/minimax_vl_01.py +71 -0
  1041. vllm/transformers_utils/configs/mllama.py +31 -0
  1042. vllm/transformers_utils/configs/mlp_speculator.py +68 -0
  1043. vllm/transformers_utils/configs/moonvit.py +33 -0
  1044. vllm/transformers_utils/configs/mpt.py +180 -0
  1045. vllm/transformers_utils/configs/nemotron.py +205 -0
  1046. vllm/transformers_utils/configs/nemotron_h.py +258 -0
  1047. vllm/transformers_utils/configs/nvlm_d.py +15 -0
  1048. vllm/transformers_utils/configs/ovis.py +184 -0
  1049. vllm/transformers_utils/configs/skyworkr1v.py +54 -0
  1050. vllm/transformers_utils/configs/solar.py +247 -0
  1051. vllm/transformers_utils/configs/telechat2.py +64 -0
  1052. vllm/transformers_utils/configs/ultravox.py +108 -0
  1053. vllm/transformers_utils/detokenizer.py +168 -0
  1054. vllm/transformers_utils/detokenizer_utils.py +189 -0
  1055. vllm/transformers_utils/processor.py +221 -0
  1056. vllm/transformers_utils/processors/__init__.py +8 -0
  1057. vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
  1058. vllm/transformers_utils/processors/ovis.py +420 -0
  1059. vllm/transformers_utils/s3_utils.py +162 -0
  1060. vllm/transformers_utils/tokenizer.py +302 -0
  1061. vllm/transformers_utils/tokenizer_base.py +149 -0
  1062. vllm/transformers_utils/tokenizer_group.py +120 -0
  1063. vllm/transformers_utils/tokenizers/__init__.py +10 -0
  1064. vllm/transformers_utils/tokenizers/mistral.py +493 -0
  1065. vllm/transformers_utils/utils.py +99 -0
  1066. vllm/triton_utils/__init__.py +14 -0
  1067. vllm/triton_utils/importing.py +50 -0
  1068. vllm/usage/__init__.py +0 -0
  1069. vllm/usage/usage_lib.py +256 -0
  1070. vllm/utils.py +2910 -0
  1071. vllm/v1/__init__.py +0 -0
  1072. vllm/v1/attention/__init__.py +0 -0
  1073. vllm/v1/attention/backends/__init__.py +0 -0
  1074. vllm/v1/attention/backends/cpu_attn.py +163 -0
  1075. vllm/v1/attention/backends/flash_attn.py +869 -0
  1076. vllm/v1/attention/backends/flashinfer.py +651 -0
  1077. vllm/v1/attention/backends/flex_attention.py +477 -0
  1078. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1079. vllm/v1/attention/backends/mla/common.py +931 -0
  1080. vllm/v1/attention/backends/mla/cutlass_mla.py +97 -0
  1081. vllm/v1/attention/backends/mla/flashmla.py +152 -0
  1082. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +220 -0
  1083. vllm/v1/attention/backends/mla/triton_mla.py +120 -0
  1084. vllm/v1/attention/backends/pallas.py +240 -0
  1085. vllm/v1/attention/backends/triton_attn.py +285 -0
  1086. vllm/v1/attention/backends/utils.py +52 -0
  1087. vllm/v1/core/__init__.py +0 -0
  1088. vllm/v1/core/block_pool.py +349 -0
  1089. vllm/v1/core/encoder_cache_manager.py +150 -0
  1090. vllm/v1/core/kv_cache_coordinator.py +363 -0
  1091. vllm/v1/core/kv_cache_manager.py +392 -0
  1092. vllm/v1/core/kv_cache_utils.py +996 -0
  1093. vllm/v1/core/sched/__init__.py +0 -0
  1094. vllm/v1/core/sched/interface.py +150 -0
  1095. vllm/v1/core/sched/output.py +154 -0
  1096. vllm/v1/core/sched/scheduler.py +1044 -0
  1097. vllm/v1/core/sched/utils.py +23 -0
  1098. vllm/v1/core/single_type_kv_cache_manager.py +403 -0
  1099. vllm/v1/engine/__init__.py +173 -0
  1100. vllm/v1/engine/async_llm.py +558 -0
  1101. vllm/v1/engine/coordinator.py +253 -0
  1102. vllm/v1/engine/core.py +961 -0
  1103. vllm/v1/engine/core_client.py +1129 -0
  1104. vllm/v1/engine/detokenizer.py +261 -0
  1105. vllm/v1/engine/exceptions.py +17 -0
  1106. vllm/v1/engine/llm_engine.py +317 -0
  1107. vllm/v1/engine/logprobs.py +199 -0
  1108. vllm/v1/engine/mm_input_cache.py +91 -0
  1109. vllm/v1/engine/output_processor.py +428 -0
  1110. vllm/v1/engine/parallel_sampling.py +133 -0
  1111. vllm/v1/engine/processor.py +407 -0
  1112. vllm/v1/executor/__init__.py +0 -0
  1113. vllm/v1/executor/abstract.py +113 -0
  1114. vllm/v1/executor/multiproc_executor.py +537 -0
  1115. vllm/v1/executor/ray_distributed_executor.py +62 -0
  1116. vllm/v1/kv_cache_interface.py +194 -0
  1117. vllm/v1/metrics/__init__.py +0 -0
  1118. vllm/v1/metrics/loggers.py +523 -0
  1119. vllm/v1/metrics/prometheus.py +82 -0
  1120. vllm/v1/metrics/ray_wrappers.py +131 -0
  1121. vllm/v1/metrics/reader.py +246 -0
  1122. vllm/v1/metrics/stats.py +239 -0
  1123. vllm/v1/outputs.py +116 -0
  1124. vllm/v1/request.py +193 -0
  1125. vllm/v1/sample/__init__.py +0 -0
  1126. vllm/v1/sample/metadata.py +44 -0
  1127. vllm/v1/sample/ops/__init__.py +0 -0
  1128. vllm/v1/sample/ops/bad_words.py +39 -0
  1129. vllm/v1/sample/ops/penalties.py +59 -0
  1130. vllm/v1/sample/ops/topk_topp_sampler.py +293 -0
  1131. vllm/v1/sample/rejection_sampler.py +631 -0
  1132. vllm/v1/sample/sampler.py +286 -0
  1133. vllm/v1/sample/tpu/__init__.py +0 -0
  1134. vllm/v1/sample/tpu/metadata.py +124 -0
  1135. vllm/v1/sample/tpu/sampler.py +145 -0
  1136. vllm/v1/serial_utils.py +315 -0
  1137. vllm/v1/spec_decode/__init__.py +0 -0
  1138. vllm/v1/spec_decode/eagle.py +432 -0
  1139. vllm/v1/spec_decode/medusa.py +62 -0
  1140. vllm/v1/spec_decode/metadata.py +62 -0
  1141. vllm/v1/spec_decode/metrics.py +178 -0
  1142. vllm/v1/spec_decode/ngram_proposer.py +132 -0
  1143. vllm/v1/spec_decode/utils.py +46 -0
  1144. vllm/v1/structured_output/__init__.py +222 -0
  1145. vllm/v1/structured_output/backend_guidance.py +245 -0
  1146. vllm/v1/structured_output/backend_types.py +134 -0
  1147. vllm/v1/structured_output/backend_xgrammar.py +318 -0
  1148. vllm/v1/structured_output/request.py +86 -0
  1149. vllm/v1/structured_output/utils.py +175 -0
  1150. vllm/v1/utils.py +743 -0
  1151. vllm/v1/worker/__init__.py +0 -0
  1152. vllm/v1/worker/block_table.py +142 -0
  1153. vllm/v1/worker/cpu_model_runner.py +86 -0
  1154. vllm/v1/worker/cpu_worker.py +152 -0
  1155. vllm/v1/worker/gpu_input_batch.py +681 -0
  1156. vllm/v1/worker/gpu_model_runner.py +2320 -0
  1157. vllm/v1/worker/gpu_worker.py +393 -0
  1158. vllm/v1/worker/lora_model_runner_mixin.py +173 -0
  1159. vllm/v1/worker/tpu_model_runner.py +1673 -0
  1160. vllm/v1/worker/tpu_worker.py +299 -0
  1161. vllm/v1/worker/utils.py +111 -0
  1162. vllm/v1/worker/worker_base.py +65 -0
  1163. vllm/version.py +41 -0
  1164. vllm/vllm_flash_attn/.gitkeep +0 -0
  1165. vllm/worker/__init__.py +0 -0
  1166. vllm/worker/cache_engine.py +145 -0
  1167. vllm/worker/cpu_enc_dec_model_runner.py +326 -0
  1168. vllm/worker/cpu_model_runner.py +671 -0
  1169. vllm/worker/cpu_pooling_model_runner.py +125 -0
  1170. vllm/worker/cpu_worker.py +450 -0
  1171. vllm/worker/enc_dec_model_runner.py +555 -0
  1172. vllm/worker/hpu_model_runner.py +2320 -0
  1173. vllm/worker/hpu_worker.py +484 -0
  1174. vllm/worker/model_runner.py +2178 -0
  1175. vllm/worker/model_runner_base.py +282 -0
  1176. vllm/worker/multi_step_hpu_worker.py +123 -0
  1177. vllm/worker/multi_step_model_runner.py +911 -0
  1178. vllm/worker/multi_step_neuron_model_runner.py +84 -0
  1179. vllm/worker/multi_step_neuronx_distributed_model_runner.py +63 -0
  1180. vllm/worker/multi_step_tpu_worker.py +108 -0
  1181. vllm/worker/multi_step_worker.py +197 -0
  1182. vllm/worker/neuron_model_runner.py +460 -0
  1183. vllm/worker/neuron_worker.py +193 -0
  1184. vllm/worker/neuronx_distributed_model_runner.py +294 -0
  1185. vllm/worker/pooling_model_runner.py +211 -0
  1186. vllm/worker/tpu_model_runner.py +909 -0
  1187. vllm/worker/tpu_worker.py +337 -0
  1188. vllm/worker/utils.py +53 -0
  1189. vllm/worker/worker.py +577 -0
  1190. vllm/worker/worker_base.py +646 -0
  1191. vllm/worker/xpu_model_runner.py +606 -0
  1192. vllm/worker/xpu_worker.py +186 -0
  1193. vllm_cpu_amxbf16-0.9.1.dist-info/METADATA +305 -0
  1194. vllm_cpu_amxbf16-0.9.1.dist-info/RECORD +1197 -0
  1195. vllm_cpu_amxbf16-0.9.1.dist-info/WHEEL +5 -0
  1196. vllm_cpu_amxbf16-0.9.1.dist-info/entry_points.txt +5 -0
  1197. vllm_cpu_amxbf16-0.9.1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1544 @@
+# SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+
+import itertools
+import warnings
+from collections.abc import Sequence
+from contextlib import contextmanager
+from typing import (TYPE_CHECKING, Any, Callable, ClassVar, Optional, Union,
+                    cast, overload)
+
+import cloudpickle
+import torch.nn as nn
+from tqdm.auto import tqdm
+from typing_extensions import TypeVar, deprecated
+
+from vllm.beam_search import (BeamSearchInstance, BeamSearchOutput,
+                              BeamSearchSequence, get_beam_search_score)
+from vllm.config import (CompilationConfig, ModelDType, TokenizerMode,
+                         is_init_field)
+from vllm.engine.arg_utils import (EngineArgs, HfOverrides, PoolerConfig,
+                                   TaskOption)
+from vllm.engine.llm_engine import LLMEngine
+from vllm.entrypoints.chat_utils import (ChatCompletionMessageParam,
+                                         ChatTemplateContentFormatOption,
+                                         apply_hf_chat_template,
+                                         apply_mistral_chat_template,
+                                         parse_chat_messages,
+                                         resolve_chat_template_content_format)
+from vllm.entrypoints.score_utils import (_cosine_similarity,
+                                          _validate_score_input_lens)
+from vllm.entrypoints.utils import _validate_truncation_size
+from vllm.inputs import PromptType, SingletonPrompt, TextPrompt, TokensPrompt
+from vllm.inputs.parse import parse_and_batch_prompt
+from vllm.logger import init_logger
+from vllm.lora.request import LoRARequest
+from vllm.model_executor.guided_decoding.guided_fields import (
+    GuidedDecodingRequest, LLMGuidedOptions)
+from vllm.model_executor.layers.quantization import QuantizationMethods
+from vllm.outputs import (ClassificationRequestOutput, EmbeddingRequestOutput,
+                          PoolingRequestOutput, RequestOutput,
+                          ScoringRequestOutput)
+from vllm.pooling_params import PoolingParams
+from vllm.prompt_adapter.request import PromptAdapterRequest
+from vllm.sampling_params import (BeamSearchParams, GuidedDecodingParams,
+                                  RequestOutputKind, SamplingParams)
+from vllm.transformers_utils.tokenizer import (AnyTokenizer, MistralTokenizer,
+                                               get_cached_tokenizer)
+from vllm.usage.usage_lib import UsageContext
+from vllm.utils import Counter, Device, deprecate_kwargs, is_list_of
+
+if TYPE_CHECKING:
+    from vllm.v1.metrics.reader import Metric
+
+logger = init_logger(__name__)
+
+_R = TypeVar("_R", default=Any)
+
+
+class LLM:
+    """An LLM for generating texts from given prompts and sampling parameters.
+
+    This class includes a tokenizer, a language model (possibly distributed
+    across multiple GPUs), and GPU memory space allocated for intermediate
+    states (aka KV cache). Given a batch of prompts and sampling parameters,
+    this class generates texts from the model, using an intelligent batching
+    mechanism and efficient memory management.
+
+    Args:
+        model: The name or path of a HuggingFace Transformers model.
+        tokenizer: The name or path of a HuggingFace Transformers tokenizer.
+        tokenizer_mode: The tokenizer mode. "auto" will use the fast tokenizer
+            if available, and "slow" will always use the slow tokenizer.
+        skip_tokenizer_init: If true, skip initialization of tokenizer and
+            detokenizer. Expect valid prompt_token_ids and None for prompt
+            from the input.
+        trust_remote_code: Trust remote code (e.g., from HuggingFace) when
+            downloading the model and tokenizer.
+        allowed_local_media_path: Allowing API requests to read local images
+            or videos from directories specified by the server file system.
+            This is a security risk. Should only be enabled in trusted
+            environments.
+        tensor_parallel_size: The number of GPUs to use for distributed
+            execution with tensor parallelism.
+        dtype: The data type for the model weights and activations. Currently,
+            we support `float32`, `float16`, and `bfloat16`. If `auto`, we use
+            the `torch_dtype` attribute specified in the model config file.
+            However, if the `torch_dtype` in the config is `float32`, we will
+            use `float16` instead.
+        quantization: The method used to quantize the model weights. Currently,
+            we support "awq", "gptq", and "fp8" (experimental).
+            If None, we first check the `quantization_config` attribute in the
+            model config file. If that is None, we assume the model weights are
+            not quantized and use `dtype` to determine the data type of
+            the weights.
+        revision: The specific model version to use. It can be a branch name,
+            a tag name, or a commit id.
+        tokenizer_revision: The specific tokenizer version to use. It can be a
+            branch name, a tag name, or a commit id.
+        seed: The seed to initialize the random number generator for sampling.
+        gpu_memory_utilization: The ratio (between 0 and 1) of GPU memory to
+            reserve for the model weights, activations, and KV cache. Higher
+            values will increase the KV cache size and thus improve the model's
+            throughput. However, if the value is too high, it may cause out-of-
+            memory (OOM) errors.
+        swap_space: The size (GiB) of CPU memory per GPU to use as swap space.
+            This can be used for temporarily storing the states of the requests
+            when their `best_of` sampling parameters are larger than 1. If all
+            requests will have `best_of=1`, you can safely set this to 0.
+            Noting that `best_of` is only supported in V0. Otherwise, too small
+            values may cause out-of-memory (OOM) errors.
+        cpu_offload_gb: The size (GiB) of CPU memory to use for offloading
+            the model weights. This virtually increases the GPU memory space
+            you can use to hold the model weights, at the cost of CPU-GPU data
+            transfer for every forward pass.
+        enforce_eager: Whether to enforce eager execution. If True, we will
+            disable CUDA graph and always execute the model in eager mode.
+            If False, we will use CUDA graph and eager execution in hybrid.
+        max_seq_len_to_capture: Maximum sequence len covered by CUDA graphs.
+            When a sequence has context length larger than this, we fall back
+            to eager mode. Additionally for encoder-decoder models, if the
+            sequence length of the encoder input is larger than this, we fall
+            back to the eager mode.
+        disable_custom_all_reduce: See
+            [ParallelConfig][vllm.config.ParallelConfig].
+        disable_async_output_proc: Disable async output processing.
+            This may result in lower performance.
+        hf_token: The token to use as HTTP bearer authorization for remote files
+            . If `True`, will use the token generated when running
+            `huggingface-cli login` (stored in `~/.huggingface`).
+        hf_overrides: If a dictionary, contains arguments to be forwarded to the
+            HuggingFace config. If a callable, it is called to update the
+            HuggingFace config.
+        compilation_config: Either an integer or a dictionary. If it is an
+            integer, it is used as the level of compilation optimization. If it
+            is a dictionary, it can specify the full compilation configuration.
+        **kwargs: Arguments for [`EngineArgs`][vllm.EngineArgs].
+
+    Note:
+        This class is intended to be used for offline inference. For online
+        serving, use the [AsyncLLMEngine][vllm.AsyncLLMEngine] class instead.
+    """
+
+    DEPRECATE_LEGACY: ClassVar[bool] = True
+    """A flag to toggle whether to deprecate the legacy generate/encode API."""
+
+    @classmethod
+    @contextmanager
+    def deprecate_legacy_api(cls):
+        cls.DEPRECATE_LEGACY = True
+
+        yield
+
+        cls.DEPRECATE_LEGACY = False
+
+    def __init__(
+        self,
+        model: str,
+        *,
+        task: TaskOption = "auto",
+        tokenizer: Optional[str] = None,
+        tokenizer_mode: TokenizerMode = "auto",
+        skip_tokenizer_init: bool = False,
+        trust_remote_code: bool = False,
+        allowed_local_media_path: str = "",
+        tensor_parallel_size: int = 1,
+        dtype: ModelDType = "auto",
+        quantization: Optional[QuantizationMethods] = None,
+        revision: Optional[str] = None,
+        tokenizer_revision: Optional[str] = None,
+        seed: Optional[int] = None,
+        gpu_memory_utilization: float = 0.9,
+        swap_space: float = 4,
+        cpu_offload_gb: float = 0,
+        enforce_eager: bool = False,
+        max_seq_len_to_capture: int = 8192,
+        disable_custom_all_reduce: bool = False,
+        disable_async_output_proc: bool = False,
+        hf_token: Optional[Union[bool, str]] = None,
+        hf_overrides: Optional[HfOverrides] = None,
+        mm_processor_kwargs: Optional[dict[str, Any]] = None,
+        override_pooler_config: Optional[PoolerConfig] = None,
+        compilation_config: Optional[Union[int, dict[str, Any]]] = None,
+        **kwargs,
+    ) -> None:
+        """LLM constructor."""
+
+        if "disable_log_stats" not in kwargs:
+            kwargs["disable_log_stats"] = True
+
+        if "worker_cls" in kwargs:
+            worker_cls = kwargs["worker_cls"]
+            # if the worker_cls is not qualified string name,
+            # we serialize it using cloudpickle to avoid pickling issues
+            if isinstance(worker_cls, type):
+                kwargs["worker_cls"] = cloudpickle.dumps(worker_cls)
+
+        if hf_overrides is None:
+            hf_overrides = {}
+
+        if compilation_config is not None:
+            if isinstance(compilation_config, int):
+                compilation_config_instance = CompilationConfig(
+                    level=compilation_config)
+            elif isinstance(compilation_config, dict):
+                predicate = lambda x: is_init_field(CompilationConfig, x[0])
+                compilation_config_instance = CompilationConfig(
+                    **dict(filter(predicate, compilation_config.items())))
+            else:
+                compilation_config_instance = compilation_config
+        else:
+            compilation_config_instance = CompilationConfig()
+
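As a sketch of the two accepted forms handled just above: an integer is treated as the compilation optimization level, while a dictionary is filtered down to valid CompilationConfig constructor fields before being applied. The model name reuses the placeholder from the earlier sketch.

# Illustrative sketch only: the two accepted forms of `compilation_config`.
llm_a = LLM(model="facebook/opt-125m", compilation_config=3)
llm_b = LLM(model="facebook/opt-125m", compilation_config={"level": 3})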
+        engine_args = EngineArgs(
+            model=model,
+            task=task,
+            tokenizer=tokenizer,
+            tokenizer_mode=tokenizer_mode,
+            skip_tokenizer_init=skip_tokenizer_init,
+            trust_remote_code=trust_remote_code,
+            allowed_local_media_path=allowed_local_media_path,
+            tensor_parallel_size=tensor_parallel_size,
+            dtype=dtype,
+            quantization=quantization,
+            revision=revision,
+            tokenizer_revision=tokenizer_revision,
+            seed=seed,
+            gpu_memory_utilization=gpu_memory_utilization,
+            swap_space=swap_space,
+            cpu_offload_gb=cpu_offload_gb,
+            enforce_eager=enforce_eager,
+            max_seq_len_to_capture=max_seq_len_to_capture,
+            disable_custom_all_reduce=disable_custom_all_reduce,
+            disable_async_output_proc=disable_async_output_proc,
+            hf_token=hf_token,
+            hf_overrides=hf_overrides,
+            mm_processor_kwargs=mm_processor_kwargs,
+            override_pooler_config=override_pooler_config,
+            compilation_config=compilation_config_instance,
+            **kwargs,
+        )
+
+        # Create the Engine (autoselects V0 vs V1)
+        self.llm_engine = LLMEngine.from_engine_args(
+            engine_args=engine_args, usage_context=UsageContext.LLM_CLASS)
+        self.engine_class = type(self.llm_engine)
+
+        self.request_counter = Counter()
+        self.default_sampling_params: Union[dict[str, Any], None] = None
+
+    def get_tokenizer(
+        self,
+        lora_request: Optional[LoRARequest] = None,
+    ) -> AnyTokenizer:
+        return self.llm_engine.get_tokenizer_group().get_lora_tokenizer(
+            lora_request)
+
+    def set_tokenizer(self, tokenizer: AnyTokenizer) -> None:
+        tokenizer_group = self.llm_engine.get_tokenizer_group()
+
+        # While CachedTokenizer is dynamic, have no choice but
+        # compare class name. Misjudgment will arise from
+        # user-defined tokenizer started with 'Cached'
+        if tokenizer.__class__.__name__.startswith("Cached"):
+            tokenizer_group.tokenizer = tokenizer
+        else:
+            tokenizer_group.tokenizer = get_cached_tokenizer(tokenizer)
+
+    def get_default_sampling_params(self) -> SamplingParams:
+        if self.default_sampling_params is None:
+            self.default_sampling_params = (
+                self.llm_engine.model_config.get_diff_sampling_param())
+        if self.default_sampling_params:
+            return SamplingParams.from_optional(**self.default_sampling_params)
+        return SamplingParams()
+
+    @overload
+    def generate(
+        self,
+        prompts: Union[PromptType, Sequence[PromptType]],
+        /,
+        sampling_params: Optional[Union[SamplingParams,
+                                        Sequence[SamplingParams]]] = None,
+        *,
+        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
+        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
+        guided_options_request: Optional[Union[LLMGuidedOptions,
+                                               GuidedDecodingRequest]] = None,
+    ) -> list[RequestOutput]:
+        ...
+
+    @overload  # LEGACY: single (prompt + optional token ids)
+    @deprecated("'prompt_token_ids' will become part of 'prompts'")
+    def generate(
+        self,
+        prompts: str,
+        sampling_params: Optional[Union[SamplingParams,
+                                        list[SamplingParams]]] = None,
+        prompt_token_ids: Optional[list[int]] = None,
+        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
+        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
+        guided_options_request: Optional[Union[LLMGuidedOptions,
+                                               GuidedDecodingRequest]] = None,
+    ) -> list[RequestOutput]:
+        ...
+
+    @overload  # LEGACY: multi (prompt + optional token ids)
+    @deprecated("'prompt_token_ids' will become part of 'prompts'")
+    def generate(
+        self,
+        prompts: list[str],
+        sampling_params: Optional[Union[SamplingParams,
+                                        list[SamplingParams]]] = None,
+        prompt_token_ids: Optional[list[list[int]]] = None,
+        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
+        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
+        guided_options_request: Optional[Union[LLMGuidedOptions,
+                                               GuidedDecodingRequest]] = None,
+    ) -> list[RequestOutput]:
+        ...
+
+    @overload  # LEGACY: single (token ids + optional prompt)
+    @deprecated("'prompt_token_ids' will become part of 'prompts'")
+    def generate(
+        self,
+        prompts: Optional[str] = None,
+        sampling_params: Optional[Union[SamplingParams,
+                                        list[SamplingParams]]] = None,
+        *,
+        prompt_token_ids: list[int],
+        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
+        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
+        guided_options_request: Optional[Union[LLMGuidedOptions,
+                                               GuidedDecodingRequest]] = None,
+    ) -> list[RequestOutput]:
+        ...
+
+    @overload  # LEGACY: multi (token ids + optional prompt)
+    @deprecated("'prompt_token_ids' will become part of 'prompts'")
+    def generate(
+        self,
+        prompts: Optional[list[str]] = None,
+        sampling_params: Optional[Union[SamplingParams,
+                                        list[SamplingParams]]] = None,
+        *,
+        prompt_token_ids: list[list[int]],
+        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
+        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
+        guided_options_request: Optional[Union[LLMGuidedOptions,
+                                               GuidedDecodingRequest]] = None,
+    ) -> list[RequestOutput]:
+        ...
+
+    @overload  # LEGACY: single or multi token ids [pos-only]
+    @deprecated("'prompt_token_ids' will become part of 'prompts'")
+    def generate(
+        self,
+        prompts: None,
+        sampling_params: None,
+        prompt_token_ids: Union[list[int], list[list[int]]],
+        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
+        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
+        guided_options_request: Optional[Union[LLMGuidedOptions,
+                                               GuidedDecodingRequest]] = None,
+    ) -> list[RequestOutput]:
+        ...
+
+    @deprecate_kwargs(
+        "prompt_token_ids",
+        is_deprecated=lambda: LLM.DEPRECATE_LEGACY,
+        additional_message="Please use the 'prompts' parameter instead.",
+    )
378
+ def generate(
379
+ self,
380
+ prompts: Union[Union[PromptType, Sequence[PromptType]],
381
+ Optional[Union[str, list[str]]]] = None,
382
+ sampling_params: Optional[Union[SamplingParams,
383
+ Sequence[SamplingParams]]] = None,
384
+ prompt_token_ids: Optional[Union[list[int], list[list[int]]]] = None,
385
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
386
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
387
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
388
+ guided_options_request: Optional[Union[LLMGuidedOptions,
389
+ GuidedDecodingRequest]] = None,
390
+ priority: Optional[list[int]] = None,
391
+ ) -> list[RequestOutput]:
392
+ """Generates the completions for the input prompts.
393
+
394
+ This class automatically batches the given prompts, considering
395
+ the memory constraint. For the best performance, put all of your prompts
396
+ into a single list and pass it to this method.
397
+
398
+ Args:
399
+ prompts: The prompts to the LLM. You may pass a sequence of prompts
400
+ for batch inference. See [PromptType][vllm.inputs.PromptType]
401
+ for more details about the format of each prompt.
402
+ sampling_params: The sampling parameters for text generation. If
403
+ None, we use the default sampling parameters.
404
+ When it is a single value, it is applied to every prompt.
405
+ When it is a list, the list must have the same length as the
406
+ prompts, and each entry is paired with the corresponding prompt.
407
+ use_tqdm: If `True`, shows a tqdm progress bar.
408
+ If a callable (e.g., `functools.partial(tqdm, leave=False)`),
409
+ it is used to create the progress bar.
410
+ If `False`, no progress bar is created.
411
+ lora_request: LoRA request to use for generation, if any.
412
+ prompt_adapter_request: Prompt Adapter request to use for
413
+ generation, if any.
414
+ priority: The priority of the requests, if any.
415
+ Only applicable when priority scheduling policy is enabled.
416
+
417
+ Returns:
418
+ A list of `RequestOutput` objects containing the
419
+ generated completions in the same order as the input prompts.
420
+
421
+ Note:
422
+ Using `prompts` and `prompt_token_ids` as keyword parameters is
423
+ considered legacy and may be deprecated in the future. You should
424
+ instead pass them via the `prompts` parameter.
425
+ """
426
+ runner_type = self.llm_engine.model_config.runner_type
427
+ if runner_type not in ["generate", "transcription"]:
428
+ messages = [
429
+ "LLM.generate() is only supported for (conditional) generation "
430
+ "models (XForCausalLM, XForConditionalGeneration).",
431
+ ]
432
+
433
+ supported_runner_types = self.llm_engine.model_config \
434
+ .supported_runner_types
435
+ if "generate" in supported_runner_types:
436
+ messages.append(
437
+ "Your model supports the 'generate' runner, but is "
438
+ f"currently initialized for the '{runner_type}' runner. "
439
+ "Please initialize vLLM using `--task generate`.")
440
+
441
+ raise ValueError(" ".join(messages))
442
+
443
+ if prompt_token_ids is not None:
444
+ parsed_prompts = self._convert_v1_inputs(
445
+ prompts=cast(Optional[Union[str, list[str]]], prompts),
446
+ prompt_token_ids=prompt_token_ids,
447
+ )
448
+ else:
449
+ parsed_prompts = cast(Union[PromptType, Sequence[PromptType]],
450
+ prompts)
451
+
452
+ if isinstance(guided_options_request, dict):
453
+ if len(guided_options_request) > 1:
454
+ raise ValueError(
455
+ "You can only use one guided decoding but multiple is "
456
+ f"specified: {guided_options_request}")
457
+ guided_options_request = GuidedDecodingRequest(
458
+ **guided_options_request)
459
+
460
+ if sampling_params is None:
461
+ # Use default sampling params.
462
+ sampling_params = self.get_default_sampling_params()
463
+
464
+ self._validate_and_add_requests(
465
+ prompts=parsed_prompts,
466
+ params=sampling_params,
467
+ use_tqdm=use_tqdm,
468
+ lora_request=lora_request,
469
+ prompt_adapter_request=prompt_adapter_request,
470
+ guided_options=guided_options_request,
471
+ priority=priority,
472
+ )
473
+
474
+ outputs = self._run_engine(use_tqdm=use_tqdm)
475
+ return self.engine_class.validate_outputs(outputs, RequestOutput)
476
+
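+ # --- Editor's note: the block below is an illustrative usage sketch added for
+ # documentation; it is not part of the original source file. The model name
+ # "facebook/opt-125m" is only an assumed placeholder.
+ def _example_generate_usage():  # hypothetical helper, illustration only
+     from vllm import LLM, SamplingParams
+
+     llm = LLM(model="facebook/opt-125m")
+     params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=32)
+     # Passing all prompts in one list lets vLLM batch them internally.
+     outputs = llm.generate(
+         ["Hello, my name is", "The capital of France is"], params)
+     for out in outputs:
+         print(out.prompt, "->", out.outputs[0].text)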
477
+ def collective_rpc(self,
478
+ method: Union[str, Callable[..., _R]],
479
+ timeout: Optional[float] = None,
480
+ args: tuple = (),
481
+ kwargs: Optional[dict[str, Any]] = None) -> list[_R]:
482
+ """
483
+ Execute an RPC call on all workers.
484
+
485
+ Args:
486
+ method: Name of the worker method to execute, or a callable that
487
+ is serialized and sent to all workers to execute.
488
+
489
+ If the method is a callable, it should accept an additional
490
+ `self` argument, in addition to the arguments passed in `args`
491
+ and `kwargs`. The `self` argument will be the worker object.
492
+ timeout: Maximum time in seconds to wait for execution. Raises a
493
+ [`TimeoutError`][] on timeout. `None` means wait indefinitely.
494
+ args: Positional arguments to pass to the worker method.
495
+ kwargs: Keyword arguments to pass to the worker method.
496
+
497
+ Returns:
498
+ A list containing the results from each worker.
499
+
500
+ Note:
501
+ It is recommended to use this API to only pass control messages,
502
+ and set up data-plane communication to pass data.
503
+ """
504
+
505
+ return self.llm_engine.collective_rpc(method, timeout, args, kwargs)
506
+
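+ # --- Editor's note: illustrative sketch (not part of the original source) of
+ # collective_rpc() with a serializable callable. The helper name and the use
+ # of type(worker).__name__ are assumptions made for the example.
+ def _example_collective_rpc_usage():  # hypothetical helper, illustration only
+     from vllm import LLM
+
+     def worker_class_name(worker):
+         # Runs on every worker; the worker object is passed as the first
+         # argument, as described in the docstring above.
+         return type(worker).__name__
+
+     llm = LLM(model="facebook/opt-125m")  # assumed example model
+     print(llm.collective_rpc(worker_class_name))  # one result per worker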
507
+ def apply_model(self, func: Callable[[nn.Module], _R]) -> list[_R]:
508
+ """
509
+ Run a function directly on the model inside each worker,
510
+ returning the result for each of them.
511
+ """
512
+ executor = self.llm_engine.model_executor
513
+ return executor.apply_model(func)
514
+
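+ # --- Editor's note: illustrative sketch (not part of the original source) of
+ # apply_model(), which runs a function on the underlying nn.Module in each
+ # worker. The model name is an assumed placeholder.
+ def _example_apply_model_usage():  # hypothetical helper, illustration only
+     from vllm import LLM
+
+     llm = LLM(model="facebook/opt-125m")
+     # Count parameters of the loaded model; returns one value per worker.
+     num_params = llm.apply_model(
+         lambda model: sum(p.numel() for p in model.parameters()))
+     print(num_params)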
515
+ def _get_beam_search_lora_requests(
516
+ self,
517
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]],
518
+ prompts: list[Union[TokensPrompt, TextPrompt]],
519
+ ) -> list[Optional[LoRARequest]]:
520
+ """Get the optional lora request corresponding to each prompt."""
521
+ if isinstance(lora_request,
522
+ Sequence) and len(lora_request) != len(prompts):
523
+ raise ValueError(
524
+ "Lora request list should be the same length as the prompts")
525
+
526
+ if lora_request is None or isinstance(lora_request, LoRARequest):
527
+ return [lora_request] * len(prompts)
528
+
529
+ raise TypeError(f"Invalid lora_request type {type(lora_request)}")
530
+
531
+ def beam_search(
532
+ self,
533
+ prompts: list[Union[TokensPrompt, TextPrompt]],
534
+ params: BeamSearchParams,
535
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
536
+ ) -> list[BeamSearchOutput]:
537
+ """
538
+ Generate sequences using beam search.
539
+
540
+ Args:
541
+ prompts: A list of prompts. Each prompt can be a string or a list
542
+ of token IDs.
543
+ params: The beam search parameters.
544
+ lora_request: LoRA request to use for generation, if any.
545
+ """
546
+ # TODO: how does beam search work together with length penalty,
547
+ # frequency penalty, and stopping criteria, etc.?
548
+ beam_width = params.beam_width
549
+ max_tokens = params.max_tokens
550
+ temperature = params.temperature
551
+ ignore_eos = params.ignore_eos
552
+ length_penalty = params.length_penalty
553
+
554
+ lora_requests = self._get_beam_search_lora_requests(
555
+ lora_request, prompts)
556
+
557
+ def sort_beams_key(x: BeamSearchSequence) -> float:
558
+ return get_beam_search_score(x.tokens, x.cum_logprob,
559
+ tokenizer.eos_token_id,
560
+ length_penalty)
561
+
562
+ def create_tokens_prompt_from_beam(
563
+ beam: BeamSearchSequence) -> TokensPrompt:
564
+ token_prompt_kwargs: TokensPrompt = {
565
+ "prompt_token_ids": beam.tokens
566
+ }
567
+ if beam.multi_modal_data is not None:
568
+ token_prompt_kwargs["multi_modal_data"] = beam.multi_modal_data
569
+
570
+ if beam.mm_processor_kwargs is not None:
571
+ token_prompt_kwargs[
572
+ "mm_processor_kwargs"] = beam.mm_processor_kwargs
573
+ return TokensPrompt(**token_prompt_kwargs)
574
+
575
+ tokenizer = self.get_tokenizer()
576
+ # generate 2 * beam_width candidates at each step
577
+ # following the huggingface transformers implementation
578
+ # at https://github.com/huggingface/transformers/blob/e15687fffe5c9d20598a19aeab721ae0a7580f8a/src/transformers/generation/beam_search.py#L534 # noqa
579
+ beam_search_params = SamplingParams(logprobs=2 * beam_width,
580
+ max_tokens=1,
581
+ temperature=temperature)
582
+ instances: list[BeamSearchInstance] = []
583
+
584
+ for lora_req, prompt in zip(lora_requests, prompts):
585
+ # Add multimodal processor kwargs & data
586
+ mm_kwargs = {}
587
+ if "multi_modal_data" in prompt:
588
+ mm_kwargs["multi_modal_data"] = prompt["multi_modal_data"]
589
+ if "mm_processor_kwargs" in prompt:
590
+ mm_kwargs["mm_processor_kwargs"] = prompt[
591
+ "mm_processor_kwargs"]
592
+
593
+ if "prompt_token_ids" in prompt:
594
+ prompt = cast(TokensPrompt, prompt) # Needed for mypy
595
+ prompt_tokens = prompt["prompt_token_ids"]
596
+ else:
597
+ prompt_tokens = tokenizer.encode(prompt["prompt"])
598
+
599
+ instances.append(
600
+ BeamSearchInstance(
601
+ prompt_tokens,
602
+ lora_request=lora_req,
603
+ logprobs=None,
604
+ **mm_kwargs,
605
+ ), )
606
+
607
+ for _ in range(max_tokens):
608
+ all_beams: list[BeamSearchSequence] = list(
609
+ sum((instance.beams for instance in instances), []))
610
+ pos = [0] + list(
611
+ itertools.accumulate(
612
+ len(instance.beams) for instance in instances))
613
+ instance_start_and_end: list[tuple[int, int]] = list(
614
+ zip(pos[:-1], pos[1:]))
615
+
616
+ if len(all_beams) == 0:
617
+ break
618
+
619
+ # create the corresponding batch entries for prompt & optional lora
620
+ prompts_batch, lora_req_batch = zip(
621
+ *[(create_tokens_prompt_from_beam(beam), beam.lora_request)
622
+ for beam in all_beams])
623
+
624
+ # only runs for one step
625
+ # we don't need to use tqdm here
626
+ output = self.generate(prompts_batch,
627
+ sampling_params=beam_search_params,
628
+ use_tqdm=False,
629
+ lora_request=lora_req_batch)
630
+
631
+ for (start, end), instance in zip(instance_start_and_end,
632
+ instances):
633
+ instance_new_beams = []
634
+ for i in range(start, end):
635
+ current_beam = all_beams[i]
636
+ result = output[i]
637
+
638
+ if result.outputs[0].logprobs is not None:
639
+ # if `result.outputs[0].logprobs` is None, it means
640
+ # the sequence finished because it reached the max model length
641
+ # or was aborted; we don't need to add it to the new beams.
642
+ logprobs = result.outputs[0].logprobs[0]
643
+ for token_id, logprob_obj in logprobs.items():
644
+ new_beam = BeamSearchSequence(
645
+ tokens=current_beam.tokens + [token_id],
646
+ logprobs=current_beam.logprobs + [logprobs],
647
+ lora_request=current_beam.lora_request,
648
+ cum_logprob=current_beam.cum_logprob +
649
+ logprob_obj.logprob,
650
+ multi_modal_data=current_beam.multi_modal_data,
651
+ mm_processor_kwargs=current_beam.
652
+ mm_processor_kwargs)
653
+
654
+ if token_id == tokenizer.eos_token_id and \
655
+ not ignore_eos:
656
+ instance.completed.append(new_beam)
657
+ else:
658
+ instance_new_beams.append(new_beam)
659
+ sorted_beams = sorted(instance_new_beams,
660
+ key=sort_beams_key,
661
+ reverse=True)
662
+ instance.beams = sorted_beams[:beam_width]
663
+
664
+ outputs = []
665
+ for instance in instances:
666
+ instance.completed.extend(instance.beams)
667
+ sorted_completed = sorted(instance.completed,
668
+ key=sort_beams_key,
669
+ reverse=True)
670
+ best_beams = sorted_completed[:beam_width]
671
+
672
+ for beam in best_beams:
673
+ beam.text = tokenizer.decode(beam.tokens)
674
+ outputs.append(BeamSearchOutput(sequences=best_beams))
675
+
676
+ return outputs
677
+
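+ # --- Editor's note: illustrative sketch (not part of the original source) of
+ # beam_search(). BeamSearchParams lives in vllm.sampling_params; the model
+ # name and prompt text are assumed placeholders.
+ def _example_beam_search_usage():  # hypothetical helper, illustration only
+     from vllm import LLM, TextPrompt
+     from vllm.sampling_params import BeamSearchParams
+
+     llm = LLM(model="facebook/opt-125m")
+     params = BeamSearchParams(beam_width=4, max_tokens=32)
+     outputs = llm.beam_search(
+         [TextPrompt(prompt="The capital of France is")], params)
+     # Each BeamSearchOutput holds up to `beam_width` decoded sequences.
+     for seq in outputs[0].sequences:
+         print(seq.text)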
678
+ def chat(
679
+ self,
680
+ messages: Union[list[ChatCompletionMessageParam],
681
+ list[list[ChatCompletionMessageParam]]],
682
+ sampling_params: Optional[Union[SamplingParams,
683
+ list[SamplingParams]]] = None,
684
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
685
+ lora_request: Optional[LoRARequest] = None,
686
+ chat_template: Optional[str] = None,
687
+ chat_template_content_format: ChatTemplateContentFormatOption = "auto",
688
+ add_generation_prompt: bool = True,
689
+ continue_final_message: bool = False,
690
+ tools: Optional[list[dict[str, Any]]] = None,
691
+ chat_template_kwargs: Optional[dict[str, Any]] = None,
692
+ mm_processor_kwargs: Optional[dict[str, Any]] = None,
693
+ ) -> list[RequestOutput]:
694
+ """
695
+ Generate responses for a chat conversation.
696
+
697
+ The chat conversation is converted into a text prompt using the
698
+ tokenizer, and the [generate][] method is called to generate the
699
+ responses.
700
+
701
+ Multi-modal inputs can be passed in the same way you would pass them
702
+ to the OpenAI API.
703
+
704
+ Args:
705
+ messages: A list of conversations or a single conversation.
706
+
707
+ - Each conversation is represented as a list of messages.
708
+ - Each message is a dictionary with 'role' and 'content' keys.
709
+
710
+ sampling_params: The sampling parameters for text generation.
711
+ If None, we use the default sampling parameters. When it
712
+ is a single value, it is applied to every prompt. When it
713
+ is a list, the list must have the same length as the
714
+ prompts, and each entry is paired with the corresponding prompt.
715
+ use_tqdm: If `True`, shows a tqdm progress bar.
716
+ If a callable (e.g., `functools.partial(tqdm, leave=False)`),
717
+ it is used to create the progress bar.
718
+ If `False`, no progress bar is created.
719
+ lora_request: LoRA request to use for generation, if any.
720
+ chat_template: The template to use for structuring the chat.
721
+ If not provided, the model's default chat template will be used.
722
+ chat_template_content_format: The format to render message content.
723
+
724
+ - "string" will render the content as a string.
725
+ Example: `"Who are you?"`
726
+ - "openai" will render the content as a list of dictionaries,
727
+ similar to OpenAI schema.
728
+ Example: `[{"type": "text", "text": "Who are you?"}]`
729
+
730
+ add_generation_prompt: If True, adds a generation prompt
731
+ to the end of each conversation.
732
+ continue_final_message: If True, continues the final message in
733
+ the conversation instead of starting a new one. Cannot be
734
+ `True` if `add_generation_prompt` is also `True`.
735
+ chat_template_kwargs: Additional kwargs to pass to the chat
736
+ template.
737
+ mm_processor_kwargs: Multimodal processor kwarg overrides for this
738
+ chat request. Only used for offline requests.
739
+
740
+ Returns:
741
+ A list of `RequestOutput` objects containing the generated
742
+ responses in the same order as the input messages.
743
+ """
744
+ list_of_messages: list[list[ChatCompletionMessageParam]]
745
+
746
+ # Handle multi and single conversations
747
+ if is_list_of(messages, list):
748
+ # messages is list[list[...]]
749
+ list_of_messages = cast(list[list[ChatCompletionMessageParam]],
750
+ messages)
751
+ else:
752
+ # messages is list[...]
753
+ list_of_messages = [
754
+ cast(list[ChatCompletionMessageParam], messages)
755
+ ]
756
+
757
+ tokenizer = self.get_tokenizer(lora_request)
758
+ model_config = self.llm_engine.get_model_config()
759
+ resolved_content_format = resolve_chat_template_content_format(
760
+ chat_template,
761
+ tools,
762
+ chat_template_content_format,
763
+ tokenizer,
764
+ model_config=model_config,
765
+ )
766
+
767
+ _chat_template_kwargs: dict[str, Any] = dict(
768
+ chat_template=chat_template,
769
+ add_generation_prompt=add_generation_prompt,
770
+ continue_final_message=continue_final_message,
771
+ tools=tools,
772
+ )
773
+ _chat_template_kwargs.update(chat_template_kwargs or {})
774
+
775
+ prompts: list[Union[TokensPrompt, TextPrompt]] = []
776
+
777
+ for msgs in list_of_messages:
778
+ # NOTE: _parse_chat_message_content_parts() currently doesn't
779
+ # handle mm_processor_kwargs, since there is no implementation in
780
+ # the chat message parsing for it.
781
+ conversation, mm_data = parse_chat_messages(
782
+ msgs,
783
+ model_config,
784
+ tokenizer,
785
+ content_format=resolved_content_format,
786
+ )
787
+
788
+ if isinstance(tokenizer, MistralTokenizer):
789
+ prompt_token_ids = apply_mistral_chat_template(
790
+ tokenizer,
791
+ messages=msgs,
792
+ **_chat_template_kwargs,
793
+ )
794
+ else:
795
+ prompt_str = apply_hf_chat_template(
796
+ tokenizer=tokenizer,
797
+ conversation=conversation,
798
+ model_config=model_config,
799
+ **_chat_template_kwargs,
800
+ )
801
+ # Special tokens are already included in chat templates so
802
+ # should not be added by the tokenizer in this case.
803
+ prompt_token_ids = tokenizer.encode(prompt_str,
804
+ add_special_tokens=False)
805
+
806
+ prompt = TokensPrompt(prompt_token_ids=prompt_token_ids)
807
+
808
+ if mm_data is not None:
809
+ prompt["multi_modal_data"] = mm_data
810
+
811
+ if mm_processor_kwargs is not None:
812
+ prompt["mm_processor_kwargs"] = mm_processor_kwargs
813
+
814
+ prompts.append(prompt)
815
+
816
+ return self.generate(
817
+ prompts,
818
+ sampling_params=sampling_params,
819
+ use_tqdm=use_tqdm,
820
+ lora_request=lora_request,
821
+ )
822
+
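+ # --- Editor's note: illustrative sketch (not part of the original source) of
+ # chat(). The conversation content and model name are assumed placeholders;
+ # chat() applies the model's chat template and then calls generate() as
+ # described above.
+ def _example_chat_usage():  # hypothetical helper, illustration only
+     from vllm import LLM, SamplingParams
+
+     llm = LLM(model="facebook/opt-125m")  # assumed; use a chat-tuned model
+     messages = [
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": "Write a haiku about the sea."},
+     ]
+     outputs = llm.chat(messages, SamplingParams(max_tokens=64))
+     print(outputs[0].outputs[0].text)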
823
+ @overload
824
+ def encode(
825
+ self,
826
+ prompts: Union[PromptType, Sequence[PromptType]],
827
+ /,
828
+ pooling_params: Optional[Union[PoolingParams,
829
+ Sequence[PoolingParams]]] = None,
830
+ *,
831
+ truncate_prompt_tokens: Optional[int] = None,
832
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
833
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
834
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
835
+ ) -> list[PoolingRequestOutput]:
836
+ ...
837
+
838
+ @overload # LEGACY: single (prompt + optional token ids)
839
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
840
+ def encode(
841
+ self,
842
+ prompts: str,
843
+ pooling_params: Optional[Union[PoolingParams,
844
+ Sequence[PoolingParams]]] = None,
845
+ prompt_token_ids: Optional[list[int]] = None,
846
+ truncate_prompt_tokens: Optional[int] = None,
847
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
848
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
849
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
850
+ ) -> list[PoolingRequestOutput]:
851
+ ...
852
+
853
+ @overload # LEGACY: multi (prompt + optional token ids)
854
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
855
+ def encode(
856
+ self,
857
+ prompts: list[str],
858
+ pooling_params: Optional[Union[PoolingParams,
859
+ Sequence[PoolingParams]]] = None,
860
+ prompt_token_ids: Optional[list[list[int]]] = None,
861
+ truncate_prompt_tokens: Optional[int] = None,
862
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
863
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
864
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
865
+ ) -> list[PoolingRequestOutput]:
866
+ ...
867
+
868
+ @overload # LEGACY: single (token ids + optional prompt)
869
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
870
+ def encode(
871
+ self,
872
+ prompts: Optional[str] = None,
873
+ pooling_params: Optional[Union[PoolingParams,
874
+ Sequence[PoolingParams]]] = None,
875
+ *,
876
+ prompt_token_ids: list[int],
877
+ truncate_prompt_tokens: Optional[int] = None,
878
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
879
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
880
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
881
+ ) -> list[PoolingRequestOutput]:
882
+ ...
883
+
884
+ @overload # LEGACY: multi (token ids + optional prompt)
885
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
886
+ def encode(
887
+ self,
888
+ prompts: Optional[list[str]] = None,
889
+ pooling_params: Optional[Union[PoolingParams,
890
+ Sequence[PoolingParams]]] = None,
891
+ *,
892
+ prompt_token_ids: list[list[int]],
893
+ truncate_prompt_tokens: Optional[int] = None,
894
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
895
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
896
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
897
+ ) -> list[PoolingRequestOutput]:
898
+ ...
899
+
900
+ @overload # LEGACY: single or multi token ids [pos-only]
901
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
902
+ def encode(
903
+ self,
904
+ prompts: None,
905
+ pooling_params: None,
906
+ prompt_token_ids: Union[list[int], list[list[int]]],
907
+ truncate_prompt_tokens: Optional[int] = None,
908
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
909
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
910
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
911
+ ) -> list[PoolingRequestOutput]:
912
+ ...
913
+
914
+ @deprecate_kwargs(
915
+ "prompt_token_ids",
916
+ is_deprecated=lambda: LLM.DEPRECATE_LEGACY,
917
+ additional_message="Please use the 'prompts' parameter instead.",
918
+ )
919
+ def encode(
920
+ self,
921
+ prompts: Union[Union[PromptType, Sequence[PromptType]],
922
+ Optional[Union[str, list[str]]]] = None,
923
+ pooling_params: Optional[Union[PoolingParams,
924
+ Sequence[PoolingParams]]] = None,
925
+ prompt_token_ids: Optional[Union[list[int], list[list[int]]]] = None,
926
+ truncate_prompt_tokens: Optional[int] = None,
927
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
928
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
929
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
930
+ ) -> list[PoolingRequestOutput]:
931
+ """Apply pooling to the hidden states corresponding to the input
932
+ prompts.
933
+
934
+ This class automatically batches the given prompts, considering
935
+ the memory constraint. For the best performance, put all of your prompts
936
+ into a single list and pass it to this method.
937
+
938
+ Args:
939
+ prompts: The prompts to the LLM. You may pass a sequence of prompts
940
+ for batch inference. See [PromptType][vllm.inputs.PromptType]
941
+ for more details about the format of each prompt.
942
+ pooling_params: The pooling parameters for pooling. If None, we
943
+ use the default pooling parameters.
944
+ use_tqdm: If `True`, shows a tqdm progress bar.
945
+ If a callable (e.g., `functools.partial(tqdm, leave=False)`),
946
+ it is used to create the progress bar.
947
+ If `False`, no progress bar is created.
948
+ lora_request: LoRA request to use for generation, if any.
949
+ prompt_adapter_request: Prompt Adapter request to use for
950
+ generation, if any.
951
+
952
+ Returns:
953
+ A list of `PoolingRequestOutput` objects containing the
954
+ pooled hidden states in the same order as the input prompts.
955
+
956
+ Note:
957
+ Using `prompts` and `prompt_token_ids` as keyword parameters is
958
+ considered legacy and may be deprecated in the future. You should
959
+ instead pass them via the `prompts` parameter.
960
+ """
961
+ runner_type = self.llm_engine.model_config.runner_type
962
+ if runner_type != "pooling":
963
+ messages = ["LLM.encode() is only supported for pooling models."]
964
+
965
+ supported_runner_types = self.llm_engine.model_config \
966
+ .supported_runner_types
967
+ if "pooling" in supported_runner_types:
968
+ messages.append(
969
+ "Your model supports the 'pooling' runner, but is "
970
+ f"currently initialized for the '{runner_type}' runner. "
971
+ "Please initialize vLLM using `--task embed`, "
972
+ "`--task classify`, `--task score` etc.")
973
+
974
+ raise ValueError(" ".join(messages))
975
+
976
+ if prompt_token_ids is not None:
977
+ parsed_prompts = self._convert_v1_inputs(
978
+ prompts=cast(Optional[Union[str, list[str]]], prompts),
979
+ prompt_token_ids=prompt_token_ids,
980
+ )
981
+ else:
982
+ parsed_prompts = cast(Union[PromptType, Sequence[PromptType]],
983
+ prompts)
984
+
985
+ if pooling_params is None:
986
+ # Use default pooling params.
987
+ pooling_params = PoolingParams()
988
+ elif isinstance(pooling_params, PoolingParams):
989
+ pooling_params.verify(self.llm_engine.model_config)
990
+ else:
991
+ for pooling_param in pooling_params:
992
+ pooling_param.verify(self.llm_engine.model_config)
993
+
994
+ tokenization_kwargs: dict[str, Any] = {}
995
+ _validate_truncation_size(self.llm_engine.model_config.max_model_len,
996
+ truncate_prompt_tokens, tokenization_kwargs)
997
+
998
+ self._validate_and_add_requests(
999
+ prompts=parsed_prompts,
1000
+ params=pooling_params,
1001
+ use_tqdm=use_tqdm,
1002
+ lora_request=lora_request,
1003
+ tokenization_kwargs=tokenization_kwargs,
1004
+ prompt_adapter_request=prompt_adapter_request,
1005
+ )
1006
+
1007
+ outputs = self._run_engine(use_tqdm=use_tqdm)
1008
+ return self.engine_class.validate_outputs(outputs,
1009
+ PoolingRequestOutput)
1010
+
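+ # --- Editor's note: illustrative sketch (not part of the original source) of
+ # encode(), which returns pooled hidden states. The model name and task value
+ # are assumed placeholders for a pooling model.
+ def _example_encode_usage():  # hypothetical helper, illustration only
+     from vllm import LLM
+
+     llm = LLM(model="intfloat/e5-small-v2", task="embed")  # assumed model
+     outputs = llm.encode(["Hello, my name is"])
+     for out in outputs:
+         # PoolingRequestOutput.outputs.data holds the pooled tensor.
+         print(out.outputs.data.shape)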
1011
+ def embed(
1012
+ self,
1013
+ prompts: Union[PromptType, Sequence[PromptType]],
1014
+ /,
1015
+ *,
1016
+ truncate_prompt_tokens: Optional[int] = None,
1017
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
1018
+ pooling_params: Optional[Union[PoolingParams,
1019
+ Sequence[PoolingParams]]] = None,
1020
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
1021
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
1022
+ ) -> list[EmbeddingRequestOutput]:
1023
+ """
1024
+ Generate an embedding vector for each prompt.
1025
+
1026
+ This class automatically batches the given prompts, considering
1027
+ the memory constraint. For the best performance, put all of your prompts
1028
+ into a single list and pass it to this method.
1029
+
1030
+ Args:
1031
+ prompts: The prompts to the LLM. You may pass a sequence of prompts
1032
+ for batch inference. See [PromptType][vllm.inputs.PromptType]
1033
+ for more details about the format of each prompt.
1034
+ pooling_params: The pooling parameters for pooling. If None, we
1035
+ use the default pooling parameters.
1036
+ use_tqdm: If `True`, shows a tqdm progress bar.
1037
+ If a callable (e.g., `functools.partial(tqdm, leave=False)`),
1038
+ it is used to create the progress bar.
1039
+ If `False`, no progress bar is created.
1040
+ lora_request: LoRA request to use for generation, if any.
1041
+ prompt_adapter_request: Prompt Adapter request to use for
1042
+ generation, if any.
1043
+
1044
+ Returns:
1045
+ A list of `EmbeddingRequestOutput` objects containing the
1046
+ embedding vectors in the same order as the input prompts.
1047
+ """
1048
+ if self.llm_engine.model_config.task != "embed":
1049
+ raise ValueError(
1050
+ "Embedding API is only enabled for `--task embed`")
1051
+
1052
+ items = self.encode(prompts,
1053
+ truncate_prompt_tokens=truncate_prompt_tokens,
1054
+ use_tqdm=use_tqdm,
1055
+ pooling_params=pooling_params,
1056
+ lora_request=lora_request,
1057
+ prompt_adapter_request=prompt_adapter_request)
1058
+
1059
+ return [EmbeddingRequestOutput.from_base(item) for item in items]
1060
+
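+ # --- Editor's note: illustrative sketch (not part of the original source) of
+ # embed(), which requires a model initialized with task="embed". The model
+ # name is an assumed placeholder.
+ def _example_embed_usage():  # hypothetical helper, illustration only
+     from vllm import LLM
+
+     llm = LLM(model="intfloat/e5-small-v2", task="embed")  # assumed model
+     outputs = llm.embed(["Hello, my name is", "The weather is nice"])
+     for out in outputs:
+         # EmbeddingRequestOutput.outputs.embedding is a list of floats.
+         print(len(out.outputs.embedding))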
1061
+ def classify(
1062
+ self,
1063
+ prompts: Union[PromptType, Sequence[PromptType]],
1064
+ /,
1065
+ *,
1066
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
1067
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
1068
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
1069
+ ) -> list[ClassificationRequestOutput]:
1070
+ """
1071
+ Generate class logits for each prompt.
1072
+
1073
+ This class automatically batches the given prompts, considering
1074
+ the memory constraint. For the best performance, put all of your prompts
1075
+ into a single list and pass it to this method.
1076
+
1077
+ Args:
1078
+ prompts: The prompts to the LLM. You may pass a sequence of prompts
1079
+ for batch inference. See [PromptType][vllm.inputs.PromptType]
1080
+ for more details about the format of each prompt.
1081
+ use_tqdm: If `True`, shows a tqdm progress bar.
1082
+ If a callable (e.g., `functools.partial(tqdm, leave=False)`),
1083
+ it is used to create the progress bar.
1084
+ If `False`, no progress bar is created.
1085
+ lora_request: LoRA request to use for generation, if any.
1086
+ prompt_adapter_request: Prompt Adapter request to use for
1087
+ generation, if any.
1088
+
1089
+ Returns:
1090
+ A list of `ClassificationRequestOutput` objects containing the
1091
+ class probabilities in the same order as the input prompts.
1092
+ """
1093
+ if self.llm_engine.model_config.task != "classify":
1094
+ raise ValueError(
1095
+ "Classification API is only enabled for `--task classify`")
1096
+
1097
+ items = self.encode(prompts,
1098
+ use_tqdm=use_tqdm,
1099
+ lora_request=lora_request,
1100
+ prompt_adapter_request=prompt_adapter_request)
1101
+
1102
+ return [ClassificationRequestOutput.from_base(item) for item in items]
1103
+
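+ # --- Editor's note: illustrative sketch (not part of the original source) of
+ # classify(), which requires a model initialized with task="classify". The
+ # model name is an assumed placeholder.
+ def _example_classify_usage():  # hypothetical helper, illustration only
+     from vllm import LLM
+
+     llm = LLM(model="jason9693/Qwen2.5-1.5B-apeach",  # assumed example model
+               task="classify")
+     outputs = llm.classify(["vLLM is great!"])
+     # ClassificationRequestOutput.outputs.probs holds class probabilities.
+     print(outputs[0].outputs.probs)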
1104
+ def _embedding_score(
1105
+ self,
1106
+ tokenizer: AnyTokenizer,
1107
+ text_1: list[Union[str, TextPrompt, TokensPrompt]],
1108
+ text_2: list[Union[str, TextPrompt, TokensPrompt]],
1109
+ truncate_prompt_tokens: Optional[int] = None,
1110
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
1111
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
1112
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
1113
+ ) -> list[ScoringRequestOutput]:
1114
+
1115
+ encoded_output: list[PoolingRequestOutput] = self.encode(
1116
+ text_1 + text_2,
1117
+ truncate_prompt_tokens=truncate_prompt_tokens,
1118
+ use_tqdm=use_tqdm,
1119
+ lora_request=lora_request,
1120
+ prompt_adapter_request=prompt_adapter_request)
1121
+
1122
+ encoded_output_1: list[PoolingRequestOutput] = encoded_output[
1123
+ 0:len(text_1)]
1124
+ encoded_output_2: list[PoolingRequestOutput] = encoded_output[
1125
+ len(text_1):]
1126
+
1127
+ if len(encoded_output_1) == 1:
1128
+ encoded_output_1 = encoded_output_1 * len(encoded_output_2)
1129
+
1130
+ scores = _cosine_similarity(tokenizer=tokenizer,
1131
+ embed_1=encoded_output_1,
1132
+ embed_2=encoded_output_2)
1133
+
1134
+ items = self.engine_class.validate_outputs(scores,
1135
+ PoolingRequestOutput)
1136
+ return [ScoringRequestOutput.from_base(item) for item in items]
1137
+
1138
+ def _cross_encoding_score(
1139
+ self,
1140
+ tokenizer: AnyTokenizer,
1141
+ text_1: list[str],
1142
+ text_2: list[str],
1143
+ truncate_prompt_tokens: Optional[int] = None,
1144
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
1145
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
1146
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
1147
+ ) -> list[ScoringRequestOutput]:
1148
+
1149
+ if isinstance(tokenizer, MistralTokenizer):
1150
+ raise ValueError(
1151
+ "Score API is only enabled for `--task embed or score`")
1152
+
1153
+ if len(text_1) == 1:
1154
+ text_1 = text_1 * len(text_2)
1155
+
1156
+ input_pairs = [(t1, t2) for t1, t2 in zip(text_1, text_2)]
1157
+
1158
+ pooling_params = PoolingParams()
1159
+
1160
+ tokenization_kwargs: dict[str, Any] = {}
1161
+ _validate_truncation_size(self.llm_engine.model_config.max_model_len,
1162
+ truncate_prompt_tokens, tokenization_kwargs)
1163
+
1164
+ parsed_prompts = []
1165
+
1166
+ for q, t in input_pairs:
1167
+ prompt_inputs = tokenizer(text=q,
1168
+ text_pair=t,
1169
+ **tokenization_kwargs)
1170
+ engine_prompt = TokensPrompt(
1171
+ prompt_token_ids=prompt_inputs["input_ids"],
1172
+ token_type_ids=prompt_inputs.get("token_type_ids"))
1173
+ parsed_prompts.append(engine_prompt)
1174
+
1175
+ self._validate_and_add_requests(
1176
+ prompts=parsed_prompts,
1177
+ params=pooling_params,
1178
+ use_tqdm=use_tqdm,
1179
+ lora_request=lora_request,
1180
+ prompt_adapter_request=prompt_adapter_request,
1181
+ )
1182
+
1183
+ outputs = self._run_engine(use_tqdm=use_tqdm)
1184
+ items = self.engine_class.validate_outputs(outputs,
1185
+ PoolingRequestOutput)
1186
+
1187
+ return [ScoringRequestOutput.from_base(item) for item in items]
1188
+
1189
+ def score(
1190
+ self,
1191
+ text_1: Union[SingletonPrompt, Sequence[SingletonPrompt]],
1192
+ text_2: Union[SingletonPrompt, Sequence[SingletonPrompt]],
1193
+ /,
1194
+ *,
1195
+ truncate_prompt_tokens: Optional[int] = None,
1196
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
1197
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
1198
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
1199
+ ) -> list[ScoringRequestOutput]:
1200
+ """Generate similarity scores for all pairs `<text,text_pair>`.
1201
+
1202
+ The inputs can be `1 -> 1`, `1 -> N` or `N -> N`.
1203
+ In the `1 -> N` case the `text_1` sentence will be replicated `N`
1204
+ times to pair with the `text_2` sentences.
1205
+ The input pairs are used to build a list of prompts for the
1206
+ cross encoder model. This class automatically batches the prompts,
1207
+ considering the memory constraint. For the best performance, put all
1208
+ of your texts into a single list and pass it to this method.
1209
+
1210
+ Args:
1211
+ text_1: can be a single prompt or a list of prompts, in which
1212
+ case it has to have the same length as the `text_2` list
1213
+ text_2: The texts to pair with the query to form the input
1214
+ to the LLM. See [PromptType][vllm.inputs.PromptType] for
1215
+ more details about the format of each prompt.
1216
+ use_tqdm: If `True`, shows a tqdm progress bar.
1217
+ If a callable (e.g., `functools.partial(tqdm, leave=False)`),
1218
+ it is used to create the progress bar.
1219
+ If `False`, no progress bar is created.
1220
+ lora_request: LoRA request to use for generation, if any.
1221
+ prompt_adapter_request: Prompt Adapter request to use for
1222
+ generation, if any.
1223
+
1224
+ Returns:
1225
+ A list of `ScoringRequestOutput` objects containing the
1226
+ generated scores in the same order as the input prompts.
1227
+ """
1228
+ runner_type = self.llm_engine.model_config.runner_type
1229
+ if runner_type != "pooling":
1230
+ messages = ["LLM.score() is only supported for pooling models."]
1231
+
1232
+ supported_runner_types = self.llm_engine.model_config \
1233
+ .supported_runner_types
1234
+ if "pooling" in supported_runner_types:
1235
+ messages.append(
1236
+ "Your model supports the 'pooling' runner, but is "
1237
+ f"currently initialized for the '{runner_type}' runner. "
1238
+ "Please initialize vLLM using `--task embed`, "
1239
+ "`--task classify`, `--task score` etc.")
1240
+
1241
+ raise ValueError(" ".join(messages))
1242
+
1243
+ if self.llm_engine.model_config.task not in ("embed", "score"):
1244
+ raise ValueError(
1245
+ "Score API is only enabled for `--task embed or --task score`")
1246
+
1247
+ # the tokenizer for models such as
1248
+ # "cross-encoder/ms-marco-MiniLM-L-6-v2" doesn't support passing
1249
+ # lists of tokens to the `text` and `text_pair` kwargs
1250
+ tokenizer = self.llm_engine.get_tokenizer()
1251
+
1252
+ def ensure_str(prompt: SingletonPrompt):
1253
+ if isinstance(prompt, dict):
1254
+ if "multi_modal_data" in prompt:
1255
+ raise ValueError("Multi-modal prompt is not "
1256
+ "supported for scoring")
1257
+ elif "prompt_token_ids" in prompt:
1258
+ prompt = tokenizer.decode(
1259
+ cast(TokensPrompt, prompt)["prompt_token_ids"])
1260
+ elif "prompt" in prompt:
1261
+ prompt = cast(TextPrompt, prompt)["prompt"]
1262
+ assert type(prompt) is str
1263
+ return prompt
1264
+
1265
+ if isinstance(text_1, (str, dict)):
1266
+ # Convert a single prompt to a list.
1267
+ text_1 = [text_1]
1268
+ input_text_1: list[str] = [ensure_str(t) for t in text_1]
1269
+
1270
+ if isinstance(text_2, (str, dict)):
1271
+ # Convert a single prompt to a list.
1272
+ text_2 = [text_2]
1273
+ input_text_2: list[str] = [ensure_str(t) for t in text_2]
1274
+
1275
+ _validate_score_input_lens(input_text_1, input_text_2)
1276
+
1277
+ if self.llm_engine.model_config.is_cross_encoder:
1278
+ return self._cross_encoding_score(tokenizer, input_text_1,
1279
+ input_text_2,
1280
+ truncate_prompt_tokens, use_tqdm,
1281
+ lora_request,
1282
+ prompt_adapter_request)
1283
+ else:
1284
+ return self._embedding_score(
1285
+ tokenizer,
1286
+ input_text_1, # type: ignore[arg-type]
1287
+ input_text_2, # type: ignore[arg-type]
1288
+ truncate_prompt_tokens,
1289
+ use_tqdm,
1290
+ lora_request,
1291
+ prompt_adapter_request)
1292
+
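+ # --- Editor's note: illustrative sketch (not part of the original source) of
+ # score() with a cross-encoder model initialized via task="score". The model
+ # name is an assumed placeholder.
+ def _example_score_usage():  # hypothetical helper, illustration only
+     from vllm import LLM
+
+     llm = LLM(model="BAAI/bge-reranker-v2-m3", task="score")  # assumed model
+     # 1 -> N: the single query is paired with every candidate text.
+     outputs = llm.score(
+         "What is the capital of France?",
+         ["Paris is the capital of France.", "The sky is blue."])
+     for out in outputs:
+         print(out.outputs.score)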
1293
+ def start_profile(self) -> None:
1294
+ self.llm_engine.start_profile()
1295
+
1296
+ def stop_profile(self) -> None:
1297
+ self.llm_engine.stop_profile()
1298
+
1299
+ def reset_prefix_cache(self, device: Optional[Device] = None) -> bool:
1300
+ return self.llm_engine.reset_prefix_cache(device)
1301
+
1302
+ def sleep(self, level: int = 1):
1303
+ """
1304
+ Put the engine to sleep. The engine should not process any requests.
1305
+ The caller should guarantee that no requests are being processed
1306
+ during the sleep period, before `wake_up` is called.
1307
+
1308
+ Args:
1309
+ level: The sleep level. Level 1 sleep will offload the model
1310
+ weights and discard the kv cache. The content of kv cache
1311
+ is forgotten. Level 1 sleep is good for sleeping and waking
1312
+ up the engine to run the same model again. The model weights
1313
+ are backed up in CPU memory. Please make sure there's enough
1314
+ CPU memory to store the model weights. Level 2 sleep will
1315
+ discard both the model weights and the kv cache. The content
1316
+ of both the model weights and kv cache is forgotten. Level 2
1317
+ sleep is good for sleeping and waking up the engine to run a
1318
+ different model or update the model, where previous model
1319
+ weights are not needed. It reduces CPU memory pressure.
1320
+ """
1321
+ self.reset_prefix_cache()
1322
+ self.llm_engine.sleep(level=level)
1323
+
1324
+ def wake_up(self, tags: Optional[list[str]] = None):
1325
+ """
1326
+ Wake up the engine from sleep mode. See the [sleep][] method
1327
+ for more details.
1328
+
1329
+ Args:
1330
+ tags: An optional list of tags selecting which memory allocations
1331
+ to reallocate. Values must be in
1332
+ `("weights", "kv_cache")`. If None, all memory is reallocated.
1333
+ wake_up should be called with all tags (or None) before the
1334
+ engine is used again.
1335
+ """
1336
+ self.llm_engine.wake_up(tags)
1337
+
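+ # --- Editor's note: illustrative sketch (not part of the original source) of
+ # sleep()/wake_up(). Sleep mode must be enabled at construction time via
+ # enable_sleep_mode=True and is aimed at GPU deployments; the model name is
+ # an assumed placeholder.
+ def _example_sleep_wake_up_usage():  # hypothetical helper, illustration only
+     from vllm import LLM
+
+     llm = LLM(model="facebook/opt-125m", enable_sleep_mode=True)
+     llm.sleep(level=1)   # offload weights to CPU RAM and drop the KV cache
+     # ... do other work while the engine is asleep ...
+     llm.wake_up()        # reallocate memory before serving requests again
+     print(llm.generate(["Hello"])[0].outputs[0].text)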
1338
+ def get_metrics(self) -> list["Metric"]:
1339
+ """Return a snapshot of aggregated metrics from Prometheus.
1340
+
1341
+ Returns:
1342
+ A ``MetricSnapshot`` instance capturing the current state
1343
+ of all aggregated metrics from Prometheus.
1344
+
1345
+ Note:
1346
+ This method is only available with the V1 LLM engine.
1347
+ """
1348
+ from vllm.v1.engine.llm_engine import LLMEngine as V1LLMEngine
1349
+ assert isinstance(self.llm_engine, V1LLMEngine)
1350
+ return self.llm_engine.get_metrics()
1351
+
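+ # --- Editor's note: illustrative sketch (not part of the original source) of
+ # get_metrics(), which is only available with the V1 engine. The model name
+ # is an assumed placeholder.
+ def _example_get_metrics_usage():  # hypothetical helper, illustration only
+     from vllm import LLM
+
+     llm = LLM(model="facebook/opt-125m")
+     llm.generate(["Hello"])
+     for metric in llm.get_metrics():
+         # Each entry is a Metric snapshot (e.g. counter, gauge, histogram).
+         print(metric.name)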
1352
+ # LEGACY
1353
+ def _convert_v1_inputs(
1354
+ self,
1355
+ prompts: Optional[Union[str, list[str]]],
1356
+ prompt_token_ids: Optional[Union[list[int], list[list[int]]]],
1357
+ ):
1358
+ # skip_tokenizer_init is now checked in engine
1359
+
1360
+ if prompts is None and prompt_token_ids is None:
1361
+ raise ValueError(
1362
+ "Either prompts or prompt_token_ids must be provided.")
1363
+ if prompts is not None and prompt_token_ids is not None \
1364
+ and len(prompts) != len(prompt_token_ids):
1365
+ raise ValueError(
1366
+ "The lengths of prompts and prompt_token_ids must be the same."
1367
+ )
1368
+
1369
+ if prompts is not None:
1370
+ prompts = [p["content"] for p in parse_and_batch_prompt(prompts)]
1371
+ if prompt_token_ids is not None:
1372
+ prompt_token_ids = [
1373
+ p["content"] for p in parse_and_batch_prompt(prompt_token_ids)
1374
+ ]
1375
+ if prompts is not None:
1376
+ num_requests = len(prompts)
1377
+ elif prompt_token_ids is not None:
1378
+ num_requests = len(prompt_token_ids)
1379
+ parsed_prompts: list[PromptType] = []
1380
+ for i in range(num_requests):
1381
+ item: PromptType
1382
+
1383
+ if prompts is not None:
1384
+ item = TextPrompt(prompt=prompts[i])
1385
+ elif prompt_token_ids is not None:
1386
+ item = TokensPrompt(prompt_token_ids=prompt_token_ids[i])
1387
+ else:
1388
+ raise AssertionError
1389
+
1390
+ parsed_prompts.append(item)
1391
+
1392
+ return parsed_prompts
1393
+
1394
+ def _validate_and_add_requests(
1395
+ self,
1396
+ prompts: Union[PromptType, Sequence[PromptType]],
1397
+ params: Union[SamplingParams, Sequence[SamplingParams], PoolingParams,
1398
+ Sequence[PoolingParams]],
1399
+ *,
1400
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
1401
+ lora_request: Optional[Union[Sequence[LoRARequest], LoRARequest]],
1402
+ prompt_adapter_request: Optional[PromptAdapterRequest],
1403
+ tokenization_kwargs: Optional[dict[str, Any]] = None,
1404
+ guided_options: Optional[GuidedDecodingRequest] = None,
1405
+ priority: Optional[list[int]] = None,
1406
+ ) -> None:
1407
+ if guided_options is not None:
1408
+ warnings.warn(
1409
+ "guided_options_request is deprecated, use "
1410
+ "SamplingParams.guided_decoding instead",
1411
+ DeprecationWarning,
1412
+ stacklevel=2,
1413
+ )
1414
+
1415
+ if isinstance(prompts, (str, dict)):
1416
+ # Convert a single prompt to a list.
1417
+ prompts = [prompts]
1418
+
1419
+ num_requests = len(prompts)
1420
+ if isinstance(params, list) and len(params) != num_requests:
1421
+ raise ValueError("The lengths of prompts and params "
1422
+ "must be the same.")
1423
+ if isinstance(lora_request,
1424
+ list) and len(lora_request) != num_requests:
1425
+ raise ValueError("The lengths of prompts and lora_request "
1426
+ "must be the same.")
1427
+
1428
+ for sp in params if isinstance(params, list) else (params, ):
1429
+ if isinstance(sp, SamplingParams):
1430
+ self._add_guided_params(sp, guided_options)
1431
+
1432
+ # We only care about the final output
1433
+ sp.output_kind = RequestOutputKind.FINAL_ONLY
1434
+
1435
+ # Add requests to the engine.
1436
+ it = prompts
1437
+ if use_tqdm:
1438
+ tqdm_func = use_tqdm if callable(use_tqdm) else tqdm
1439
+ it = tqdm_func(it, desc="Adding requests")
1440
+
1441
+ for i, prompt in enumerate(it):
1442
+ self._add_request(
1443
+ prompt,
1444
+ params[i] if isinstance(params, Sequence) else params,
1445
+ tokenization_kwargs=tokenization_kwargs,
1446
+ lora_request=lora_request[i] if isinstance(
1447
+ lora_request, Sequence) else lora_request,
1448
+ prompt_adapter_request=prompt_adapter_request,
1449
+ priority=priority[i] if priority else 0,
1450
+ )
1451
+
1452
+ def _add_request(
1453
+ self,
1454
+ prompt: PromptType,
1455
+ params: Union[SamplingParams, PoolingParams],
1456
+ tokenization_kwargs: Optional[dict[str, Any]] = None,
1457
+ lora_request: Optional[LoRARequest] = None,
1458
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
1459
+ priority: int = 0,
1460
+ ) -> None:
1461
+ request_id = str(next(self.request_counter))
1462
+ self.llm_engine.add_request(
1463
+ request_id,
1464
+ prompt,
1465
+ params,
1466
+ lora_request=lora_request,
1467
+ tokenization_kwargs=tokenization_kwargs,
1468
+ prompt_adapter_request=prompt_adapter_request,
1469
+ priority=priority,
1470
+ )
1471
+
1472
+ def _add_guided_params(
1473
+ self,
1474
+ params: SamplingParams,
1475
+ guided_options: Optional[GuidedDecodingRequest] = None):
1476
+ if guided_options is None:
1477
+ return params
1478
+
1479
+ if params.guided_decoding is not None:
1480
+ raise ValueError("Cannot set both guided_options_request and "
1481
+ "params.guided_decoding.")
1482
+
1483
+ params.guided_decoding = GuidedDecodingParams(
1484
+ json=guided_options.guided_json,
1485
+ regex=guided_options.guided_regex,
1486
+ choice=guided_options.guided_choice,
1487
+ grammar=guided_options.guided_grammar,
1488
+ json_object=guided_options.guided_json_object,
1489
+ backend=guided_options.guided_decoding_backend,
1490
+ whitespace_pattern=guided_options.guided_whitespace_pattern,
1491
+ structural_tag=guided_options.structural_tag,
1492
+ )
1493
+ return params
1494
+
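+ # --- Editor's note: illustrative sketch (not part of the original source) of
+ # the replacement recommended by the deprecation warning above: set
+ # SamplingParams.guided_decoding directly instead of guided_options_request.
+ # The model name and choice list are assumed placeholders.
+ def _example_guided_decoding_usage():  # hypothetical helper, illustration only
+     from vllm import LLM, SamplingParams
+     from vllm.sampling_params import GuidedDecodingParams
+
+     llm = LLM(model="facebook/opt-125m")
+     params = SamplingParams(
+         max_tokens=8,
+         guided_decoding=GuidedDecodingParams(choice=["Positive", "Negative"]))
+     outputs = llm.generate(["The movie was great. Sentiment:"], params)
+     print(outputs[0].outputs[0].text)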
1495
+ def _run_engine(
1496
+ self,
1497
+ *,
1498
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True
1499
+ ) -> list[Union[RequestOutput, PoolingRequestOutput]]:
1500
+ # Initialize tqdm.
1501
+ if use_tqdm:
1502
+ num_requests = self.llm_engine.get_num_unfinished_requests()
1503
+ tqdm_func = use_tqdm if callable(use_tqdm) else tqdm
1504
+ pbar = tqdm_func(
1505
+ total=num_requests,
1506
+ desc="Processed prompts",
1507
+ dynamic_ncols=True,
1508
+ postfix=(f"est. speed input: {0:.2f} toks/s, "
1509
+ f"output: {0:.2f} toks/s"),
1510
+ )
1511
+
1512
+ # Run the engine.
1513
+ outputs: list[Union[RequestOutput, PoolingRequestOutput]] = []
1514
+ total_in_toks = 0
1515
+ total_out_toks = 0
1516
+ while self.llm_engine.has_unfinished_requests():
1517
+ step_outputs = self.llm_engine.step()
1518
+ for output in step_outputs:
1519
+ if output.finished:
1520
+ outputs.append(output)
1521
+ if use_tqdm:
1522
+ if isinstance(output, RequestOutput):
1523
+ # Calculate tokens only for RequestOutput
1524
+ n = len(output.outputs)
1525
+ assert output.prompt_token_ids is not None
1526
+ total_in_toks += len(output.prompt_token_ids) * n
1527
+ in_spd = total_in_toks / pbar.format_dict["elapsed"]
1528
+ total_out_toks += sum(
1529
+ len(stp.token_ids) for stp in output.outputs)
1530
+ out_spd = (total_out_toks /
1531
+ pbar.format_dict["elapsed"])
1532
+ pbar.postfix = (
1533
+ f"est. speed input: {in_spd:.2f} toks/s, "
1534
+ f"output: {out_spd:.2f} toks/s")
1535
+ pbar.update(n)
1536
+ else:
1537
+ pbar.update(1)
1538
+
1539
+ if use_tqdm:
1540
+ pbar.close()
1541
+ # Sort the outputs by request ID.
1542
+ # This is necessary because some requests may be finished earlier than
1543
+ # its previous requests.
1544
+ return sorted(outputs, key=lambda x: int(x.request_id))