vllm_cpu_avx512bf16-0.9.0.post2-cp310-cp310-manylinux_2_17_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (1175)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +170 -0
  3. vllm/_custom_ops.py +1742 -0
  4. vllm/_ipex_ops.py +243 -0
  5. vllm/_version.py +34 -0
  6. vllm/adapter_commons/__init__.py +0 -0
  7. vllm/adapter_commons/layers.py +15 -0
  8. vllm/adapter_commons/models.py +105 -0
  9. vllm/adapter_commons/request.py +25 -0
  10. vllm/adapter_commons/utils.py +92 -0
  11. vllm/adapter_commons/worker_manager.py +38 -0
  12. vllm/assets/__init__.py +0 -0
  13. vllm/assets/audio.py +44 -0
  14. vllm/assets/base.py +40 -0
  15. vllm/assets/image.py +33 -0
  16. vllm/assets/video.py +114 -0
  17. vllm/attention/__init__.py +19 -0
  18. vllm/attention/backends/__init__.py +0 -0
  19. vllm/attention/backends/abstract.py +306 -0
  20. vllm/attention/backends/blocksparse_attn.py +457 -0
  21. vllm/attention/backends/cpu_mla.py +305 -0
  22. vllm/attention/backends/dual_chunk_flash_attn.py +1494 -0
  23. vllm/attention/backends/flash_attn.py +999 -0
  24. vllm/attention/backends/flashinfer.py +1100 -0
  25. vllm/attention/backends/flashmla.py +242 -0
  26. vllm/attention/backends/hpu_attn.py +309 -0
  27. vllm/attention/backends/ipex_attn.py +394 -0
  28. vllm/attention/backends/mla/__init__.py +0 -0
  29. vllm/attention/backends/mla/common.py +1381 -0
  30. vllm/attention/backends/pallas.py +347 -0
  31. vllm/attention/backends/placeholder_attn.py +399 -0
  32. vllm/attention/backends/rocm_aiter_mla.py +435 -0
  33. vllm/attention/backends/rocm_flash_attn.py +970 -0
  34. vllm/attention/backends/torch_sdpa.py +691 -0
  35. vllm/attention/backends/triton_mla.py +113 -0
  36. vllm/attention/backends/utils.py +609 -0
  37. vllm/attention/backends/xformers.py +798 -0
  38. vllm/attention/layer.py +452 -0
  39. vllm/attention/ops/__init__.py +0 -0
  40. vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
  41. vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +432 -0
  42. vllm/attention/ops/blocksparse_attention/interface.py +238 -0
  43. vllm/attention/ops/blocksparse_attention/utils.py +245 -0
  44. vllm/attention/ops/chunked_prefill_paged_decode.py +367 -0
  45. vllm/attention/ops/flashmla.py +115 -0
  46. vllm/attention/ops/hpu_paged_attn.py +87 -0
  47. vllm/attention/ops/ipex_attn.py +194 -0
  48. vllm/attention/ops/merge_attn_states.py +42 -0
  49. vllm/attention/ops/nki_flash_attn.py +905 -0
  50. vllm/attention/ops/paged_attn.py +255 -0
  51. vllm/attention/ops/prefix_prefill.py +901 -0
  52. vllm/attention/ops/rocm_aiter_mla.py +99 -0
  53. vllm/attention/ops/rocm_aiter_paged_attn.py +101 -0
  54. vllm/attention/ops/triton_decode_attention.py +673 -0
  55. vllm/attention/ops/triton_flash_attention.py +1374 -0
  56. vllm/attention/ops/triton_merge_attn_states.py +96 -0
  57. vllm/attention/ops/triton_unified_attention.py +337 -0
  58. vllm/attention/selector.py +186 -0
  59. vllm/attention/utils/fa_utils.py +54 -0
  60. vllm/beam_search.py +82 -0
  61. vllm/benchmarks/__init__.py +0 -0
  62. vllm/benchmarks/datasets.py +921 -0
  63. vllm/benchmarks/endpoint_request_func.py +160 -0
  64. vllm/benchmarks/latency.py +184 -0
  65. vllm/benchmarks/serve.py +925 -0
  66. vllm/benchmarks/throughput.py +609 -0
  67. vllm/benchmarks/utils.py +69 -0
  68. vllm/collect_env.py +818 -0
  69. vllm/compilation/__init__.py +0 -0
  70. vllm/compilation/activation_quant_fusion.py +88 -0
  71. vllm/compilation/backends.py +560 -0
  72. vllm/compilation/base_piecewise_backend.py +71 -0
  73. vllm/compilation/collective_fusion.py +126 -0
  74. vllm/compilation/compiler_interface.py +533 -0
  75. vllm/compilation/counter.py +33 -0
  76. vllm/compilation/cuda_piecewise_backend.py +213 -0
  77. vllm/compilation/decorators.py +249 -0
  78. vllm/compilation/fix_functionalization.py +190 -0
  79. vllm/compilation/fusion.py +617 -0
  80. vllm/compilation/fx_utils.py +61 -0
  81. vllm/compilation/inductor_pass.py +114 -0
  82. vllm/compilation/monitor.py +38 -0
  83. vllm/compilation/multi_output_match.py +108 -0
  84. vllm/compilation/noop_elimination.py +136 -0
  85. vllm/compilation/pass_manager.py +77 -0
  86. vllm/compilation/sequence_parallelism.py +267 -0
  87. vllm/compilation/torch25_custom_graph_pass.py +41 -0
  88. vllm/compilation/vllm_inductor_pass.py +66 -0
  89. vllm/compilation/wrapper.py +129 -0
  90. vllm/config.py +4600 -0
  91. vllm/connections.py +173 -0
  92. vllm/core/__init__.py +0 -0
  93. vllm/core/block/__init__.py +0 -0
  94. vllm/core/block/block_table.py +398 -0
  95. vllm/core/block/common.py +370 -0
  96. vllm/core/block/cpu_gpu_block_allocator.py +440 -0
  97. vllm/core/block/interfaces.py +318 -0
  98. vllm/core/block/naive_block.py +465 -0
  99. vllm/core/block/prefix_caching_block.py +1134 -0
  100. vllm/core/block/utils.py +27 -0
  101. vllm/core/block_manager.py +520 -0
  102. vllm/core/evictor.py +156 -0
  103. vllm/core/interfaces.py +134 -0
  104. vllm/core/placeholder_block_space_manager.py +99 -0
  105. vllm/core/scheduler.py +2092 -0
  106. vllm/device_allocator/__init__.py +0 -0
  107. vllm/device_allocator/cumem.py +280 -0
  108. vllm/distributed/__init__.py +5 -0
  109. vllm/distributed/communication_op.py +40 -0
  110. vllm/distributed/device_communicators/__init__.py +0 -0
  111. vllm/distributed/device_communicators/all2all.py +126 -0
  112. vllm/distributed/device_communicators/base_device_communicator.py +260 -0
  113. vllm/distributed/device_communicators/cpu_communicator.py +144 -0
  114. vllm/distributed/device_communicators/cuda_communicator.py +167 -0
  115. vllm/distributed/device_communicators/cuda_wrapper.py +179 -0
  116. vllm/distributed/device_communicators/custom_all_reduce.py +303 -0
  117. vllm/distributed/device_communicators/custom_all_reduce_utils.py +258 -0
  118. vllm/distributed/device_communicators/hpu_communicator.py +45 -0
  119. vllm/distributed/device_communicators/neuron_communicator.py +19 -0
  120. vllm/distributed/device_communicators/pynccl.py +217 -0
  121. vllm/distributed/device_communicators/pynccl_wrapper.py +340 -0
  122. vllm/distributed/device_communicators/shm_broadcast.py +541 -0
  123. vllm/distributed/device_communicators/tpu_communicator.py +102 -0
  124. vllm/distributed/device_communicators/xpu_communicator.py +54 -0
  125. vllm/distributed/kv_events.py +296 -0
  126. vllm/distributed/kv_transfer/README.md +29 -0
  127. vllm/distributed/kv_transfer/__init__.py +11 -0
  128. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  129. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  130. vllm/distributed/kv_transfer/kv_connector/base.py +127 -0
  131. vllm/distributed/kv_transfer/kv_connector/factory.py +126 -0
  132. vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +98 -0
  133. vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +202 -0
  134. vllm/distributed/kv_transfer/kv_connector/simple_connector.py +328 -0
  135. vllm/distributed/kv_transfer/kv_connector/utils.py +91 -0
  136. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +5 -0
  137. vllm/distributed/kv_transfer/kv_connector/v1/base.py +259 -0
  138. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +133 -0
  139. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +189 -0
  140. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +851 -0
  141. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +383 -0
  142. vllm/distributed/kv_transfer/kv_connector_agent.py +76 -0
  143. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  144. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +174 -0
  145. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +160 -0
  146. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +236 -0
  147. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  148. vllm/distributed/kv_transfer/kv_pipe/base.py +66 -0
  149. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +279 -0
  150. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +279 -0
  151. vllm/distributed/kv_transfer/kv_transfer_state.py +70 -0
  152. vllm/distributed/parallel_state.py +1294 -0
  153. vllm/distributed/utils.py +520 -0
  154. vllm/engine/__init__.py +0 -0
  155. vllm/engine/arg_utils.py +1649 -0
  156. vllm/engine/async_llm_engine.py +1274 -0
  157. vllm/engine/async_timeout.py +191 -0
  158. vllm/engine/llm_engine.py +2153 -0
  159. vllm/engine/metrics.py +717 -0
  160. vllm/engine/metrics_types.py +96 -0
  161. vllm/engine/multiprocessing/__init__.py +188 -0
  162. vllm/engine/multiprocessing/client.py +755 -0
  163. vllm/engine/multiprocessing/engine.py +459 -0
  164. vllm/engine/output_processor/__init__.py +0 -0
  165. vllm/engine/output_processor/interfaces.py +74 -0
  166. vllm/engine/output_processor/multi_step.py +215 -0
  167. vllm/engine/output_processor/single_step.py +144 -0
  168. vllm/engine/output_processor/stop_checker.py +130 -0
  169. vllm/engine/output_processor/util.py +27 -0
  170. vllm/engine/protocol.py +310 -0
  171. vllm/entrypoints/__init__.py +0 -0
  172. vllm/entrypoints/api_server.py +177 -0
  173. vllm/entrypoints/chat_utils.py +1298 -0
  174. vllm/entrypoints/cli/__init__.py +0 -0
  175. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  176. vllm/entrypoints/cli/benchmark/base.py +38 -0
  177. vllm/entrypoints/cli/benchmark/latency.py +29 -0
  178. vllm/entrypoints/cli/benchmark/main.py +53 -0
  179. vllm/entrypoints/cli/benchmark/serve.py +29 -0
  180. vllm/entrypoints/cli/benchmark/throughput.py +29 -0
  181. vllm/entrypoints/cli/collect_env.py +34 -0
  182. vllm/entrypoints/cli/main.py +62 -0
  183. vllm/entrypoints/cli/openai.py +204 -0
  184. vllm/entrypoints/cli/serve.py +141 -0
  185. vllm/entrypoints/cli/types.py +24 -0
  186. vllm/entrypoints/launcher.py +146 -0
  187. vllm/entrypoints/llm.py +1503 -0
  188. vllm/entrypoints/logger.py +49 -0
  189. vllm/entrypoints/openai/__init__.py +0 -0
  190. vllm/entrypoints/openai/api_server.py +1376 -0
  191. vllm/entrypoints/openai/cli_args.py +306 -0
  192. vllm/entrypoints/openai/logits_processors.py +89 -0
  193. vllm/entrypoints/openai/protocol.py +1890 -0
  194. vllm/entrypoints/openai/run_batch.py +439 -0
  195. vllm/entrypoints/openai/serving_chat.py +1192 -0
  196. vllm/entrypoints/openai/serving_classification.py +159 -0
  197. vllm/entrypoints/openai/serving_completion.py +590 -0
  198. vllm/entrypoints/openai/serving_embedding.py +200 -0
  199. vllm/entrypoints/openai/serving_engine.py +985 -0
  200. vllm/entrypoints/openai/serving_models.py +314 -0
  201. vllm/entrypoints/openai/serving_pooling.py +231 -0
  202. vllm/entrypoints/openai/serving_score.py +432 -0
  203. vllm/entrypoints/openai/serving_tokenization.py +151 -0
  204. vllm/entrypoints/openai/serving_transcription.py +421 -0
  205. vllm/entrypoints/openai/tool_parsers/__init__.py +22 -0
  206. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +163 -0
  207. vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +369 -0
  208. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +258 -0
  209. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +236 -0
  210. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +370 -0
  211. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +215 -0
  212. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +307 -0
  213. vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +302 -0
  214. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +266 -0
  215. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +342 -0
  216. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +111 -0
  217. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +296 -0
  218. vllm/entrypoints/openai/tool_parsers/utils.py +123 -0
  219. vllm/entrypoints/score_utils.py +49 -0
  220. vllm/entrypoints/ssl.py +74 -0
  221. vllm/entrypoints/utils.py +219 -0
  222. vllm/env_override.py +34 -0
  223. vllm/envs.py +896 -0
  224. vllm/executor/__init__.py +0 -0
  225. vllm/executor/executor_base.py +400 -0
  226. vllm/executor/mp_distributed_executor.py +243 -0
  227. vllm/executor/msgspec_utils.py +29 -0
  228. vllm/executor/multiproc_worker_utils.py +312 -0
  229. vllm/executor/ray_distributed_executor.py +700 -0
  230. vllm/executor/ray_utils.py +398 -0
  231. vllm/executor/uniproc_executor.py +138 -0
  232. vllm/forward_context.py +147 -0
  233. vllm/inputs/__init__.py +40 -0
  234. vllm/inputs/data.py +330 -0
  235. vllm/inputs/parse.py +150 -0
  236. vllm/inputs/preprocess.py +908 -0
  237. vllm/inputs/registry.py +214 -0
  238. vllm/jsontree.py +79 -0
  239. vllm/logger.py +211 -0
  240. vllm/logging_utils/__init__.py +7 -0
  241. vllm/logging_utils/dump_input.py +84 -0
  242. vllm/logging_utils/formatter.py +17 -0
  243. vllm/logits_process.py +118 -0
  244. vllm/lora/__init__.py +0 -0
  245. vllm/lora/fully_sharded_layers.py +354 -0
  246. vllm/lora/layers.py +1284 -0
  247. vllm/lora/lora.py +198 -0
  248. vllm/lora/models.py +817 -0
  249. vllm/lora/ops/__init__.py +0 -0
  250. vllm/lora/ops/torch_ops/__init__.py +15 -0
  251. vllm/lora/ops/torch_ops/lora_ops.py +115 -0
  252. vllm/lora/ops/triton_ops/__init__.py +11 -0
  253. vllm/lora/ops/triton_ops/kernel_utils.py +242 -0
  254. vllm/lora/ops/triton_ops/lora_expand_op.py +289 -0
  255. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +147 -0
  256. vllm/lora/ops/triton_ops/lora_shrink_op.py +243 -0
  257. vllm/lora/ops/triton_ops/utils.py +119 -0
  258. vllm/lora/ops/xla_ops/__init__.py +6 -0
  259. vllm/lora/ops/xla_ops/lora_ops.py +106 -0
  260. vllm/lora/ops/xla_ops/pallas.py +133 -0
  261. vllm/lora/peft_helper.py +135 -0
  262. vllm/lora/punica_wrapper/__init__.py +9 -0
  263. vllm/lora/punica_wrapper/punica_base.py +484 -0
  264. vllm/lora/punica_wrapper/punica_cpu.py +348 -0
  265. vllm/lora/punica_wrapper/punica_gpu.py +289 -0
  266. vllm/lora/punica_wrapper/punica_hpu.py +144 -0
  267. vllm/lora/punica_wrapper/punica_selector.py +19 -0
  268. vllm/lora/punica_wrapper/punica_tpu.py +325 -0
  269. vllm/lora/punica_wrapper/utils.py +163 -0
  270. vllm/lora/request.py +98 -0
  271. vllm/lora/resolver.py +84 -0
  272. vllm/lora/utils.py +239 -0
  273. vllm/lora/worker_manager.py +253 -0
  274. vllm/model_executor/__init__.py +15 -0
  275. vllm/model_executor/custom_op.py +151 -0
  276. vllm/model_executor/guided_decoding/__init__.py +180 -0
  277. vllm/model_executor/guided_decoding/guidance_decoding.py +62 -0
  278. vllm/model_executor/guided_decoding/guidance_logits_processors.py +103 -0
  279. vllm/model_executor/guided_decoding/guided_fields.py +42 -0
  280. vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +66 -0
  281. vllm/model_executor/guided_decoding/outlines_decoding.py +154 -0
  282. vllm/model_executor/guided_decoding/outlines_logits_processors.py +283 -0
  283. vllm/model_executor/guided_decoding/utils.py +241 -0
  284. vllm/model_executor/guided_decoding/xgrammar_decoding.py +425 -0
  285. vllm/model_executor/layers/__init__.py +0 -0
  286. vllm/model_executor/layers/activation.py +368 -0
  287. vllm/model_executor/layers/fused_moe/__init__.py +53 -0
  288. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  289. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  290. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  291. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  292. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  293. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  294. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  295. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  296. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  297. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  298. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  299. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  300. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  301. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  302. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  303. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  304. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  305. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  306. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  307. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  308. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  309. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  310. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  311. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  312. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  313. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  314. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  315. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  316. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  317. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  318. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  319. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  320. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  321. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  322. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  323. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  324. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  325. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  326. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  327. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  328. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  329. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  330. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  331. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  332. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  333. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  334. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  335. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  336. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  337. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  338. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  339. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  449. vllm/model_executor/layers/fused_moe/cutlass_moe.py +382 -0
  450. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +227 -0
  451. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +755 -0
  452. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +231 -0
  453. vllm/model_executor/layers/fused_moe/fused_moe.py +1722 -0
  454. vllm/model_executor/layers/fused_moe/layer.py +1366 -0
  455. vllm/model_executor/layers/fused_moe/modular_kernel.py +364 -0
  456. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +242 -0
  457. vllm/model_executor/layers/fused_moe/moe_pallas.py +83 -0
  458. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +188 -0
  459. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +59 -0
  460. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +146 -0
  461. vllm/model_executor/layers/fused_moe/prepare_finalize.py +60 -0
  462. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +372 -0
  463. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +112 -0
  464. vllm/model_executor/layers/fused_moe/utils.py +97 -0
  465. vllm/model_executor/layers/layernorm.py +287 -0
  466. vllm/model_executor/layers/lightning_attn.py +651 -0
  467. vllm/model_executor/layers/linear.py +1523 -0
  468. vllm/model_executor/layers/logits_processor.py +196 -0
  469. vllm/model_executor/layers/mamba/__init__.py +0 -0
  470. vllm/model_executor/layers/mamba/mamba2_metadata.py +124 -0
  471. vllm/model_executor/layers/mamba/mamba_mixer.py +244 -0
  472. vllm/model_executor/layers/mamba/mamba_mixer2.py +615 -0
  473. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  474. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +104 -0
  475. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +413 -0
  476. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +261 -0
  477. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +588 -0
  478. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +750 -0
  479. vllm/model_executor/layers/mamba/ops/ssd_combined.py +231 -0
  480. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +205 -0
  481. vllm/model_executor/layers/pooler.py +343 -0
  482. vllm/model_executor/layers/quantization/__init__.py +156 -0
  483. vllm/model_executor/layers/quantization/aqlm.py +375 -0
  484. vllm/model_executor/layers/quantization/auto_round.py +308 -0
  485. vllm/model_executor/layers/quantization/awq.py +185 -0
  486. vllm/model_executor/layers/quantization/awq_marlin.py +518 -0
  487. vllm/model_executor/layers/quantization/awq_triton.py +319 -0
  488. vllm/model_executor/layers/quantization/base_config.py +150 -0
  489. vllm/model_executor/layers/quantization/bitblas.py +460 -0
  490. vllm/model_executor/layers/quantization/bitsandbytes.py +397 -0
  491. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  492. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +644 -0
  493. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1252 -0
  494. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +21 -0
  495. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +357 -0
  496. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +54 -0
  497. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +159 -0
  498. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +92 -0
  499. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +120 -0
  500. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +149 -0
  501. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +110 -0
  502. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +200 -0
  503. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +205 -0
  504. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +214 -0
  505. vllm/model_executor/layers/quantization/deepspeedfp.py +194 -0
  506. vllm/model_executor/layers/quantization/experts_int8.py +195 -0
  507. vllm/model_executor/layers/quantization/fbgemm_fp8.py +171 -0
  508. vllm/model_executor/layers/quantization/fp8.py +876 -0
  509. vllm/model_executor/layers/quantization/gguf.py +564 -0
  510. vllm/model_executor/layers/quantization/gptq.py +277 -0
  511. vllm/model_executor/layers/quantization/gptq_bitblas.py +444 -0
  512. vllm/model_executor/layers/quantization/gptq_marlin.py +647 -0
  513. vllm/model_executor/layers/quantization/gptq_marlin_24.py +296 -0
  514. vllm/model_executor/layers/quantization/hqq_marlin.py +331 -0
  515. vllm/model_executor/layers/quantization/ipex_quant.py +249 -0
  516. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  517. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +89 -0
  518. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +82 -0
  519. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
  520. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +299 -0
  521. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +142 -0
  522. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +119 -0
  523. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +130 -0
  524. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +66 -0
  525. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +86 -0
  526. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +119 -0
  527. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +136 -0
  528. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +40 -0
  529. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +104 -0
  530. vllm/model_executor/layers/quantization/kv_cache.py +138 -0
  531. vllm/model_executor/layers/quantization/marlin.py +260 -0
  532. vllm/model_executor/layers/quantization/modelopt.py +734 -0
  533. vllm/model_executor/layers/quantization/moe_wna16.py +448 -0
  534. vllm/model_executor/layers/quantization/neuron_quant.py +68 -0
  535. vllm/model_executor/layers/quantization/ptpc_fp8.py +126 -0
  536. vllm/model_executor/layers/quantization/qqq.py +274 -0
  537. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  538. vllm/model_executor/layers/quantization/quark/quark.py +440 -0
  539. vllm/model_executor/layers/quantization/quark/quark_moe.py +236 -0
  540. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +8 -0
  541. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +54 -0
  542. vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +125 -0
  543. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +145 -0
  544. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +121 -0
  545. vllm/model_executor/layers/quantization/quark/utils.py +104 -0
  546. vllm/model_executor/layers/quantization/schema.py +85 -0
  547. vllm/model_executor/layers/quantization/torchao.py +143 -0
  548. vllm/model_executor/layers/quantization/tpu_int8.py +120 -0
  549. vllm/model_executor/layers/quantization/utils/__init__.py +5 -0
  550. vllm/model_executor/layers/quantization/utils/allspark_utils.py +51 -0
  551. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +207 -0
  552. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  553. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  554. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  555. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  556. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  557. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  558. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  559. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  560. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  561. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  562. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  563. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  564. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  565. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  566. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  567. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  568. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  569. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  570. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  571. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  572. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  573. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  574. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  575. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  576. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  577. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  578. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  579. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  580. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  581. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  582. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  583. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  584. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  585. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  586. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  587. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  588. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  589. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  590. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  591. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  592. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  593. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  594. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  595. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  596. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  597. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  598. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  599. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  600. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  601. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  602. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  603. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  604. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  605. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  606. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  607. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  608. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  609. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  610. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  611. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  612. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  613. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  614. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  615. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  616. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  617. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  618. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  619. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  620. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  621. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  622. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  623. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  624. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  625. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  626. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  627. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  628. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  629. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  630. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  631. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  632. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  633. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  634. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  635. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  636. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  637. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  638. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  639. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  640. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  641. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  642. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  643. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  644. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  645. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  646. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  647. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  648. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  649. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  650. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  651. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  652. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  653. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  654. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  655. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  656. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  657. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  658. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  659. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  660. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  661. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  662. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  663. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  664. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  665. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  666. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  667. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  668. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  669. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  670. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  671. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  672. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  673. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  674. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  675. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  676. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  677. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  678. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  679. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  680. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  681. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  682. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  683. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  684. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  685. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  686. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  687. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  688. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  689. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  690. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  691. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  722. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  723. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  724. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  725. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  726. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  727. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  728. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  729. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  730. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  731. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  732. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  733. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  734. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  735. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  736. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  737. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  738. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  739. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  740. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  741. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  742. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  743. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  744. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  745. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  746. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  747. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  748. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  749. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  750. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  751. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  752. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  753. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  754. vllm/model_executor/layers/quantization/utils/fp8_utils.py +611 -0
  755. vllm/model_executor/layers/quantization/utils/gptq_utils.py +94 -0
  756. vllm/model_executor/layers/quantization/utils/int8_utils.py +484 -0
  757. vllm/model_executor/layers/quantization/utils/layer_utils.py +39 -0
  758. vllm/model_executor/layers/quantization/utils/machete_utils.py +32 -0
  759. vllm/model_executor/layers/quantization/utils/marlin_utils.py +475 -0
  760. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +277 -0
  761. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +324 -0
  762. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +164 -0
  763. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +463 -0
  764. vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +125 -0
  765. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +44 -0
  766. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +61 -0
  767. vllm/model_executor/layers/quantization/utils/quant_utils.py +572 -0
  768. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +404 -0
  769. vllm/model_executor/layers/rejection_sampler.py +405 -0
  770. vllm/model_executor/layers/resampler.py +269 -0
  771. vllm/model_executor/layers/rotary_embedding.py +1861 -0
  772. vllm/model_executor/layers/sampler.py +1203 -0
  773. vllm/model_executor/layers/spec_decode_base_sampler.py +258 -0
  774. vllm/model_executor/layers/typical_acceptance_sampler.py +165 -0
  775. vllm/model_executor/layers/utils.py +99 -0
  776. vllm/model_executor/layers/vocab_parallel_embedding.py +486 -0
  777. vllm/model_executor/model_loader/__init__.py +75 -0
  778. vllm/model_executor/model_loader/base_loader.py +24 -0
  779. vllm/model_executor/model_loader/bitsandbytes_loader.py +582 -0
  780. vllm/model_executor/model_loader/default_loader.py +295 -0
  781. vllm/model_executor/model_loader/dummy_loader.py +37 -0
  782. vllm/model_executor/model_loader/gguf_loader.py +113 -0
  783. vllm/model_executor/model_loader/neuron.py +475 -0
  784. vllm/model_executor/model_loader/neuronx_distributed.py +622 -0
  785. vllm/model_executor/model_loader/runai_streamer_loader.py +120 -0
  786. vllm/model_executor/model_loader/sharded_state_loader.py +211 -0
  787. vllm/model_executor/model_loader/tensorizer.py +632 -0
  788. vllm/model_executor/model_loader/tensorizer_loader.py +122 -0
  789. vllm/model_executor/model_loader/utils.py +301 -0
  790. vllm/model_executor/model_loader/weight_utils.py +781 -0
  791. vllm/model_executor/models/__init__.py +27 -0
  792. vllm/model_executor/models/adapters.py +247 -0
  793. vllm/model_executor/models/aimv2.py +199 -0
  794. vllm/model_executor/models/arctic.py +558 -0
  795. vllm/model_executor/models/aria.py +656 -0
  796. vllm/model_executor/models/aya_vision.py +461 -0
  797. vllm/model_executor/models/baichuan.py +473 -0
  798. vllm/model_executor/models/bamba.py +542 -0
  799. vllm/model_executor/models/bart.py +937 -0
  800. vllm/model_executor/models/bert.py +517 -0
  801. vllm/model_executor/models/bert_with_rope.py +714 -0
  802. vllm/model_executor/models/blip.py +338 -0
  803. vllm/model_executor/models/blip2.py +717 -0
  804. vllm/model_executor/models/bloom.py +372 -0
  805. vllm/model_executor/models/chameleon.py +1135 -0
  806. vllm/model_executor/models/chatglm.py +477 -0
  807. vllm/model_executor/models/clip.py +411 -0
  808. vllm/model_executor/models/commandr.py +471 -0
  809. vllm/model_executor/models/constant_size_cache.py +136 -0
  810. vllm/model_executor/models/dbrx.py +471 -0
  811. vllm/model_executor/models/deepseek.py +485 -0
  812. vllm/model_executor/models/deepseek_mtp.py +268 -0
  813. vllm/model_executor/models/deepseek_v2.py +842 -0
  814. vllm/model_executor/models/deepseek_vl2.py +647 -0
  815. vllm/model_executor/models/eagle.py +259 -0
  816. vllm/model_executor/models/exaone.py +550 -0
  817. vllm/model_executor/models/fairseq2_llama.py +153 -0
  818. vllm/model_executor/models/falcon.py +509 -0
  819. vllm/model_executor/models/falcon_h1.py +684 -0
  820. vllm/model_executor/models/florence2.py +1102 -0
  821. vllm/model_executor/models/fuyu.py +388 -0
  822. vllm/model_executor/models/gemma.py +424 -0
  823. vllm/model_executor/models/gemma2.py +424 -0
  824. vllm/model_executor/models/gemma3.py +532 -0
  825. vllm/model_executor/models/gemma3_mm.py +708 -0
  826. vllm/model_executor/models/glm.py +22 -0
  827. vllm/model_executor/models/glm4.py +304 -0
  828. vllm/model_executor/models/glm4v.py +647 -0
  829. vllm/model_executor/models/gpt2.py +327 -0
  830. vllm/model_executor/models/gpt_bigcode.py +334 -0
  831. vllm/model_executor/models/gpt_j.py +338 -0
  832. vllm/model_executor/models/gpt_neox.py +331 -0
  833. vllm/model_executor/models/granite.py +492 -0
  834. vllm/model_executor/models/granite_speech.py +778 -0
  835. vllm/model_executor/models/granitemoe.py +436 -0
  836. vllm/model_executor/models/granitemoehybrid.py +585 -0
  837. vllm/model_executor/models/granitemoeshared.py +340 -0
  838. vllm/model_executor/models/gritlm.py +223 -0
  839. vllm/model_executor/models/grok1.py +545 -0
  840. vllm/model_executor/models/h2ovl.py +545 -0
  841. vllm/model_executor/models/idefics2_vision_model.py +388 -0
  842. vllm/model_executor/models/idefics3.py +767 -0
  843. vllm/model_executor/models/interfaces.py +571 -0
  844. vllm/model_executor/models/interfaces_base.py +163 -0
  845. vllm/model_executor/models/intern_vit.py +475 -0
  846. vllm/model_executor/models/internlm2.py +454 -0
  847. vllm/model_executor/models/internlm2_ve.py +146 -0
  848. vllm/model_executor/models/internvl.py +1405 -0
  849. vllm/model_executor/models/jais.py +372 -0
  850. vllm/model_executor/models/jamba.py +591 -0
  851. vllm/model_executor/models/kimi_vl.py +576 -0
  852. vllm/model_executor/models/llama.py +643 -0
  853. vllm/model_executor/models/llama4.py +531 -0
  854. vllm/model_executor/models/llama_eagle.py +166 -0
  855. vllm/model_executor/models/llama_eagle3.py +257 -0
  856. vllm/model_executor/models/llava.py +865 -0
  857. vllm/model_executor/models/llava_next.py +585 -0
  858. vllm/model_executor/models/llava_next_video.py +470 -0
  859. vllm/model_executor/models/llava_onevision.py +955 -0
  860. vllm/model_executor/models/mamba.py +272 -0
  861. vllm/model_executor/models/mamba2.py +302 -0
  862. vllm/model_executor/models/mamba_cache.py +75 -0
  863. vllm/model_executor/models/medusa.py +218 -0
  864. vllm/model_executor/models/mimo.py +191 -0
  865. vllm/model_executor/models/mimo_mtp.py +284 -0
  866. vllm/model_executor/models/minicpm.py +590 -0
  867. vllm/model_executor/models/minicpm3.py +229 -0
  868. vllm/model_executor/models/minicpmo.py +758 -0
  869. vllm/model_executor/models/minicpmv.py +1286 -0
  870. vllm/model_executor/models/minimax_cache.py +35 -0
  871. vllm/model_executor/models/minimax_text_01.py +1303 -0
  872. vllm/model_executor/models/minimax_vl_01.py +363 -0
  873. vllm/model_executor/models/mistral3.py +603 -0
  874. vllm/model_executor/models/mixtral.py +487 -0
  875. vllm/model_executor/models/mixtral_quant.py +452 -0
  876. vllm/model_executor/models/mllama.py +1623 -0
  877. vllm/model_executor/models/mllama4.py +838 -0
  878. vllm/model_executor/models/mlp_speculator.py +205 -0
  879. vllm/model_executor/models/modernbert.py +329 -0
  880. vllm/model_executor/models/module_mapping.py +71 -0
  881. vllm/model_executor/models/molmo.py +1567 -0
  882. vllm/model_executor/models/moonvit.py +629 -0
  883. vllm/model_executor/models/mpt.py +330 -0
  884. vllm/model_executor/models/nemotron.py +507 -0
  885. vllm/model_executor/models/nemotron_nas.py +483 -0
  886. vllm/model_executor/models/nvlm_d.py +215 -0
  887. vllm/model_executor/models/olmo.py +388 -0
  888. vllm/model_executor/models/olmo2.py +413 -0
  889. vllm/model_executor/models/olmoe.py +446 -0
  890. vllm/model_executor/models/opt.py +411 -0
  891. vllm/model_executor/models/orion.py +348 -0
  892. vllm/model_executor/models/ovis.py +554 -0
  893. vllm/model_executor/models/paligemma.py +397 -0
  894. vllm/model_executor/models/persimmon.py +343 -0
  895. vllm/model_executor/models/phi.py +355 -0
  896. vllm/model_executor/models/phi3.py +18 -0
  897. vllm/model_executor/models/phi3_small.py +464 -0
  898. vllm/model_executor/models/phi3v.py +722 -0
  899. vllm/model_executor/models/phi4mm.py +1245 -0
  900. vllm/model_executor/models/phi4mm_audio.py +1232 -0
  901. vllm/model_executor/models/phi4mm_utils.py +1883 -0
  902. vllm/model_executor/models/phimoe.py +664 -0
  903. vllm/model_executor/models/pixtral.py +1315 -0
  904. vllm/model_executor/models/plamo2.py +737 -0
  905. vllm/model_executor/models/prithvi_geospatial_mae.py +231 -0
  906. vllm/model_executor/models/qwen.py +361 -0
  907. vllm/model_executor/models/qwen2.py +567 -0
  908. vllm/model_executor/models/qwen2_5_omni_thinker.py +903 -0
  909. vllm/model_executor/models/qwen2_5_vl.py +1171 -0
  910. vllm/model_executor/models/qwen2_audio.py +409 -0
  911. vllm/model_executor/models/qwen2_moe.py +539 -0
  912. vllm/model_executor/models/qwen2_rm.py +131 -0
  913. vllm/model_executor/models/qwen2_vl.py +1410 -0
  914. vllm/model_executor/models/qwen3.py +320 -0
  915. vllm/model_executor/models/qwen3_moe.py +534 -0
  916. vllm/model_executor/models/qwen_vl.py +784 -0
  917. vllm/model_executor/models/registry.py +618 -0
  918. vllm/model_executor/models/roberta.py +273 -0
  919. vllm/model_executor/models/siglip.py +523 -0
  920. vllm/model_executor/models/skyworkr1v.py +950 -0
  921. vllm/model_executor/models/smolvlm.py +51 -0
  922. vllm/model_executor/models/solar.py +505 -0
  923. vllm/model_executor/models/stablelm.py +342 -0
  924. vllm/model_executor/models/starcoder2.py +355 -0
  925. vllm/model_executor/models/telechat2.py +139 -0
  926. vllm/model_executor/models/teleflm.py +78 -0
  927. vllm/model_executor/models/transformers.py +507 -0
  928. vllm/model_executor/models/ultravox.py +655 -0
  929. vllm/model_executor/models/utils.py +730 -0
  930. vllm/model_executor/models/vision.py +146 -0
  931. vllm/model_executor/models/whisper.py +746 -0
  932. vllm/model_executor/models/zamba2.py +1008 -0
  933. vllm/model_executor/parameter.py +458 -0
  934. vllm/model_executor/pooling_metadata.py +71 -0
  935. vllm/model_executor/sampling_metadata.py +596 -0
  936. vllm/model_executor/utils.py +53 -0
  937. vllm/multimodal/__init__.py +32 -0
  938. vllm/multimodal/audio.py +105 -0
  939. vllm/multimodal/base.py +218 -0
  940. vllm/multimodal/hasher.py +117 -0
  941. vllm/multimodal/image.py +96 -0
  942. vllm/multimodal/inputs.py +872 -0
  943. vllm/multimodal/parse.py +460 -0
  944. vllm/multimodal/processing.py +1894 -0
  945. vllm/multimodal/profiling.py +273 -0
  946. vllm/multimodal/registry.py +330 -0
  947. vllm/multimodal/utils.py +392 -0
  948. vllm/multimodal/video.py +197 -0
  949. vllm/outputs.py +525 -0
  950. vllm/platforms/__init__.py +290 -0
  951. vllm/platforms/cpu.py +205 -0
  952. vllm/platforms/cuda.py +461 -0
  953. vllm/platforms/hpu.py +105 -0
  954. vllm/platforms/interface.py +492 -0
  955. vllm/platforms/neuron.py +152 -0
  956. vllm/platforms/rocm.py +388 -0
  957. vllm/platforms/tpu.py +215 -0
  958. vllm/platforms/xpu.py +155 -0
  959. vllm/plugins/__init__.py +86 -0
  960. vllm/plugins/lora_resolvers/README.md +15 -0
  961. vllm/plugins/lora_resolvers/__init__.py +0 -0
  962. vllm/plugins/lora_resolvers/filesystem_resolver.py +49 -0
  963. vllm/pooling_params.py +53 -0
  964. vllm/profiler/__init__.py +0 -0
  965. vllm/profiler/layerwise_profile.py +374 -0
  966. vllm/profiler/utils.py +147 -0
  967. vllm/prompt_adapter/__init__.py +0 -0
  968. vllm/prompt_adapter/layers.py +82 -0
  969. vllm/prompt_adapter/models.py +357 -0
  970. vllm/prompt_adapter/request.py +36 -0
  971. vllm/prompt_adapter/utils.py +97 -0
  972. vllm/prompt_adapter/worker_manager.py +178 -0
  973. vllm/py.typed +2 -0
  974. vllm/reasoning/__init__.py +14 -0
  975. vllm/reasoning/abs_reasoning_parsers.py +191 -0
  976. vllm/reasoning/deepseek_r1_reasoning_parser.py +172 -0
  977. vllm/reasoning/granite_reasoning_parser.py +362 -0
  978. vllm/reasoning/qwen3_reasoning_parser.py +150 -0
  979. vllm/sampling_params.py +590 -0
  980. vllm/scalar_type.py +346 -0
  981. vllm/scripts.py +14 -0
  982. vllm/sequence.py +1567 -0
  983. vllm/spec_decode/__init__.py +0 -0
  984. vllm/spec_decode/batch_expansion.py +505 -0
  985. vllm/spec_decode/draft_model_runner.py +349 -0
  986. vllm/spec_decode/interfaces.py +98 -0
  987. vllm/spec_decode/medusa_worker.py +137 -0
  988. vllm/spec_decode/metrics.py +212 -0
  989. vllm/spec_decode/mlp_speculator_worker.py +93 -0
  990. vllm/spec_decode/mqa_scorer.py +159 -0
  991. vllm/spec_decode/multi_step_worker.py +422 -0
  992. vllm/spec_decode/ngram_worker.py +195 -0
  993. vllm/spec_decode/proposer_worker_base.py +58 -0
  994. vllm/spec_decode/smaller_tp_proposer_worker.py +195 -0
  995. vllm/spec_decode/spec_decode_worker.py +1325 -0
  996. vllm/spec_decode/target_model_runner.py +44 -0
  997. vllm/spec_decode/top1_proposer.py +274 -0
  998. vllm/spec_decode/util.py +276 -0
  999. vllm/test_utils.py +129 -0
  1000. vllm/third_party/__init__.py +0 -0
  1001. vllm/third_party/pynvml.py +6139 -0
  1002. vllm/tracing.py +130 -0
  1003. vllm/transformers_utils/__init__.py +23 -0
  1004. vllm/transformers_utils/chat_templates/__init__.py +4 -0
  1005. vllm/transformers_utils/chat_templates/registry.py +59 -0
  1006. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1007. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1008. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1009. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1010. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1011. vllm/transformers_utils/config.py +835 -0
  1012. vllm/transformers_utils/configs/__init__.py +58 -0
  1013. vllm/transformers_utils/configs/arctic.py +206 -0
  1014. vllm/transformers_utils/configs/chatglm.py +71 -0
  1015. vllm/transformers_utils/configs/cohere2.py +194 -0
  1016. vllm/transformers_utils/configs/dbrx.py +279 -0
  1017. vllm/transformers_utils/configs/deepseek_vl2.py +215 -0
  1018. vllm/transformers_utils/configs/eagle.py +84 -0
  1019. vllm/transformers_utils/configs/exaone.py +189 -0
  1020. vllm/transformers_utils/configs/falcon.py +89 -0
  1021. vllm/transformers_utils/configs/h2ovl.py +15 -0
  1022. vllm/transformers_utils/configs/internvl.py +53 -0
  1023. vllm/transformers_utils/configs/jais.py +237 -0
  1024. vllm/transformers_utils/configs/kimi_vl.py +36 -0
  1025. vllm/transformers_utils/configs/medusa.py +62 -0
  1026. vllm/transformers_utils/configs/minimax_text_01.py +69 -0
  1027. vllm/transformers_utils/configs/minimax_vl_01.py +70 -0
  1028. vllm/transformers_utils/configs/mllama.py +30 -0
  1029. vllm/transformers_utils/configs/mlp_speculator.py +67 -0
  1030. vllm/transformers_utils/configs/moonvit.py +32 -0
  1031. vllm/transformers_utils/configs/mpt.py +179 -0
  1032. vllm/transformers_utils/configs/nemotron.py +204 -0
  1033. vllm/transformers_utils/configs/nvlm_d.py +14 -0
  1034. vllm/transformers_utils/configs/ovis.py +183 -0
  1035. vllm/transformers_utils/configs/skyworkr1v.py +53 -0
  1036. vllm/transformers_utils/configs/solar.py +246 -0
  1037. vllm/transformers_utils/configs/telechat2.py +63 -0
  1038. vllm/transformers_utils/configs/ultravox.py +107 -0
  1039. vllm/transformers_utils/detokenizer.py +167 -0
  1040. vllm/transformers_utils/detokenizer_utils.py +188 -0
  1041. vllm/transformers_utils/processor.py +220 -0
  1042. vllm/transformers_utils/processors/__init__.py +7 -0
  1043. vllm/transformers_utils/processors/deepseek_vl2.py +362 -0
  1044. vllm/transformers_utils/processors/ovis.py +419 -0
  1045. vllm/transformers_utils/s3_utils.py +161 -0
  1046. vllm/transformers_utils/tokenizer.py +301 -0
  1047. vllm/transformers_utils/tokenizer_base.py +148 -0
  1048. vllm/transformers_utils/tokenizer_group.py +119 -0
  1049. vllm/transformers_utils/tokenizers/__init__.py +9 -0
  1050. vllm/transformers_utils/tokenizers/mistral.py +490 -0
  1051. vllm/transformers_utils/utils.py +98 -0
  1052. vllm/triton_utils/__init__.py +13 -0
  1053. vllm/triton_utils/importing.py +49 -0
  1054. vllm/usage/__init__.py +0 -0
  1055. vllm/usage/usage_lib.py +255 -0
  1056. vllm/utils.py +2844 -0
  1057. vllm/v1/__init__.py +0 -0
  1058. vllm/v1/attention/__init__.py +0 -0
  1059. vllm/v1/attention/backends/__init__.py +0 -0
  1060. vllm/v1/attention/backends/flash_attn.py +833 -0
  1061. vllm/v1/attention/backends/flashinfer.py +639 -0
  1062. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1063. vllm/v1/attention/backends/mla/common.py +926 -0
  1064. vllm/v1/attention/backends/mla/flashmla.py +150 -0
  1065. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +221 -0
  1066. vllm/v1/attention/backends/mla/triton_mla.py +118 -0
  1067. vllm/v1/attention/backends/pallas.py +235 -0
  1068. vllm/v1/attention/backends/triton_attn.py +279 -0
  1069. vllm/v1/attention/backends/utils.py +18 -0
  1070. vllm/v1/core/__init__.py +0 -0
  1071. vllm/v1/core/block_pool.py +328 -0
  1072. vllm/v1/core/encoder_cache_manager.py +149 -0
  1073. vllm/v1/core/kv_cache_manager.py +372 -0
  1074. vllm/v1/core/kv_cache_utils.py +748 -0
  1075. vllm/v1/core/sched/__init__.py +0 -0
  1076. vllm/v1/core/sched/interface.py +143 -0
  1077. vllm/v1/core/sched/output.py +153 -0
  1078. vllm/v1/core/sched/scheduler.py +1015 -0
  1079. vllm/v1/core/sched/utils.py +22 -0
  1080. vllm/v1/core/single_type_kv_cache_manager.py +358 -0
  1081. vllm/v1/engine/__init__.py +171 -0
  1082. vllm/v1/engine/async_llm.py +546 -0
  1083. vllm/v1/engine/core.py +801 -0
  1084. vllm/v1/engine/core_client.py +1020 -0
  1085. vllm/v1/engine/detokenizer.py +260 -0
  1086. vllm/v1/engine/exceptions.py +16 -0
  1087. vllm/v1/engine/llm_engine.py +316 -0
  1088. vllm/v1/engine/logprobs.py +198 -0
  1089. vllm/v1/engine/mm_input_cache.py +90 -0
  1090. vllm/v1/engine/output_processor.py +427 -0
  1091. vllm/v1/engine/parallel_sampling.py +132 -0
  1092. vllm/v1/engine/processor.py +398 -0
  1093. vllm/v1/executor/__init__.py +0 -0
  1094. vllm/v1/executor/abstract.py +112 -0
  1095. vllm/v1/executor/multiproc_executor.py +532 -0
  1096. vllm/v1/executor/ray_distributed_executor.py +61 -0
  1097. vllm/v1/kv_cache_interface.py +208 -0
  1098. vllm/v1/metrics/__init__.py +0 -0
  1099. vllm/v1/metrics/loggers.py +511 -0
  1100. vllm/v1/metrics/ray_wrappers.py +120 -0
  1101. vllm/v1/metrics/reader.py +245 -0
  1102. vllm/v1/metrics/stats.py +238 -0
  1103. vllm/v1/outputs.py +115 -0
  1104. vllm/v1/request.py +191 -0
  1105. vllm/v1/sample/__init__.py +0 -0
  1106. vllm/v1/sample/metadata.py +43 -0
  1107. vllm/v1/sample/ops/__init__.py +0 -0
  1108. vllm/v1/sample/ops/bad_words.py +38 -0
  1109. vllm/v1/sample/ops/penalties.py +58 -0
  1110. vllm/v1/sample/ops/topk_topp_sampler.py +292 -0
  1111. vllm/v1/sample/rejection_sampler.py +630 -0
  1112. vllm/v1/sample/sampler.py +270 -0
  1113. vllm/v1/sample/tpu/__init__.py +0 -0
  1114. vllm/v1/sample/tpu/metadata.py +123 -0
  1115. vllm/v1/sample/tpu/sampler.py +144 -0
  1116. vllm/v1/serial_utils.py +313 -0
  1117. vllm/v1/spec_decode/__init__.py +0 -0
  1118. vllm/v1/spec_decode/eagle.py +424 -0
  1119. vllm/v1/spec_decode/medusa.py +61 -0
  1120. vllm/v1/spec_decode/metadata.py +61 -0
  1121. vllm/v1/spec_decode/metrics.py +177 -0
  1122. vllm/v1/spec_decode/ngram_proposer.py +131 -0
  1123. vllm/v1/spec_decode/utils.py +45 -0
  1124. vllm/v1/structured_output/__init__.py +215 -0
  1125. vllm/v1/structured_output/backend_guidance.py +244 -0
  1126. vllm/v1/structured_output/backend_types.py +133 -0
  1127. vllm/v1/structured_output/backend_xgrammar.py +317 -0
  1128. vllm/v1/structured_output/request.py +85 -0
  1129. vllm/v1/structured_output/utils.py +174 -0
  1130. vllm/v1/utils.py +294 -0
  1131. vllm/v1/worker/__init__.py +0 -0
  1132. vllm/v1/worker/block_table.py +139 -0
  1133. vllm/v1/worker/gpu_input_batch.py +680 -0
  1134. vllm/v1/worker/gpu_model_runner.py +2084 -0
  1135. vllm/v1/worker/gpu_worker.py +373 -0
  1136. vllm/v1/worker/lora_model_runner_mixin.py +145 -0
  1137. vllm/v1/worker/tpu_model_runner.py +1510 -0
  1138. vllm/v1/worker/tpu_worker.py +276 -0
  1139. vllm/v1/worker/utils.py +74 -0
  1140. vllm/v1/worker/worker_base.py +64 -0
  1141. vllm/version.py +40 -0
  1142. vllm/vllm_flash_attn/.gitkeep +0 -0
  1143. vllm/worker/__init__.py +0 -0
  1144. vllm/worker/cache_engine.py +144 -0
  1145. vllm/worker/cpu_enc_dec_model_runner.py +326 -0
  1146. vllm/worker/cpu_model_runner.py +671 -0
  1147. vllm/worker/cpu_pooling_model_runner.py +125 -0
  1148. vllm/worker/cpu_worker.py +400 -0
  1149. vllm/worker/enc_dec_model_runner.py +555 -0
  1150. vllm/worker/hpu_model_runner.py +2319 -0
  1151. vllm/worker/hpu_worker.py +483 -0
  1152. vllm/worker/model_runner.py +2178 -0
  1153. vllm/worker/model_runner_base.py +281 -0
  1154. vllm/worker/multi_step_hpu_worker.py +122 -0
  1155. vllm/worker/multi_step_model_runner.py +910 -0
  1156. vllm/worker/multi_step_neuron_model_runner.py +84 -0
  1157. vllm/worker/multi_step_neuronx_distributed_model_runner.py +63 -0
  1158. vllm/worker/multi_step_tpu_worker.py +107 -0
  1159. vllm/worker/multi_step_worker.py +196 -0
  1160. vllm/worker/neuron_model_runner.py +418 -0
  1161. vllm/worker/neuron_worker.py +158 -0
  1162. vllm/worker/neuronx_distributed_model_runner.py +136 -0
  1163. vllm/worker/pooling_model_runner.py +211 -0
  1164. vllm/worker/tpu_model_runner.py +908 -0
  1165. vllm/worker/tpu_worker.py +336 -0
  1166. vllm/worker/utils.py +52 -0
  1167. vllm/worker/worker.py +574 -0
  1168. vllm/worker/worker_base.py +644 -0
  1169. vllm/worker/xpu_model_runner.py +606 -0
  1170. vllm/worker/xpu_worker.py +185 -0
  1171. vllm_cpu_avx512bf16-0.9.0.post2.dist-info/METADATA +335 -0
  1172. vllm_cpu_avx512bf16-0.9.0.post2.dist-info/RECORD +1175 -0
  1173. vllm_cpu_avx512bf16-0.9.0.post2.dist-info/WHEEL +5 -0
  1174. vllm_cpu_avx512bf16-0.9.0.post2.dist-info/entry_points.txt +5 -0
  1175. vllm_cpu_avx512bf16-0.9.0.post2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1503 @@
+ # SPDX-License-Identifier: Apache-2.0
+
+ import itertools
+ import warnings
+ from collections.abc import Sequence
+ from contextlib import contextmanager
+ from typing import (TYPE_CHECKING, Any, Callable, ClassVar, Optional, Union,
+                     cast, overload)
+
+ import cloudpickle
+ import torch.nn as nn
+ from tqdm.auto import tqdm
+ from typing_extensions import TypeVar, deprecated
+
+ from vllm.beam_search import (BeamSearchInstance, BeamSearchOutput,
+                               BeamSearchSequence, get_beam_search_score)
+ from vllm.config import (CompilationConfig, ModelDType, TokenizerMode,
+                          is_init_field)
+ from vllm.engine.arg_utils import (EngineArgs, HfOverrides, PoolerConfig,
+                                    TaskOption)
+ from vllm.engine.llm_engine import LLMEngine
+ from vllm.entrypoints.chat_utils import (ChatCompletionMessageParam,
+                                          ChatTemplateContentFormatOption,
+                                          apply_hf_chat_template,
+                                          apply_mistral_chat_template,
+                                          parse_chat_messages,
+                                          resolve_chat_template_content_format)
+ from vllm.entrypoints.score_utils import (_cosine_similarity,
+                                           _validate_score_input_lens)
+ from vllm.entrypoints.utils import _validate_truncation_size
+ from vllm.inputs import PromptType, SingletonPrompt, TextPrompt, TokensPrompt
+ from vllm.inputs.parse import parse_and_batch_prompt
+ from vllm.logger import init_logger
+ from vllm.lora.request import LoRARequest
+ from vllm.model_executor.guided_decoding.guided_fields import (
+     GuidedDecodingRequest, LLMGuidedOptions)
+ from vllm.model_executor.layers.quantization import QuantizationMethods
+ from vllm.outputs import (ClassificationRequestOutput, EmbeddingRequestOutput,
+                           PoolingRequestOutput, RequestOutput,
+                           ScoringRequestOutput)
+ from vllm.pooling_params import PoolingParams
+ from vllm.prompt_adapter.request import PromptAdapterRequest
+ from vllm.sampling_params import (BeamSearchParams, GuidedDecodingParams,
+                                   RequestOutputKind, SamplingParams)
+ from vllm.transformers_utils.tokenizer import (AnyTokenizer, MistralTokenizer,
+                                                get_cached_tokenizer)
+ from vllm.usage.usage_lib import UsageContext
+ from vllm.utils import (Counter, Device, deprecate_args, deprecate_kwargs,
+                         is_list_of)
+
+ if TYPE_CHECKING:
+     from vllm.v1.metrics.reader import Metric
+
+ logger = init_logger(__name__)
+
+ _R = TypeVar("_R", default=Any)
+
+
+ class LLM:
+     """An LLM for generating texts from given prompts and sampling parameters.
+
+     This class includes a tokenizer, a language model (possibly distributed
+     across multiple GPUs), and GPU memory space allocated for intermediate
+     states (aka KV cache). Given a batch of prompts and sampling parameters,
+     this class generates texts from the model, using an intelligent batching
+     mechanism and efficient memory management.
+
+     Args:
+         model: The name or path of a HuggingFace Transformers model.
+         tokenizer: The name or path of a HuggingFace Transformers tokenizer.
+         tokenizer_mode: The tokenizer mode. "auto" will use the fast tokenizer
+             if available, and "slow" will always use the slow tokenizer.
+         skip_tokenizer_init: If true, skip initialization of tokenizer and
+             detokenizer. Expect valid prompt_token_ids and None for prompt
+             from the input.
+         trust_remote_code: Trust remote code (e.g., from HuggingFace) when
+             downloading the model and tokenizer.
+         allowed_local_media_path: Allowing API requests to read local images
+             or videos from directories specified by the server file system.
+             This is a security risk. Should only be enabled in trusted
+             environments.
+         tensor_parallel_size: The number of GPUs to use for distributed
+             execution with tensor parallelism.
+         dtype: The data type for the model weights and activations. Currently,
+             we support `float32`, `float16`, and `bfloat16`. If `auto`, we use
+             the `torch_dtype` attribute specified in the model config file.
+             However, if the `torch_dtype` in the config is `float32`, we will
+             use `float16` instead.
+         quantization: The method used to quantize the model weights. Currently,
+             we support "awq", "gptq", and "fp8" (experimental).
+             If None, we first check the `quantization_config` attribute in the
+             model config file. If that is None, we assume the model weights are
+             not quantized and use `dtype` to determine the data type of
+             the weights.
+         revision: The specific model version to use. It can be a branch name,
+             a tag name, or a commit id.
+         tokenizer_revision: The specific tokenizer version to use. It can be a
+             branch name, a tag name, or a commit id.
+         seed: The seed to initialize the random number generator for sampling.
+         gpu_memory_utilization: The ratio (between 0 and 1) of GPU memory to
+             reserve for the model weights, activations, and KV cache. Higher
+             values will increase the KV cache size and thus improve the model's
+             throughput. However, if the value is too high, it may cause out-of-
+             memory (OOM) errors.
+         swap_space: The size (GiB) of CPU memory per GPU to use as swap space.
+             This can be used for temporarily storing the states of the requests
+             when their `best_of` sampling parameters are larger than 1. If all
+             requests will have `best_of=1`, you can safely set this to 0.
+             Noting that `best_of` is only supported in V0. Otherwise, too small
+             values may cause out-of-memory (OOM) errors.
+         cpu_offload_gb: The size (GiB) of CPU memory to use for offloading
+             the model weights. This virtually increases the GPU memory space
+             you can use to hold the model weights, at the cost of CPU-GPU data
+             transfer for every forward pass.
+         enforce_eager: Whether to enforce eager execution. If True, we will
+             disable CUDA graph and always execute the model in eager mode.
+             If False, we will use CUDA graph and eager execution in hybrid.
+         max_seq_len_to_capture: Maximum sequence len covered by CUDA graphs.
+             When a sequence has context length larger than this, we fall back
+             to eager mode. Additionally for encoder-decoder models, if the
+             sequence length of the encoder input is larger than this, we fall
+             back to the eager mode.
+         disable_custom_all_reduce: See
+             [ParallelConfig][vllm.config.ParallelConfig].
+         disable_async_output_proc: Disable async output processing.
+             This may result in lower performance.
+         hf_token: The token to use as HTTP bearer authorization for remote files
+             . If `True`, will use the token generated when running
+             `huggingface-cli login` (stored in `~/.huggingface`).
+         hf_overrides: If a dictionary, contains arguments to be forwarded to the
+             HuggingFace config. If a callable, it is called to update the
+             HuggingFace config.
+         compilation_config: Either an integer or a dictionary. If it is an
+             integer, it is used as the level of compilation optimization. If it
+             is a dictionary, it can specify the full compilation configuration.
+         **kwargs: Arguments for [`EngineArgs`][vllm.EngineArgs].
+
+     Note:
+         This class is intended to be used for offline inference. For online
+         serving, use the [AsyncLLMEngine][vllm.AsyncLLMEngine] class instead.
+     """
+
+     DEPRECATE_LEGACY: ClassVar[bool] = True
+     """A flag to toggle whether to deprecate the legacy generate/encode API."""
+
+     DEPRECATE_INIT_POSARGS: ClassVar[bool] = True
+     """
+     A flag to toggle whether to deprecate positional arguments in
+     [LLM.__init__][].
+     """
+
+     @classmethod
+     @contextmanager
+     def deprecate_legacy_api(cls):
+         cls.DEPRECATE_LEGACY = True
+
+         yield
+
+         cls.DEPRECATE_LEGACY = False
+
+     @deprecate_args(
+         start_index=2,  # Ignore self and model
+         is_deprecated=lambda: LLM.DEPRECATE_INIT_POSARGS,
+         additional_message=(
+             "All positional arguments other than `model` will be "
+             "replaced with keyword arguments in an upcoming version."),
+     )
+     def __init__(
+         self,
+         model: str,
+         tokenizer: Optional[str] = None,
+         tokenizer_mode: TokenizerMode = "auto",
+         skip_tokenizer_init: bool = False,
+         trust_remote_code: bool = False,
+         allowed_local_media_path: str = "",
+         tensor_parallel_size: int = 1,
+         dtype: ModelDType = "auto",
+         quantization: Optional[QuantizationMethods] = None,
+         revision: Optional[str] = None,
+         tokenizer_revision: Optional[str] = None,
+         seed: Optional[int] = None,
+         gpu_memory_utilization: float = 0.9,
+         swap_space: float = 4,
+         cpu_offload_gb: float = 0,
+         enforce_eager: bool = False,
+         max_seq_len_to_capture: int = 8192,
+         disable_custom_all_reduce: bool = False,
+         disable_async_output_proc: bool = False,
+         hf_token: Optional[Union[bool, str]] = None,
+         hf_overrides: Optional[HfOverrides] = None,
+         mm_processor_kwargs: Optional[dict[str, Any]] = None,
+         # After positional args are removed, move this right below `model`
+         task: TaskOption = "auto",
+         override_pooler_config: Optional[PoolerConfig] = None,
+         compilation_config: Optional[Union[int, dict[str, Any]]] = None,
+         **kwargs,
+     ) -> None:
+         """LLM constructor."""
+
+         if "disable_log_stats" not in kwargs:
+             kwargs["disable_log_stats"] = True
+
+         if "worker_cls" in kwargs:
+             worker_cls = kwargs["worker_cls"]
+             # if the worker_cls is not qualified string name,
+             # we serialize it using cloudpickle to avoid pickling issues
+             if isinstance(worker_cls, type):
+                 kwargs["worker_cls"] = cloudpickle.dumps(worker_cls)
+
+         if compilation_config is not None:
+             if isinstance(compilation_config, int):
+                 compilation_config_instance = CompilationConfig(
+                     level=compilation_config)
+             elif isinstance(compilation_config, dict):
+                 predicate = lambda x: is_init_field(CompilationConfig, x[0])
+                 compilation_config_instance = CompilationConfig(
+                     **dict(filter(predicate, compilation_config.items())))
+             else:
+                 compilation_config_instance = compilation_config
+         else:
+             compilation_config_instance = None
+
+         engine_args = EngineArgs(
+             model=model,
+             task=task,
+             tokenizer=tokenizer,
+             tokenizer_mode=tokenizer_mode,
+             skip_tokenizer_init=skip_tokenizer_init,
+             trust_remote_code=trust_remote_code,
+             allowed_local_media_path=allowed_local_media_path,
+             tensor_parallel_size=tensor_parallel_size,
+             dtype=dtype,
+             quantization=quantization,
+             revision=revision,
+             tokenizer_revision=tokenizer_revision,
+             seed=seed,
+             gpu_memory_utilization=gpu_memory_utilization,
+             swap_space=swap_space,
+             cpu_offload_gb=cpu_offload_gb,
+             enforce_eager=enforce_eager,
+             max_seq_len_to_capture=max_seq_len_to_capture,
+             disable_custom_all_reduce=disable_custom_all_reduce,
+             disable_async_output_proc=disable_async_output_proc,
+             hf_token=hf_token,
+             hf_overrides=hf_overrides,
+             mm_processor_kwargs=mm_processor_kwargs,
+             override_pooler_config=override_pooler_config,
+             compilation_config=compilation_config_instance,
+             **kwargs,
+         )
+
+         # Create the Engine (autoselects V0 vs V1)
+         self.llm_engine = LLMEngine.from_engine_args(
+             engine_args=engine_args, usage_context=UsageContext.LLM_CLASS)
+         self.engine_class = type(self.llm_engine)
+
+         self.request_counter = Counter()
+         self.default_sampling_params: Union[dict[str, Any], None] = None
+
+     def get_tokenizer(
+         self,
+         lora_request: Optional[LoRARequest] = None,
+     ) -> AnyTokenizer:
+         return self.llm_engine.get_tokenizer_group().get_lora_tokenizer(
+             lora_request)
+
+     def set_tokenizer(self, tokenizer: AnyTokenizer) -> None:
+         tokenizer_group = self.llm_engine.get_tokenizer_group()
+
+         # While CachedTokenizer is dynamic, have no choice but
+         # compare class name. Misjudgment will arise from
+         # user-defined tokenizer started with 'Cached'
+         if tokenizer.__class__.__name__.startswith("Cached"):
+             tokenizer_group.tokenizer = tokenizer
+         else:
+             tokenizer_group.tokenizer = get_cached_tokenizer(tokenizer)
+
+     def get_default_sampling_params(self) -> SamplingParams:
+         if self.default_sampling_params is None:
+             self.default_sampling_params = (
+                 self.llm_engine.model_config.get_diff_sampling_param())
+         if self.default_sampling_params:
+             return SamplingParams.from_optional(**self.default_sampling_params)
+         return SamplingParams()
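+
+     # Sketch of how the defaults above interact with generate() (illustrative;
+     # the prompts and temperature value are assumptions): calling generate()
+     # without sampling_params falls back to get_default_sampling_params(),
+     # while passing an explicit SamplingParams overrides those defaults.
+     #
+     #     outputs = llm.generate(prompts)  # model-config defaults
+     #     outputs = llm.generate(prompts, SamplingParams(temperature=0.0))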
285
+
286
+ @overload
287
+ def generate(
288
+ self,
289
+ prompts: Union[PromptType, Sequence[PromptType]],
290
+ /,
291
+ sampling_params: Optional[Union[SamplingParams,
292
+ Sequence[SamplingParams]]] = None,
293
+ *,
294
+ use_tqdm: bool = True,
295
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
296
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
297
+ guided_options_request: Optional[Union[LLMGuidedOptions,
298
+ GuidedDecodingRequest]] = None,
299
+ ) -> list[RequestOutput]:
300
+ ...
301
+
302
+ @overload # LEGACY: single (prompt + optional token ids)
303
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
304
+ def generate(
305
+ self,
306
+ prompts: str,
307
+ sampling_params: Optional[Union[SamplingParams,
308
+ list[SamplingParams]]] = None,
309
+ prompt_token_ids: Optional[list[int]] = None,
310
+ use_tqdm: bool = True,
311
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
312
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
313
+ guided_options_request: Optional[Union[LLMGuidedOptions,
314
+ GuidedDecodingRequest]] = None,
315
+ ) -> list[RequestOutput]:
316
+ ...
317
+
318
+ @overload # LEGACY: multi (prompt + optional token ids)
319
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
320
+ def generate(
321
+ self,
322
+ prompts: list[str],
323
+ sampling_params: Optional[Union[SamplingParams,
324
+ list[SamplingParams]]] = None,
325
+ prompt_token_ids: Optional[list[list[int]]] = None,
326
+ use_tqdm: bool = True,
327
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
328
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
329
+ guided_options_request: Optional[Union[LLMGuidedOptions,
330
+ GuidedDecodingRequest]] = None,
331
+ ) -> list[RequestOutput]:
332
+ ...
333
+
334
+ @overload # LEGACY: single (token ids + optional prompt)
335
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
336
+ def generate(
337
+ self,
338
+ prompts: Optional[str] = None,
339
+ sampling_params: Optional[Union[SamplingParams,
340
+ list[SamplingParams]]] = None,
341
+ *,
342
+ prompt_token_ids: list[int],
343
+ use_tqdm: bool = True,
344
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
345
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
346
+ guided_options_request: Optional[Union[LLMGuidedOptions,
347
+ GuidedDecodingRequest]] = None,
348
+ ) -> list[RequestOutput]:
349
+ ...
350
+
351
+ @overload # LEGACY: multi (token ids + optional prompt)
352
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
353
+ def generate(
354
+ self,
355
+ prompts: Optional[list[str]] = None,
356
+ sampling_params: Optional[Union[SamplingParams,
357
+ list[SamplingParams]]] = None,
358
+ *,
359
+ prompt_token_ids: list[list[int]],
360
+ use_tqdm: bool = True,
361
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
362
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
363
+ guided_options_request: Optional[Union[LLMGuidedOptions,
364
+ GuidedDecodingRequest]] = None,
365
+ ) -> list[RequestOutput]:
366
+ ...
367
+
368
+ @overload # LEGACY: single or multi token ids [pos-only]
369
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
370
+ def generate(
371
+ self,
372
+ prompts: None,
373
+ sampling_params: None,
374
+ prompt_token_ids: Union[list[int], list[list[int]]],
375
+ use_tqdm: bool = True,
376
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
377
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
378
+ guided_options_request: Optional[Union[LLMGuidedOptions,
379
+ GuidedDecodingRequest]] = None,
380
+ ) -> list[RequestOutput]:
381
+ ...
382
+
383
+ @deprecate_kwargs(
384
+ "prompt_token_ids",
385
+ is_deprecated=lambda: LLM.DEPRECATE_LEGACY,
386
+ additional_message="Please use the 'prompts' parameter instead.",
387
+ )
388
+ def generate(
389
+ self,
390
+ prompts: Union[Union[PromptType, Sequence[PromptType]],
391
+ Optional[Union[str, list[str]]]] = None,
392
+ sampling_params: Optional[Union[SamplingParams,
393
+ Sequence[SamplingParams]]] = None,
394
+ prompt_token_ids: Optional[Union[list[int], list[list[int]]]] = None,
395
+ use_tqdm: bool = True,
396
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
397
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
398
+ guided_options_request: Optional[Union[LLMGuidedOptions,
399
+ GuidedDecodingRequest]] = None,
400
+ priority: Optional[list[int]] = None,
401
+ ) -> list[RequestOutput]:
402
+ """Generates the completions for the input prompts.
403
+
404
+ This class automatically batches the given prompts, considering
405
+ the memory constraint. For the best performance, put all of your prompts
406
+ into a single list and pass it to this method.
407
+
408
+ Args:
409
+ prompts: The prompts to the LLM. You may pass a sequence of prompts
410
+ for batch inference. See [PromptType][vllm.inputs.PromptType]
411
+ for more details about the format of each prompts.
412
+ sampling_params: The sampling parameters for text generation. If
413
+ None, we use the default sampling parameters.
414
+ When it is a single value, it is applied to every prompt.
415
+ When it is a list, the list must have the same length as the
416
+                prompts; each element is paired with the corresponding prompt.
417
+ use_tqdm: Whether to use tqdm to display the progress bar.
418
+ lora_request: LoRA request to use for generation, if any.
419
+ prompt_adapter_request: Prompt Adapter request to use for
420
+ generation, if any.
421
+ priority: The priority of the requests, if any.
422
+ Only applicable when priority scheduling policy is enabled.
423
+
424
+ Returns:
425
+ A list of `RequestOutput` objects containing the
426
+ generated completions in the same order as the input prompts.
427
+
428
+ Note:
429
+ Using `prompts` and `prompt_token_ids` as keyword parameters is
430
+ considered legacy and may be deprecated in the future. You should
431
+            instead pass them via the `prompts` parameter.
432
+ """
433
+ runner_type = self.llm_engine.model_config.runner_type
434
+ if runner_type not in ["generate", "transcription"]:
435
+ messages = [
436
+ "LLM.generate() is only supported for (conditional) generation "
437
+ "models (XForCausalLM, XForConditionalGeneration).",
438
+ ]
439
+
440
+ supported_runner_types = self.llm_engine.model_config \
441
+ .supported_runner_types
442
+ if "generate" in supported_runner_types:
443
+ messages.append(
444
+ "Your model supports the 'generate' runner, but is "
445
+ f"currently initialized for the '{runner_type}' runner. "
446
+ "Please initialize vLLM using `--task generate`.")
447
+
448
+ raise ValueError(" ".join(messages))
449
+
450
+ if prompt_token_ids is not None:
451
+ parsed_prompts = self._convert_v1_inputs(
452
+ prompts=cast(Optional[Union[str, list[str]]], prompts),
453
+ prompt_token_ids=prompt_token_ids,
454
+ )
455
+ else:
456
+ parsed_prompts = cast(Union[PromptType, Sequence[PromptType]],
457
+ prompts)
458
+
459
+ if isinstance(guided_options_request, dict):
460
+ if len(guided_options_request) > 1:
461
+ raise ValueError(
462
+                    "You can only use one guided decoding option, but multiple are "
463
+ f"specified: {guided_options_request}")
464
+ guided_options_request = GuidedDecodingRequest(
465
+ **guided_options_request)
466
+
467
+ if sampling_params is None:
468
+ # Use default sampling params.
469
+ sampling_params = self.get_default_sampling_params()
470
+
471
+ self._validate_and_add_requests(
472
+ prompts=parsed_prompts,
473
+ params=sampling_params,
474
+ use_tqdm=use_tqdm,
475
+ lora_request=lora_request,
476
+ prompt_adapter_request=prompt_adapter_request,
477
+ guided_options=guided_options_request,
478
+ priority=priority,
479
+ )
480
+
481
+ outputs = self._run_engine(use_tqdm=use_tqdm)
482
+ return self.engine_class.validate_outputs(outputs, RequestOutput)
483
+
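A minimal sketch of calling the generate() API defined above; the model name is an illustrative assumption, not taken from this diff.

from vllm import LLM, SamplingParams

# Sketch only: "facebook/opt-125m" stands in for any supported model.
llm = LLM(model="facebook/opt-125m")
params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=32)
outputs = llm.generate(
    ["Hello, my name is", "The capital of France is"],
    params,
)
for out in outputs:
    # Each RequestOutput keeps the prompt and its generated completions.
    print(out.prompt, "->", out.outputs[0].text)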
484
+ def collective_rpc(self,
485
+ method: Union[str, Callable[..., _R]],
486
+ timeout: Optional[float] = None,
487
+ args: tuple = (),
488
+ kwargs: Optional[dict[str, Any]] = None) -> list[_R]:
489
+ """
490
+ Execute an RPC call on all workers.
491
+
492
+ Args:
493
+ method: Name of the worker method to execute, or a callable that
494
+ is serialized and sent to all workers to execute.
495
+
496
+ If the method is a callable, it should accept an additional
497
+ `self` argument, in addition to the arguments passed in `args`
498
+ and `kwargs`. The `self` argument will be the worker object.
499
+ timeout: Maximum time in seconds to wait for execution. Raises a
500
+ [`TimeoutError`][] on timeout. `None` means wait indefinitely.
501
+ args: Positional arguments to pass to the worker method.
502
+ kwargs: Keyword arguments to pass to the worker method.
503
+
504
+ Returns:
505
+ A list containing the results from each worker.
506
+
507
+ Note:
508
+ It is recommended to use this API to only pass control messages,
509
+ and set up data-plane communication to pass data.
510
+ """
511
+
512
+ return self.llm_engine.collective_rpc(method, timeout, args, kwargs)
513
+
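A sketch of collective_rpc(); the callable below is illustrative and simply reports each worker's class name.

# Sketch only: the worker object is passed as the first argument.
def worker_class_name(worker) -> str:
    return type(worker).__name__

# Reuses the `llm` instance from the generate() sketch above.
names = llm.collective_rpc(worker_class_name)
print(names)  # one entry per worker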
514
+ def apply_model(self, func: Callable[[nn.Module], _R]) -> list[_R]:
515
+ """
516
+ Run a function directly on the model inside each worker,
517
+ returning the result for each of them.
518
+ """
519
+ executor = self.llm_engine.model_executor
520
+ return executor.apply_model(func)
521
+
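A sketch of apply_model(), which hands the underlying nn.Module of each worker to the supplied function.

# Sketch only: count parameters on every worker; reuses `llm` from above.
param_counts = llm.apply_model(
    lambda model: sum(p.numel() for p in model.parameters()))
print(param_counts)  # one count per worker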
522
+ def beam_search(
523
+ self,
524
+ prompts: list[Union[TokensPrompt, TextPrompt]],
525
+ params: BeamSearchParams,
526
+ ) -> list[BeamSearchOutput]:
527
+ """
528
+ Generate sequences using beam search.
529
+
530
+ Args:
531
+ prompts: A list of prompts. Each prompt can be a string or a list
532
+ of token IDs.
533
+ params: The beam search parameters.
534
+ """
535
+ # TODO: how does beam search work together with length penalty,
536
+        # frequency penalty, and stopping criteria, etc.?
537
+ beam_width = params.beam_width
538
+ max_tokens = params.max_tokens
539
+ temperature = params.temperature
540
+ ignore_eos = params.ignore_eos
541
+ length_penalty = params.length_penalty
542
+
543
+ def sort_beams_key(x: BeamSearchSequence) -> float:
544
+ return get_beam_search_score(x.tokens, x.cum_logprob,
545
+ tokenizer.eos_token_id,
546
+ length_penalty)
547
+
548
+ def create_tokens_prompt_from_beam(
549
+ beam: BeamSearchSequence) -> TokensPrompt:
550
+ token_prompt_kwargs: TokensPrompt = {
551
+ "prompt_token_ids": beam.tokens
552
+ }
553
+ if beam.multi_modal_data is not None:
554
+ token_prompt_kwargs["multi_modal_data"] = beam.multi_modal_data
555
+
556
+ if beam.mm_processor_kwargs is not None:
557
+ token_prompt_kwargs[
558
+ "mm_processor_kwargs"] = beam.mm_processor_kwargs
559
+ return TokensPrompt(**token_prompt_kwargs)
560
+
561
+ tokenizer = self.get_tokenizer()
562
+ # generate 2 * beam_width candidates at each step
563
+ # following the huggingface transformers implementation
564
+ # at https://github.com/huggingface/transformers/blob/e15687fffe5c9d20598a19aeab721ae0a7580f8a/src/transformers/generation/beam_search.py#L534 # noqa
565
+ beam_search_params = SamplingParams(logprobs=2 * beam_width,
566
+ max_tokens=1,
567
+ temperature=temperature)
568
+ instances: list[BeamSearchInstance] = []
569
+
570
+ for prompt in prompts:
571
+ # Add multimodal processor kwargs & data
572
+ mm_kwargs = {}
573
+ if "multi_modal_data" in prompt:
574
+ mm_kwargs["multi_modal_data"] = prompt["multi_modal_data"]
575
+ if "mm_processor_kwargs" in prompt:
576
+ mm_kwargs["mm_processor_kwargs"] = prompt[
577
+ "mm_processor_kwargs"]
578
+
579
+ if "prompt_token_ids" in prompt:
580
+ prompt = cast(TokensPrompt, prompt) # Needed for mypy
581
+ prompt_tokens = prompt["prompt_token_ids"]
582
+ else:
583
+ prompt_tokens = tokenizer.encode(prompt["prompt"])
584
+
585
+ instances.append(
586
+ BeamSearchInstance(prompt_tokens, logprobs=None, **mm_kwargs))
587
+
588
+ for _ in range(max_tokens):
589
+ all_beams: list[BeamSearchSequence] = list(
590
+ sum((instance.beams for instance in instances), []))
591
+ pos = [0] + list(
592
+ itertools.accumulate(
593
+ len(instance.beams) for instance in instances))
594
+ instance_start_and_end: list[tuple[int, int]] = list(
595
+ zip(pos[:-1], pos[1:]))
596
+
597
+ if len(all_beams) == 0:
598
+ break
599
+
600
+ prompts_batch = [
601
+ create_tokens_prompt_from_beam(beam) for beam in all_beams
602
+ ]
603
+
604
+ # only runs for one step
605
+ # we don't need to use tqdm here
606
+ output = self.generate(prompts_batch,
607
+ sampling_params=beam_search_params,
608
+ use_tqdm=False)
609
+
610
+ for (start, end), instance in zip(instance_start_and_end,
611
+ instances):
612
+ instance_new_beams = []
613
+ for i in range(start, end):
614
+ current_beam = all_beams[i]
615
+ result = output[i]
616
+
617
+ if result.outputs[0].logprobs is not None:
618
+ # if `result.outputs[0].logprobs` is None, it means
619
+                        # the sequence finished (max-model-len reached or the
620
+                        # request was aborted); don't add it to the new beams.
621
+ logprobs = result.outputs[0].logprobs[0]
622
+ for token_id, logprob_obj in logprobs.items():
623
+ new_beam = BeamSearchSequence(
624
+ tokens=current_beam.tokens + [token_id],
625
+ logprobs=current_beam.logprobs + [logprobs],
626
+ cum_logprob=current_beam.cum_logprob +
627
+ logprob_obj.logprob,
628
+ multi_modal_data=current_beam.multi_modal_data,
629
+ mm_processor_kwargs=current_beam.
630
+ mm_processor_kwargs)
631
+
632
+ if token_id == tokenizer.eos_token_id and \
633
+ not ignore_eos:
634
+ instance.completed.append(new_beam)
635
+ else:
636
+ instance_new_beams.append(new_beam)
637
+ sorted_beams = sorted(instance_new_beams,
638
+ key=sort_beams_key,
639
+ reverse=True)
640
+ instance.beams = sorted_beams[:beam_width]
641
+
642
+ outputs = []
643
+ for instance in instances:
644
+ instance.completed.extend(instance.beams)
645
+ sorted_completed = sorted(instance.completed,
646
+ key=sort_beams_key,
647
+ reverse=True)
648
+ best_beams = sorted_completed[:beam_width]
649
+
650
+ for beam in best_beams:
651
+ beam.text = tokenizer.decode(beam.tokens)
652
+ outputs.append(BeamSearchOutput(sequences=best_beams))
653
+
654
+ return outputs
655
+
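A caller-side sketch of beam_search(); the import paths and parameter values are assumptions.

from vllm import TextPrompt
from vllm.sampling_params import BeamSearchParams

# Sketch only: beam-search decode a single text prompt with `llm` from above.
beam_outputs = llm.beam_search(
    [TextPrompt(prompt="The meaning of life is")],
    BeamSearchParams(beam_width=4, max_tokens=16),
)
for seq in beam_outputs[0].sequences:
    print(f"{seq.cum_logprob:.3f}", seq.text)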
656
+ def chat(
657
+ self,
658
+ messages: Union[list[ChatCompletionMessageParam],
659
+ list[list[ChatCompletionMessageParam]]],
660
+ sampling_params: Optional[Union[SamplingParams,
661
+ list[SamplingParams]]] = None,
662
+ use_tqdm: bool = True,
663
+ lora_request: Optional[LoRARequest] = None,
664
+ chat_template: Optional[str] = None,
665
+ chat_template_content_format: ChatTemplateContentFormatOption = "auto",
666
+ add_generation_prompt: bool = True,
667
+ continue_final_message: bool = False,
668
+ tools: Optional[list[dict[str, Any]]] = None,
669
+ chat_template_kwargs: Optional[dict[str, Any]] = None,
670
+ mm_processor_kwargs: Optional[dict[str, Any]] = None,
671
+ ) -> list[RequestOutput]:
672
+ """
673
+ Generate responses for a chat conversation.
674
+
675
+ The chat conversation is converted into a text prompt using the
676
+        tokenizer, and the [generate][] method is called to generate the
677
+ responses.
678
+
679
+ Multi-modal inputs can be passed in the same way you would pass them
680
+ to the OpenAI API.
681
+
682
+ Args:
683
+ messages: A list of conversations or a single conversation.
684
+
685
+ - Each conversation is represented as a list of messages.
686
+ - Each message is a dictionary with 'role' and 'content' keys.
687
+
688
+ sampling_params: The sampling parameters for text generation.
689
+ If None, we use the default sampling parameters. When it
690
+ is a single value, it is applied to every prompt. When it
691
+ is a list, the list must have the same length as the
692
+                prompts; each element is paired with the corresponding prompt.
693
+ use_tqdm: Whether to use tqdm to display the progress bar.
694
+ lora_request: LoRA request to use for generation, if any.
695
+ chat_template: The template to use for structuring the chat.
696
+ If not provided, the model's default chat template will be used.
697
+ chat_template_content_format: The format to render message content.
698
+
699
+ - "string" will render the content as a string.
700
+ Example: `"Who are you?"`
701
+ - "openai" will render the content as a list of dictionaries,
702
+ similar to OpenAI schema.
703
+ Example: `[{"type": "text", "text": "Who are you?"}]`
704
+
705
+ add_generation_prompt: If True, adds a generation template
706
+ to each message.
707
+ continue_final_message: If True, continues the final message in
708
+ the conversation instead of starting a new one. Cannot be
709
+ `True` if `add_generation_prompt` is also `True`.
710
+ chat_template_kwargs: Additional kwargs to pass to the chat
711
+ template.
712
+ mm_processor_kwargs: Multimodal processor kwarg overrides for this
713
+ chat request. Only used for offline requests.
714
+
715
+ Returns:
716
+ A list of `RequestOutput` objects containing the generated
717
+ responses in the same order as the input messages.
718
+ """
719
+ list_of_messages: list[list[ChatCompletionMessageParam]]
720
+
721
+ # Handle multi and single conversations
722
+ if is_list_of(messages, list):
723
+ # messages is list[list[...]]
724
+ list_of_messages = cast(list[list[ChatCompletionMessageParam]],
725
+ messages)
726
+ else:
727
+ # messages is list[...]
728
+ list_of_messages = [
729
+ cast(list[ChatCompletionMessageParam], messages)
730
+ ]
731
+
732
+ tokenizer = self.get_tokenizer(lora_request)
733
+ model_config = self.llm_engine.get_model_config()
734
+ resolved_content_format = resolve_chat_template_content_format(
735
+ chat_template,
736
+ tools,
737
+ chat_template_content_format,
738
+ tokenizer,
739
+ model_config=model_config,
740
+ )
741
+
742
+ _chat_template_kwargs: dict[str, Any] = dict(
743
+ chat_template=chat_template,
744
+ add_generation_prompt=add_generation_prompt,
745
+ continue_final_message=continue_final_message,
746
+ tools=tools,
747
+ )
748
+ _chat_template_kwargs.update(chat_template_kwargs or {})
749
+
750
+ prompts: list[Union[TokensPrompt, TextPrompt]] = []
751
+
752
+ for msgs in list_of_messages:
753
+ # NOTE: _parse_chat_message_content_parts() currently doesn't
754
+ # handle mm_processor_kwargs, since there is no implementation in
755
+ # the chat message parsing for it.
756
+ conversation, mm_data = parse_chat_messages(
757
+ msgs,
758
+ model_config,
759
+ tokenizer,
760
+ content_format=resolved_content_format,
761
+ )
762
+
763
+ if isinstance(tokenizer, MistralTokenizer):
764
+ prompt_token_ids = apply_mistral_chat_template(
765
+ tokenizer,
766
+ messages=msgs,
767
+ **_chat_template_kwargs,
768
+ )
769
+ else:
770
+ prompt_str = apply_hf_chat_template(
771
+ tokenizer=tokenizer,
772
+ conversation=conversation,
773
+ model_config=model_config,
774
+ **_chat_template_kwargs,
775
+ )
776
+ # Special tokens are already included in chat templates so
777
+ # should not be added by the tokenizer in this case.
778
+ prompt_token_ids = tokenizer.encode(prompt_str,
779
+ add_special_tokens=False)
780
+
781
+ prompt = TokensPrompt(prompt_token_ids=prompt_token_ids)
782
+
783
+ if mm_data is not None:
784
+ prompt["multi_modal_data"] = mm_data
785
+
786
+ if mm_processor_kwargs is not None:
787
+ prompt["mm_processor_kwargs"] = mm_processor_kwargs
788
+
789
+ prompts.append(prompt)
790
+
791
+ return self.generate(
792
+ prompts,
793
+ sampling_params=sampling_params,
794
+ use_tqdm=use_tqdm,
795
+ lora_request=lora_request,
796
+ )
797
+
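A sketch of chat() with a single conversation; message contents are arbitrary.

from vllm import SamplingParams

# Sketch only: one conversation, rendered through the model's chat template
# (assumes the loaded model actually ships a chat template).
conversation = [
    {"role": "system", "content": "You are a concise assistant."},
    {"role": "user", "content": "Who are you?"},
]
chat_outputs = llm.chat(conversation, SamplingParams(max_tokens=64))
print(chat_outputs[0].outputs[0].text)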
798
+ @overload
799
+ def encode(
800
+ self,
801
+ prompts: Union[PromptType, Sequence[PromptType]],
802
+ /,
803
+ pooling_params: Optional[Union[PoolingParams,
804
+ Sequence[PoolingParams]]] = None,
805
+ *,
806
+ truncate_prompt_tokens: Optional[int] = None,
807
+ use_tqdm: bool = True,
808
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
809
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
810
+ ) -> list[PoolingRequestOutput]:
811
+ ...
812
+
813
+ @overload # LEGACY: single (prompt + optional token ids)
814
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
815
+ def encode(
816
+ self,
817
+ prompts: str,
818
+ pooling_params: Optional[Union[PoolingParams,
819
+ Sequence[PoolingParams]]] = None,
820
+ prompt_token_ids: Optional[list[int]] = None,
821
+ truncate_prompt_tokens: Optional[int] = None,
822
+ use_tqdm: bool = True,
823
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
824
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
825
+ ) -> list[PoolingRequestOutput]:
826
+ ...
827
+
828
+ @overload # LEGACY: multi (prompt + optional token ids)
829
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
830
+ def encode(
831
+ self,
832
+ prompts: list[str],
833
+ pooling_params: Optional[Union[PoolingParams,
834
+ Sequence[PoolingParams]]] = None,
835
+ prompt_token_ids: Optional[list[list[int]]] = None,
836
+ truncate_prompt_tokens: Optional[int] = None,
837
+ use_tqdm: bool = True,
838
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
839
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
840
+ ) -> list[PoolingRequestOutput]:
841
+ ...
842
+
843
+ @overload # LEGACY: single (token ids + optional prompt)
844
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
845
+ def encode(
846
+ self,
847
+ prompts: Optional[str] = None,
848
+ pooling_params: Optional[Union[PoolingParams,
849
+ Sequence[PoolingParams]]] = None,
850
+ *,
851
+ prompt_token_ids: list[int],
852
+ truncate_prompt_tokens: Optional[int] = None,
853
+ use_tqdm: bool = True,
854
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
855
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
856
+ ) -> list[PoolingRequestOutput]:
857
+ ...
858
+
859
+ @overload # LEGACY: multi (token ids + optional prompt)
860
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
861
+ def encode(
862
+ self,
863
+ prompts: Optional[list[str]] = None,
864
+ pooling_params: Optional[Union[PoolingParams,
865
+ Sequence[PoolingParams]]] = None,
866
+ *,
867
+ prompt_token_ids: list[list[int]],
868
+ truncate_prompt_tokens: Optional[int] = None,
869
+ use_tqdm: bool = True,
870
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
871
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
872
+ ) -> list[PoolingRequestOutput]:
873
+ ...
874
+
875
+ @overload # LEGACY: single or multi token ids [pos-only]
876
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
877
+ def encode(
878
+ self,
879
+ prompts: None,
880
+ pooling_params: None,
881
+ prompt_token_ids: Union[list[int], list[list[int]]],
882
+ truncate_prompt_tokens: Optional[int] = None,
883
+ use_tqdm: bool = True,
884
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
885
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
886
+ ) -> list[PoolingRequestOutput]:
887
+ ...
888
+
889
+ @deprecate_kwargs(
890
+ "prompt_token_ids",
891
+ is_deprecated=lambda: LLM.DEPRECATE_LEGACY,
892
+ additional_message="Please use the 'prompts' parameter instead.",
893
+ )
894
+ def encode(
895
+ self,
896
+ prompts: Union[Union[PromptType, Sequence[PromptType]],
897
+ Optional[Union[str, list[str]]]] = None,
898
+ pooling_params: Optional[Union[PoolingParams,
899
+ Sequence[PoolingParams]]] = None,
900
+ prompt_token_ids: Optional[Union[list[int], list[list[int]]]] = None,
901
+ truncate_prompt_tokens: Optional[int] = None,
902
+ use_tqdm: bool = True,
903
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
904
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
905
+ ) -> list[PoolingRequestOutput]:
906
+ """Apply pooling to the hidden states corresponding to the input
907
+ prompts.
908
+
909
+ This class automatically batches the given prompts, considering
910
+ the memory constraint. For the best performance, put all of your prompts
911
+ into a single list and pass it to this method.
912
+
913
+ Args:
914
+ prompts: The prompts to the LLM. You may pass a sequence of prompts
915
+ for batch inference. See [PromptType][vllm.inputs.PromptType]
916
+                for more details about the format of each prompt.
917
+ pooling_params: The pooling parameters for pooling. If None, we
918
+ use the default pooling parameters.
919
+ use_tqdm: Whether to use tqdm to display the progress bar.
920
+ lora_request: LoRA request to use for generation, if any.
921
+ prompt_adapter_request: Prompt Adapter request to use for
922
+ generation, if any.
923
+
924
+ Returns:
925
+ A list of `PoolingRequestOutput` objects containing the
926
+ pooled hidden states in the same order as the input prompts.
927
+
928
+ Note:
929
+ Using `prompts` and `prompt_token_ids` as keyword parameters is
930
+ considered legacy and may be deprecated in the future. You should
931
+            instead pass them via the `prompts` parameter.
932
+ """
933
+ runner_type = self.llm_engine.model_config.runner_type
934
+ if runner_type != "pooling":
935
+ messages = ["LLM.encode() is only supported for pooling models."]
936
+
937
+ supported_runner_types = self.llm_engine.model_config \
938
+ .supported_runner_types
939
+ if "pooling" in supported_runner_types:
940
+ messages.append(
941
+ "Your model supports the 'pooling' runner, but is "
942
+ f"currently initialized for the '{runner_type}' runner. "
943
+ "Please initialize vLLM using `--task embed`, "
944
+ "`--task classify`, `--task score` etc.")
945
+
946
+ raise ValueError(" ".join(messages))
947
+
948
+ if prompt_token_ids is not None:
949
+ parsed_prompts = self._convert_v1_inputs(
950
+ prompts=cast(Optional[Union[str, list[str]]], prompts),
951
+ prompt_token_ids=prompt_token_ids,
952
+ )
953
+ else:
954
+ parsed_prompts = cast(Union[PromptType, Sequence[PromptType]],
955
+ prompts)
956
+
957
+ if pooling_params is None:
958
+ # Use default pooling params.
959
+ pooling_params = PoolingParams()
960
+ elif isinstance(pooling_params, PoolingParams):
961
+ pooling_params.verify(self.llm_engine.model_config)
962
+ else:
963
+ for pooling_param in pooling_params:
964
+ pooling_param.verify(self.llm_engine.model_config)
965
+
966
+ tokenization_kwargs: dict[str, Any] = {}
967
+ _validate_truncation_size(self.llm_engine.model_config.max_model_len,
968
+ truncate_prompt_tokens, tokenization_kwargs)
969
+
970
+ self._validate_and_add_requests(
971
+ prompts=parsed_prompts,
972
+ params=pooling_params,
973
+ use_tqdm=use_tqdm,
974
+ lora_request=lora_request,
975
+ tokenization_kwargs=tokenization_kwargs,
976
+ prompt_adapter_request=prompt_adapter_request,
977
+ )
978
+
979
+ outputs = self._run_engine(use_tqdm=use_tqdm)
980
+ return self.engine_class.validate_outputs(outputs,
981
+ PoolingRequestOutput)
982
+
983
+ def embed(
984
+ self,
985
+ prompts: Union[PromptType, Sequence[PromptType]],
986
+ /,
987
+ *,
988
+ truncate_prompt_tokens: Optional[int] = None,
989
+ use_tqdm: bool = True,
990
+ pooling_params: Optional[Union[PoolingParams,
991
+ Sequence[PoolingParams]]] = None,
992
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
993
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
994
+ ) -> list[EmbeddingRequestOutput]:
995
+ """
996
+ Generate an embedding vector for each prompt.
997
+
998
+ This class automatically batches the given prompts, considering
999
+ the memory constraint. For the best performance, put all of your prompts
1000
+ into a single list and pass it to this method.
1001
+
1002
+ Args:
1003
+ prompts: The prompts to the LLM. You may pass a sequence of prompts
1004
+ for batch inference. See [PromptType][vllm.inputs.PromptType]
1005
+                for more details about the format of each prompt.
1006
+ pooling_params: The pooling parameters for pooling. If None, we
1007
+ use the default pooling parameters.
1008
+ use_tqdm: Whether to use tqdm to display the progress bar.
1009
+ lora_request: LoRA request to use for generation, if any.
1010
+ prompt_adapter_request: Prompt Adapter request to use for
1011
+ generation, if any.
1012
+
1013
+ Returns:
1014
+ A list of `EmbeddingRequestOutput` objects containing the
1015
+ embedding vectors in the same order as the input prompts.
1016
+ """
1017
+ if self.llm_engine.model_config.task != "embed":
1018
+ raise ValueError(
1019
+ "Embedding API is only enabled for `--task embed`")
1020
+
1021
+ items = self.encode(prompts,
1022
+ truncate_prompt_tokens=truncate_prompt_tokens,
1023
+ use_tqdm=use_tqdm,
1024
+ pooling_params=pooling_params,
1025
+ lora_request=lora_request,
1026
+ prompt_adapter_request=prompt_adapter_request)
1027
+
1028
+ return [EmbeddingRequestOutput.from_base(item) for item in items]
1029
+
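A sketch of embed(); it only works when the engine was created for the embedding task, and the model name here is an assumption.

from vllm import LLM

# Sketch only: an embedding model served with task="embed".
emb_llm = LLM(model="intfloat/e5-small-v2", task="embed")
emb_outputs = emb_llm.embed(["hello world", "goodbye world"])
print(len(emb_outputs[0].outputs.embedding))  # embedding dimension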
1030
+ def classify(
1031
+ self,
1032
+ prompts: Union[PromptType, Sequence[PromptType]],
1033
+ /,
1034
+ *,
1035
+ use_tqdm: bool = True,
1036
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
1037
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
1038
+ ) -> list[ClassificationRequestOutput]:
1039
+ """
1040
+ Generate class logits for each prompt.
1041
+
1042
+ This class automatically batches the given prompts, considering
1043
+ the memory constraint. For the best performance, put all of your prompts
1044
+ into a single list and pass it to this method.
1045
+
1046
+ Args:
1047
+ prompts: The prompts to the LLM. You may pass a sequence of prompts
1048
+ for batch inference. See [PromptType][vllm.inputs.PromptType]
1049
+                for more details about the format of each prompt.
1050
+ use_tqdm: Whether to use tqdm to display the progress bar.
1051
+ lora_request: LoRA request to use for generation, if any.
1052
+ prompt_adapter_request: Prompt Adapter request to use for
1053
+ generation, if any.
1054
+
1055
+ Returns:
1056
+ A list of `ClassificationRequestOutput` objects containing the
1057
+            class logits in the same order as the input prompts.
1058
+ """
1059
+ if self.llm_engine.model_config.task != "classify":
1060
+ raise ValueError(
1061
+ "Classification API is only enabled for `--task classify`")
1062
+
1063
+ items = self.encode(prompts,
1064
+ use_tqdm=use_tqdm,
1065
+ lora_request=lora_request,
1066
+ prompt_adapter_request=prompt_adapter_request)
1067
+
1068
+ return [ClassificationRequestOutput.from_base(item) for item in items]
1069
+
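Likewise for classify(), assuming a sequence-classification model initialized with task="classify"; the model name below is a placeholder.

from vllm import LLM

# Sketch only: model name is illustrative.
cls_llm = LLM(model="some/sequence-classification-model", task="classify")
cls_outputs = cls_llm.classify(["vLLM is easy to use!"])
print(cls_outputs[0].outputs.probs)  # per-class probabilities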
1070
+ def _embedding_score(
1071
+ self,
1072
+ tokenizer: AnyTokenizer,
1073
+ text_1: list[Union[str, TextPrompt, TokensPrompt]],
1074
+ text_2: list[Union[str, TextPrompt, TokensPrompt]],
1075
+ truncate_prompt_tokens: Optional[int] = None,
1076
+ use_tqdm: bool = True,
1077
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
1078
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
1079
+ ) -> list[ScoringRequestOutput]:
1080
+
1081
+ encoded_output: list[PoolingRequestOutput] = self.encode(
1082
+ text_1 + text_2,
1083
+ truncate_prompt_tokens=truncate_prompt_tokens,
1084
+ use_tqdm=use_tqdm,
1085
+ lora_request=lora_request,
1086
+ prompt_adapter_request=prompt_adapter_request)
1087
+
1088
+ encoded_output_1: list[PoolingRequestOutput] = encoded_output[
1089
+ 0:len(text_1)]
1090
+ encoded_output_2: list[PoolingRequestOutput] = encoded_output[
1091
+ len(text_1):]
1092
+
1093
+ if len(encoded_output_1) == 1:
1094
+ encoded_output_1 = encoded_output_1 * len(encoded_output_2)
1095
+
1096
+ scores = _cosine_similarity(tokenizer=tokenizer,
1097
+ embed_1=encoded_output_1,
1098
+ embed_2=encoded_output_2)
1099
+
1100
+ items = self.engine_class.validate_outputs(scores,
1101
+ PoolingRequestOutput)
1102
+ return [ScoringRequestOutput.from_base(item) for item in items]
1103
+
1104
+ def _cross_encoding_score(
1105
+ self,
1106
+ tokenizer: AnyTokenizer,
1107
+ text_1: list[str],
1108
+ text_2: list[str],
1109
+ truncate_prompt_tokens: Optional[int] = None,
1110
+ use_tqdm: bool = True,
1111
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
1112
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
1113
+ ) -> list[ScoringRequestOutput]:
1114
+
1115
+ if isinstance(tokenizer, MistralTokenizer):
1116
+ raise ValueError(
1117
+                "Score API is only enabled for `--task embed` or `--task score`")
1118
+
1119
+ if len(text_1) == 1:
1120
+ text_1 = text_1 * len(text_2)
1121
+
1122
+ input_pairs = [(t1, t2) for t1, t2 in zip(text_1, text_2)]
1123
+
1124
+ pooling_params = PoolingParams()
1125
+
1126
+ tokenization_kwargs: dict[str, Any] = {}
1127
+ _validate_truncation_size(self.llm_engine.model_config.max_model_len,
1128
+ truncate_prompt_tokens, tokenization_kwargs)
1129
+
1130
+ parsed_prompts = []
1131
+
1132
+ for q, t in input_pairs:
1133
+ prompt_inputs = tokenizer(text=q,
1134
+ text_pair=t,
1135
+ **tokenization_kwargs)
1136
+ engine_prompt = TokensPrompt(
1137
+ prompt_token_ids=prompt_inputs["input_ids"],
1138
+ token_type_ids=prompt_inputs.get("token_type_ids"))
1139
+ parsed_prompts.append(engine_prompt)
1140
+
1141
+ self._validate_and_add_requests(
1142
+ prompts=parsed_prompts,
1143
+ params=pooling_params,
1144
+ use_tqdm=use_tqdm,
1145
+ lora_request=lora_request,
1146
+ prompt_adapter_request=prompt_adapter_request,
1147
+ )
1148
+
1149
+ outputs = self._run_engine(use_tqdm=use_tqdm)
1150
+ items = self.engine_class.validate_outputs(outputs,
1151
+ PoolingRequestOutput)
1152
+
1153
+ return [ScoringRequestOutput.from_base(item) for item in items]
1154
+
1155
+ def score(
1156
+ self,
1157
+ text_1: Union[SingletonPrompt, Sequence[SingletonPrompt]],
1158
+ text_2: Union[SingletonPrompt, Sequence[SingletonPrompt]],
1159
+ /,
1160
+ *,
1161
+ truncate_prompt_tokens: Optional[int] = None,
1162
+ use_tqdm: bool = True,
1163
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
1164
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
1165
+ ) -> list[ScoringRequestOutput]:
1166
+ """Generate similarity scores for all pairs `<text,text_pair>`.
1167
+
1168
+ The inputs can be `1 -> 1`, `1 -> N` or `N -> N`.
1169
+        In the `1 -> N` case, the `text_1` sentence will be replicated `N`
1170
+ times to pair with the `text_2` sentences.
1171
+ The input pairs are used to build a list of prompts for the
1172
+ cross encoder model. This class automatically batches the prompts,
1173
+ considering the memory constraint. For the best performance, put all
1174
+ of your texts into a single list and pass it to this method.
1175
+
1176
+ Args:
1177
+ text_1: can be a single prompt or a list of prompts, in which
1178
+ case it has to have the same length as the `text_2` list
1179
+ text_2: The texts to pair with the query to form the input
1180
+ to the LLM. See [PromptType][vllm.inputs.PromptType] for
1181
+                more details about the format of each prompt.
1182
+ use_tqdm: Whether to use tqdm to display the progress bar.
1183
+ lora_request: LoRA request to use for generation, if any.
1184
+ prompt_adapter_request: Prompt Adapter request to use for
1185
+ generation, if any.
1186
+
1187
+ Returns:
1188
+ A list of `ScoringRequestOutput` objects containing the
1189
+ generated scores in the same order as the input prompts.
1190
+ """
1191
+ runner_type = self.llm_engine.model_config.runner_type
1192
+ if runner_type != "pooling":
1193
+ messages = ["LLM.score() is only supported for pooling models."]
1194
+
1195
+ supported_runner_types = self.llm_engine.model_config \
1196
+ .supported_runner_types
1197
+ if "pooling" in supported_runner_types:
1198
+ messages.append(
1199
+ "Your model supports the 'pooling' runner, but is "
1200
+ f"currently initialized for the '{runner_type}' runner. "
1201
+ "Please initialize vLLM using `--task embed`, "
1202
+ "`--task classify`, `--task score` etc.")
1203
+
1204
+ raise ValueError(" ".join(messages))
1205
+
1206
+ if self.llm_engine.model_config.task not in ("embed", "score"):
1207
+ raise ValueError(
1208
+                "Score API is only enabled for `--task embed` or `--task score`")
1209
+
1210
+ # the tokenizer for models such as
1211
+ # "cross-encoder/ms-marco-MiniLM-L-6-v2" doesn't support passing
1212
+ # lists of tokens to the `text` and `text_pair` kwargs
1213
+ tokenizer = self.llm_engine.get_tokenizer()
1214
+
1215
+ def ensure_str(prompt: SingletonPrompt):
1216
+ if isinstance(prompt, dict):
1217
+ if "multi_modal_data" in prompt:
1218
+ raise ValueError("Multi-modal prompt is not "
1219
+ "supported for scoring")
1220
+ elif "prompt_token_ids" in prompt:
1221
+ prompt = tokenizer.decode(
1222
+ cast(TokensPrompt, prompt)["prompt_token_ids"])
1223
+ elif "prompt" in prompt:
1224
+ prompt = cast(TextPrompt, prompt)["prompt"]
1225
+ assert type(prompt) is str
1226
+ return prompt
1227
+
1228
+ if isinstance(text_1, (str, dict)):
1229
+ # Convert a single prompt to a list.
1230
+ text_1 = [text_1]
1231
+ input_text_1: list[str] = [ensure_str(t) for t in text_1]
1232
+
1233
+ if isinstance(text_2, (str, dict)):
1234
+ # Convert a single prompt to a list.
1235
+ text_2 = [text_2]
1236
+ input_text_2: list[str] = [ensure_str(t) for t in text_2]
1237
+
1238
+ _validate_score_input_lens(input_text_1, input_text_2)
1239
+
1240
+ if self.llm_engine.model_config.is_cross_encoder:
1241
+ return self._cross_encoding_score(tokenizer, input_text_1,
1242
+ input_text_2,
1243
+ truncate_prompt_tokens, use_tqdm,
1244
+ lora_request,
1245
+ prompt_adapter_request)
1246
+ else:
1247
+ return self._embedding_score(
1248
+ tokenizer,
1249
+ input_text_1, # type: ignore[arg-type]
1250
+ input_text_2, # type: ignore[arg-type]
1251
+ truncate_prompt_tokens,
1252
+ use_tqdm,
1253
+ lora_request,
1254
+ prompt_adapter_request)
1255
+
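A sketch of score() in the 1 -> N case, assuming a cross-encoder model initialized with task="score".

from vllm import LLM

# Sketch only: score one query against several passages.
score_llm = LLM(model="cross-encoder/ms-marco-MiniLM-L-6-v2", task="score")
score_outputs = score_llm.score(
    "What is the capital of France?",
    ["Paris is the capital of France.", "The Eiffel Tower is in Paris."],
)
for o in score_outputs:
    print(o.outputs.score)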
1256
+ def start_profile(self) -> None:
1257
+ self.llm_engine.start_profile()
1258
+
1259
+ def stop_profile(self) -> None:
1260
+ self.llm_engine.stop_profile()
1261
+
1262
+ def reset_prefix_cache(self, device: Optional[Device] = None) -> bool:
1263
+ return self.llm_engine.reset_prefix_cache(device)
1264
+
1265
+ def sleep(self, level: int = 1):
1266
+ """
1267
+ Put the engine to sleep. The engine should not process any requests.
1268
+ The caller should guarantee that no requests are being processed
1269
+ during the sleep period, before `wake_up` is called.
1270
+
1271
+ Args:
1272
+ level: The sleep level. Level 1 sleep will offload the model
1273
+ weights and discard the kv cache. The content of kv cache
1274
+ is forgotten. Level 1 sleep is good for sleeping and waking
1275
+ up the engine to run the same model again. The model weights
1276
+ are backed up in CPU memory. Please make sure there's enough
1277
+ CPU memory to store the model weights. Level 2 sleep will
1278
+ discard both the model weights and the kv cache. The content
1279
+ of both the model weights and kv cache is forgotten. Level 2
1280
+ sleep is good for sleeping and waking up the engine to run a
1281
+ different model or update the model, where previous model
1282
+ weights are not needed. It reduces CPU memory pressure.
1283
+ """
1284
+ self.reset_prefix_cache()
1285
+ self.llm_engine.sleep(level=level)
1286
+
1287
+ def wake_up(self, tags: Optional[list[str]] = None):
1288
+ """
1289
+ Wake up the engine from sleep mode. See the [sleep][] method
1290
+ for more details.
1291
+
1292
+ Args:
1293
+ tags: An optional list of tags to reallocate the engine memory
1294
+ for specific memory allocations. Values must be in
1295
+ `("weights", "kv_cache")`. If None, all memory is reallocated.
1296
+ wake_up should be called with all tags (or None) before the
1297
+ engine is used again.
1298
+ """
1299
+ self.llm_engine.wake_up(tags)
1300
+
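A sketch of the sleep()/wake_up() cycle described above; the enable_sleep_mode flag is assumed to be set when constructing the engine.

from vllm import LLM, SamplingParams

# Sketch only: free most GPU memory between runs of the same model.
llm = LLM(model="facebook/opt-125m", enable_sleep_mode=True)
llm.generate(["warm up"], SamplingParams(max_tokens=8))

llm.sleep(level=1)   # offload weights to CPU, discard the KV cache
# ... do other GPU work here ...
llm.wake_up()        # must be called before the engine is used again

llm.generate(["back again"], SamplingParams(max_tokens=8))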
1301
+ def get_metrics(self) -> list["Metric"]:
1302
+ """Return a snapshot of aggregated metrics from Prometheus.
1303
+
1304
+            A list of ``Metric`` instances capturing the current state
1305
+ A ``MetricSnapshot`` instance capturing the current state
1306
+ of all aggregated metrics from Prometheus.
1307
+
1308
+ Note:
1309
+ This method is only available with the V1 LLM engine.
1310
+ """
1311
+ from vllm.v1.engine.llm_engine import LLMEngine as V1LLMEngine
1312
+ assert isinstance(self.llm_engine, V1LLMEngine)
1313
+ return self.llm_engine.get_metrics()
1314
+
1315
+ # LEGACY
1316
+ def _convert_v1_inputs(
1317
+ self,
1318
+ prompts: Optional[Union[str, list[str]]],
1319
+ prompt_token_ids: Optional[Union[list[int], list[list[int]]]],
1320
+ ):
1321
+ # skip_tokenizer_init is now checked in engine
1322
+
1323
+ if prompts is None and prompt_token_ids is None:
1324
+ raise ValueError(
1325
+ "Either prompts or prompt_token_ids must be provided.")
1326
+ if prompts is not None and prompt_token_ids is not None \
1327
+ and len(prompts) != len(prompt_token_ids):
1328
+ raise ValueError(
1329
+ "The lengths of prompts and prompt_token_ids must be the same."
1330
+ )
1331
+
1332
+ if prompts is not None:
1333
+ prompts = [p["content"] for p in parse_and_batch_prompt(prompts)]
1334
+ if prompt_token_ids is not None:
1335
+ prompt_token_ids = [
1336
+ p["content"] for p in parse_and_batch_prompt(prompt_token_ids)
1337
+ ]
1338
+ if prompts is not None:
1339
+ num_requests = len(prompts)
1340
+ elif prompt_token_ids is not None:
1341
+ num_requests = len(prompt_token_ids)
1342
+ parsed_prompts: list[PromptType] = []
1343
+ for i in range(num_requests):
1344
+ item: PromptType
1345
+
1346
+ if prompts is not None:
1347
+ item = TextPrompt(prompt=prompts[i])
1348
+ elif prompt_token_ids is not None:
1349
+ item = TokensPrompt(prompt_token_ids=prompt_token_ids[i])
1350
+ else:
1351
+ raise AssertionError
1352
+
1353
+ parsed_prompts.append(item)
1354
+
1355
+ return parsed_prompts
1356
+
1357
+ def _validate_and_add_requests(
1358
+ self,
1359
+ prompts: Union[PromptType, Sequence[PromptType]],
1360
+ params: Union[SamplingParams, Sequence[SamplingParams], PoolingParams,
1361
+ Sequence[PoolingParams]],
1362
+ *,
1363
+ use_tqdm: bool,
1364
+ lora_request: Optional[Union[Sequence[LoRARequest], LoRARequest]],
1365
+ prompt_adapter_request: Optional[PromptAdapterRequest],
1366
+ tokenization_kwargs: Optional[dict[str, Any]] = None,
1367
+ guided_options: Optional[GuidedDecodingRequest] = None,
1368
+ priority: Optional[list[int]] = None,
1369
+ ) -> None:
1370
+ if guided_options is not None:
1371
+ warnings.warn(
1372
+ "guided_options_request is deprecated, use "
1373
+ "SamplingParams.guided_decoding instead",
1374
+ DeprecationWarning,
1375
+ stacklevel=2,
1376
+ )
1377
+
1378
+ if isinstance(prompts, (str, dict)):
1379
+ # Convert a single prompt to a list.
1380
+ prompts = [prompts]
1381
+
1382
+ num_requests = len(prompts)
1383
+ if isinstance(params, list) and len(params) != num_requests:
1384
+ raise ValueError("The lengths of prompts and params "
1385
+ "must be the same.")
1386
+ if isinstance(lora_request,
1387
+ list) and len(lora_request) != num_requests:
1388
+ raise ValueError("The lengths of prompts and lora_request "
1389
+ "must be the same.")
1390
+
1391
+ for sp in params if isinstance(params, list) else (params, ):
1392
+ if isinstance(sp, SamplingParams):
1393
+ self._add_guided_params(sp, guided_options)
1394
+
1395
+ # We only care about the final output
1396
+ sp.output_kind = RequestOutputKind.FINAL_ONLY
1397
+
1398
+ # Add requests to the engine.
1399
+ it = prompts
1400
+ if use_tqdm:
1401
+ it = tqdm(it, desc="Adding requests")
1402
+
1403
+ for i, prompt in enumerate(it):
1404
+ self._add_request(
1405
+ prompt,
1406
+ params[i] if isinstance(params, Sequence) else params,
1407
+ tokenization_kwargs=tokenization_kwargs,
1408
+ lora_request=lora_request[i] if isinstance(
1409
+ lora_request, Sequence) else lora_request,
1410
+ prompt_adapter_request=prompt_adapter_request,
1411
+ priority=priority[i] if priority else 0,
1412
+ )
1413
+
1414
+ def _add_request(
1415
+ self,
1416
+ prompt: PromptType,
1417
+ params: Union[SamplingParams, PoolingParams],
1418
+ tokenization_kwargs: Optional[dict[str, Any]] = None,
1419
+ lora_request: Optional[LoRARequest] = None,
1420
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
1421
+ priority: int = 0,
1422
+ ) -> None:
1423
+ request_id = str(next(self.request_counter))
1424
+ self.llm_engine.add_request(
1425
+ request_id,
1426
+ prompt,
1427
+ params,
1428
+ lora_request=lora_request,
1429
+ tokenization_kwargs=tokenization_kwargs,
1430
+ prompt_adapter_request=prompt_adapter_request,
1431
+ priority=priority,
1432
+ )
1433
+
1434
+ def _add_guided_params(
1435
+ self,
1436
+ params: SamplingParams,
1437
+ guided_options: Optional[GuidedDecodingRequest] = None):
1438
+ if guided_options is None:
1439
+ return params
1440
+
1441
+ if params.guided_decoding is not None:
1442
+ raise ValueError("Cannot set both guided_options_request and "
1443
+ "params.guided_decoding.")
1444
+
1445
+ params.guided_decoding = GuidedDecodingParams(
1446
+ json=guided_options.guided_json,
1447
+ regex=guided_options.guided_regex,
1448
+ choice=guided_options.guided_choice,
1449
+ grammar=guided_options.guided_grammar,
1450
+ json_object=guided_options.guided_json_object,
1451
+ backend=guided_options.guided_decoding_backend,
1452
+ whitespace_pattern=guided_options.guided_whitespace_pattern,
1453
+ structural_tag=guided_options.structural_tag,
1454
+ )
1455
+ return params
1456
+
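Since guided_options_request is deprecated in favor of SamplingParams.guided_decoding (see the warning emitted above), a sketch of the preferred path:

from vllm import LLM, SamplingParams
from vllm.sampling_params import GuidedDecodingParams

# Sketch only: constrain generation to a fixed set of choices.
llm = LLM(model="facebook/opt-125m")
params = SamplingParams(
    max_tokens=4,
    guided_decoding=GuidedDecodingParams(choice=["positive", "negative"]),
)
outputs = llm.generate(["This movie was fantastic! Sentiment:"], params)
print(outputs[0].outputs[0].text)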
1457
+ def _run_engine(
1458
+ self, *, use_tqdm: bool
1459
+ ) -> list[Union[RequestOutput, PoolingRequestOutput]]:
1460
+ # Initialize tqdm.
1461
+ if use_tqdm:
1462
+ num_requests = self.llm_engine.get_num_unfinished_requests()
1463
+ pbar = tqdm(
1464
+ total=num_requests,
1465
+ desc="Processed prompts",
1466
+ dynamic_ncols=True,
1467
+ postfix=(f"est. speed input: {0:.2f} toks/s, "
1468
+ f"output: {0:.2f} toks/s"),
1469
+ )
1470
+
1471
+ # Run the engine.
1472
+ outputs: list[Union[RequestOutput, PoolingRequestOutput]] = []
1473
+ total_in_toks = 0
1474
+ total_out_toks = 0
1475
+ while self.llm_engine.has_unfinished_requests():
1476
+ step_outputs = self.llm_engine.step()
1477
+ for output in step_outputs:
1478
+ if output.finished:
1479
+ outputs.append(output)
1480
+ if use_tqdm:
1481
+ if isinstance(output, RequestOutput):
1482
+ # Calculate tokens only for RequestOutput
1483
+ n = len(output.outputs)
1484
+ assert output.prompt_token_ids is not None
1485
+ total_in_toks += len(output.prompt_token_ids) * n
1486
+ in_spd = total_in_toks / pbar.format_dict["elapsed"]
1487
+ total_out_toks += sum(
1488
+ len(stp.token_ids) for stp in output.outputs)
1489
+ out_spd = (total_out_toks /
1490
+ pbar.format_dict["elapsed"])
1491
+ pbar.postfix = (
1492
+ f"est. speed input: {in_spd:.2f} toks/s, "
1493
+ f"output: {out_spd:.2f} toks/s")
1494
+ pbar.update(n)
1495
+ else:
1496
+ pbar.update(1)
1497
+
1498
+ if use_tqdm:
1499
+ pbar.close()
1500
+ # Sort the outputs by request ID.
1501
+ # This is necessary because some requests may be finished earlier than
1502
+        # requests that were submitted before them.
1503
+ return sorted(outputs, key=lambda x: int(x.request_id))