vllm-cpu 0.8.5.post2 (vllm_cpu-0.8.5.post2-cp310-cp310-manylinux_2_17_x86_64.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of vllm-cpu might be problematic.

Files changed (1103)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +170 -0
  3. vllm/_custom_ops.py +1536 -0
  4. vllm/_ipex_ops.py +241 -0
  5. vllm/_version.py +34 -0
  6. vllm/adapter_commons/__init__.py +0 -0
  7. vllm/adapter_commons/layers.py +16 -0
  8. vllm/adapter_commons/models.py +105 -0
  9. vllm/adapter_commons/request.py +25 -0
  10. vllm/adapter_commons/utils.py +92 -0
  11. vllm/adapter_commons/worker_manager.py +38 -0
  12. vllm/assets/__init__.py +0 -0
  13. vllm/assets/audio.py +38 -0
  14. vllm/assets/base.py +40 -0
  15. vllm/assets/image.py +31 -0
  16. vllm/assets/video.py +103 -0
  17. vllm/attention/__init__.py +19 -0
  18. vllm/attention/backends/__init__.py +0 -0
  19. vllm/attention/backends/abstract.py +306 -0
  20. vllm/attention/backends/blocksparse_attn.py +457 -0
  21. vllm/attention/backends/cpu_mla.py +303 -0
  22. vllm/attention/backends/flash_attn.py +999 -0
  23. vllm/attention/backends/flashinfer.py +1092 -0
  24. vllm/attention/backends/flashmla.py +242 -0
  25. vllm/attention/backends/hpu_attn.py +301 -0
  26. vllm/attention/backends/ipex_attn.py +396 -0
  27. vllm/attention/backends/mla/__init__.py +0 -0
  28. vllm/attention/backends/mla/common.py +1444 -0
  29. vllm/attention/backends/pallas.py +346 -0
  30. vllm/attention/backends/placeholder_attn.py +399 -0
  31. vllm/attention/backends/rocm_aiter_mla.py +412 -0
  32. vllm/attention/backends/rocm_flash_attn.py +969 -0
  33. vllm/attention/backends/torch_sdpa.py +691 -0
  34. vllm/attention/backends/triton_mla.py +113 -0
  35. vllm/attention/backends/utils.py +609 -0
  36. vllm/attention/backends/xformers.py +798 -0
  37. vllm/attention/layer.py +443 -0
  38. vllm/attention/ops/__init__.py +0 -0
  39. vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
  40. vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +432 -0
  41. vllm/attention/ops/blocksparse_attention/interface.py +238 -0
  42. vllm/attention/ops/blocksparse_attention/utils.py +244 -0
  43. vllm/attention/ops/chunked_prefill_paged_decode.py +366 -0
  44. vllm/attention/ops/flashmla.py +115 -0
  45. vllm/attention/ops/hpu_paged_attn.py +105 -0
  46. vllm/attention/ops/ipex_attn.py +193 -0
  47. vllm/attention/ops/merge_attn_states.py +42 -0
  48. vllm/attention/ops/nki_flash_attn.py +905 -0
  49. vllm/attention/ops/paged_attn.py +255 -0
  50. vllm/attention/ops/prefix_prefill.py +902 -0
  51. vllm/attention/ops/rocm_aiter_mla.py +42 -0
  52. vllm/attention/ops/rocm_aiter_paged_attn.py +101 -0
  53. vllm/attention/ops/triton_decode_attention.py +675 -0
  54. vllm/attention/ops/triton_flash_attention.py +1375 -0
  55. vllm/attention/ops/triton_merge_attn_states.py +96 -0
  56. vllm/attention/selector.py +186 -0
  57. vllm/attention/utils/fa_utils.py +54 -0
  58. vllm/beam_search.py +82 -0
  59. vllm/benchmarks/__init__.py +0 -0
  60. vllm/benchmarks/datasets.py +831 -0
  61. vllm/benchmarks/endpoint_request_func.py +160 -0
  62. vllm/benchmarks/latency.py +181 -0
  63. vllm/benchmarks/serve.py +925 -0
  64. vllm/benchmarks/throughput.py +608 -0
  65. vllm/benchmarks/utils.py +69 -0
  66. vllm/collect_env.py +795 -0
  67. vllm/compilation/__init__.py +0 -0
  68. vllm/compilation/backends.py +715 -0
  69. vllm/compilation/compiler_interface.py +437 -0
  70. vllm/compilation/counter.py +33 -0
  71. vllm/compilation/decorators.py +249 -0
  72. vllm/compilation/fix_functionalization.py +182 -0
  73. vllm/compilation/fusion.py +617 -0
  74. vllm/compilation/fx_utils.py +60 -0
  75. vllm/compilation/inductor_pass.py +114 -0
  76. vllm/compilation/monitor.py +38 -0
  77. vllm/compilation/multi_output_match.py +108 -0
  78. vllm/compilation/noop_elimination.py +135 -0
  79. vllm/compilation/pass_manager.py +74 -0
  80. vllm/compilation/sequence_parallelism.py +266 -0
  81. vllm/compilation/torch25_custom_graph_pass.py +41 -0
  82. vllm/compilation/vllm_inductor_pass.py +68 -0
  83. vllm/compilation/wrapper.py +129 -0
  84. vllm/config.py +4179 -0
  85. vllm/connections.py +170 -0
  86. vllm/core/__init__.py +0 -0
  87. vllm/core/block/__init__.py +0 -0
  88. vllm/core/block/block_table.py +398 -0
  89. vllm/core/block/common.py +370 -0
  90. vllm/core/block/cpu_gpu_block_allocator.py +440 -0
  91. vllm/core/block/interfaces.py +318 -0
  92. vllm/core/block/naive_block.py +465 -0
  93. vllm/core/block/prefix_caching_block.py +1134 -0
  94. vllm/core/block/utils.py +27 -0
  95. vllm/core/block_manager.py +520 -0
  96. vllm/core/evictor.py +156 -0
  97. vllm/core/interfaces.py +134 -0
  98. vllm/core/placeholder_block_space_manager.py +99 -0
  99. vllm/core/scheduler.py +2060 -0
  100. vllm/device_allocator/__init__.py +0 -0
  101. vllm/device_allocator/cumem.py +280 -0
  102. vllm/distributed/__init__.py +5 -0
  103. vllm/distributed/communication_op.py +40 -0
  104. vllm/distributed/device_communicators/__init__.py +0 -0
  105. vllm/distributed/device_communicators/base_device_communicator.py +151 -0
  106. vllm/distributed/device_communicators/cpu_communicator.py +139 -0
  107. vllm/distributed/device_communicators/cuda_communicator.py +131 -0
  108. vllm/distributed/device_communicators/cuda_wrapper.py +179 -0
  109. vllm/distributed/device_communicators/custom_all_reduce.py +301 -0
  110. vllm/distributed/device_communicators/custom_all_reduce_utils.py +257 -0
  111. vllm/distributed/device_communicators/hpu_communicator.py +45 -0
  112. vllm/distributed/device_communicators/neuron_communicator.py +19 -0
  113. vllm/distributed/device_communicators/pynccl.py +217 -0
  114. vllm/distributed/device_communicators/pynccl_wrapper.py +340 -0
  115. vllm/distributed/device_communicators/shm_broadcast.py +557 -0
  116. vllm/distributed/device_communicators/tpu_communicator.py +93 -0
  117. vllm/distributed/device_communicators/xpu_communicator.py +54 -0
  118. vllm/distributed/kv_transfer/README.md +29 -0
  119. vllm/distributed/kv_transfer/__init__.py +11 -0
  120. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  121. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  122. vllm/distributed/kv_transfer/kv_connector/base.py +127 -0
  123. vllm/distributed/kv_transfer/kv_connector/factory.py +107 -0
  124. vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +98 -0
  125. vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +201 -0
  126. vllm/distributed/kv_transfer/kv_connector/simple_connector.py +328 -0
  127. vllm/distributed/kv_transfer/kv_connector/utils.py +90 -0
  128. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +8 -0
  129. vllm/distributed/kv_transfer/kv_connector/v1/base.py +209 -0
  130. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +131 -0
  131. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +383 -0
  132. vllm/distributed/kv_transfer/kv_connector_agent.py +76 -0
  133. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  134. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +174 -0
  135. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +160 -0
  136. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +236 -0
  137. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  138. vllm/distributed/kv_transfer/kv_pipe/base.py +66 -0
  139. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +279 -0
  140. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +279 -0
  141. vllm/distributed/kv_transfer/kv_transfer_state.py +70 -0
  142. vllm/distributed/parallel_state.py +1209 -0
  143. vllm/distributed/utils.py +366 -0
  144. vllm/engine/__init__.py +0 -0
  145. vllm/engine/arg_utils.py +1724 -0
  146. vllm/engine/async_llm_engine.py +1261 -0
  147. vllm/engine/async_timeout.py +191 -0
  148. vllm/engine/llm_engine.py +2150 -0
  149. vllm/engine/metrics.py +717 -0
  150. vllm/engine/metrics_types.py +96 -0
  151. vllm/engine/multiprocessing/__init__.py +183 -0
  152. vllm/engine/multiprocessing/client.py +745 -0
  153. vllm/engine/multiprocessing/engine.py +450 -0
  154. vllm/engine/output_processor/__init__.py +0 -0
  155. vllm/engine/output_processor/interfaces.py +74 -0
  156. vllm/engine/output_processor/multi_step.py +210 -0
  157. vllm/engine/output_processor/single_step.py +136 -0
  158. vllm/engine/output_processor/stop_checker.py +130 -0
  159. vllm/engine/output_processor/util.py +27 -0
  160. vllm/engine/protocol.py +302 -0
  161. vllm/entrypoints/__init__.py +0 -0
  162. vllm/entrypoints/api_server.py +177 -0
  163. vllm/entrypoints/chat_utils.py +1259 -0
  164. vllm/entrypoints/cli/__init__.py +0 -0
  165. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  166. vllm/entrypoints/cli/benchmark/base.py +38 -0
  167. vllm/entrypoints/cli/benchmark/latency.py +29 -0
  168. vllm/entrypoints/cli/benchmark/main.py +53 -0
  169. vllm/entrypoints/cli/benchmark/serve.py +29 -0
  170. vllm/entrypoints/cli/benchmark/throughput.py +29 -0
  171. vllm/entrypoints/cli/collect_env.py +35 -0
  172. vllm/entrypoints/cli/main.py +59 -0
  173. vllm/entrypoints/cli/openai.py +175 -0
  174. vllm/entrypoints/cli/serve.py +59 -0
  175. vllm/entrypoints/cli/types.py +24 -0
  176. vllm/entrypoints/launcher.py +146 -0
  177. vllm/entrypoints/llm.py +1450 -0
  178. vllm/entrypoints/logger.py +44 -0
  179. vllm/entrypoints/openai/__init__.py +0 -0
  180. vllm/entrypoints/openai/api_server.py +1130 -0
  181. vllm/entrypoints/openai/cli_args.py +296 -0
  182. vllm/entrypoints/openai/logits_processors.py +89 -0
  183. vllm/entrypoints/openai/protocol.py +1806 -0
  184. vllm/entrypoints/openai/run_batch.py +439 -0
  185. vllm/entrypoints/openai/serving_chat.py +1210 -0
  186. vllm/entrypoints/openai/serving_completion.py +557 -0
  187. vllm/entrypoints/openai/serving_embedding.py +245 -0
  188. vllm/entrypoints/openai/serving_engine.py +569 -0
  189. vllm/entrypoints/openai/serving_models.py +314 -0
  190. vllm/entrypoints/openai/serving_pooling.py +237 -0
  191. vllm/entrypoints/openai/serving_score.py +439 -0
  192. vllm/entrypoints/openai/serving_tokenization.py +147 -0
  193. vllm/entrypoints/openai/serving_transcription.py +421 -0
  194. vllm/entrypoints/openai/tool_parsers/__init__.py +19 -0
  195. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +163 -0
  196. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +254 -0
  197. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +232 -0
  198. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +370 -0
  199. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +211 -0
  200. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +303 -0
  201. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +262 -0
  202. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +342 -0
  203. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +110 -0
  204. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +292 -0
  205. vllm/entrypoints/openai/tool_parsers/utils.py +123 -0
  206. vllm/entrypoints/score_utils.py +49 -0
  207. vllm/entrypoints/ssl.py +74 -0
  208. vllm/entrypoints/utils.py +136 -0
  209. vllm/env_override.py +34 -0
  210. vllm/envs.py +800 -0
  211. vllm/executor/__init__.py +0 -0
  212. vllm/executor/executor_base.py +400 -0
  213. vllm/executor/mp_distributed_executor.py +243 -0
  214. vllm/executor/msgspec_utils.py +29 -0
  215. vllm/executor/multiproc_worker_utils.py +312 -0
  216. vllm/executor/ray_distributed_executor.py +700 -0
  217. vllm/executor/ray_utils.py +400 -0
  218. vllm/executor/uniproc_executor.py +141 -0
  219. vllm/forward_context.py +159 -0
  220. vllm/inputs/__init__.py +37 -0
  221. vllm/inputs/data.py +248 -0
  222. vllm/inputs/parse.py +121 -0
  223. vllm/inputs/preprocess.py +745 -0
  224. vllm/inputs/registry.py +212 -0
  225. vllm/jsontree.py +79 -0
  226. vllm/logger.py +210 -0
  227. vllm/logging_utils/__init__.py +7 -0
  228. vllm/logging_utils/formatter.py +17 -0
  229. vllm/logits_process.py +121 -0
  230. vllm/lora/__init__.py +0 -0
  231. vllm/lora/fully_sharded_layers.py +335 -0
  232. vllm/lora/layers.py +1263 -0
  233. vllm/lora/lora.py +198 -0
  234. vllm/lora/models.py +802 -0
  235. vllm/lora/ops/__init__.py +0 -0
  236. vllm/lora/ops/torch_ops/__init__.py +15 -0
  237. vllm/lora/ops/torch_ops/lora_ops.py +115 -0
  238. vllm/lora/ops/triton_ops/__init__.py +11 -0
  239. vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
  240. vllm/lora/ops/triton_ops/lora_expand.py +293 -0
  241. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +147 -0
  242. vllm/lora/ops/triton_ops/lora_shrink.py +247 -0
  243. vllm/lora/ops/triton_ops/utils.py +121 -0
  244. vllm/lora/peft_helper.py +115 -0
  245. vllm/lora/punica_wrapper/__init__.py +9 -0
  246. vllm/lora/punica_wrapper/punica_base.py +483 -0
  247. vllm/lora/punica_wrapper/punica_cpu.py +348 -0
  248. vllm/lora/punica_wrapper/punica_gpu.py +289 -0
  249. vllm/lora/punica_wrapper/punica_hpu.py +144 -0
  250. vllm/lora/punica_wrapper/punica_selector.py +20 -0
  251. vllm/lora/punica_wrapper/utils.py +161 -0
  252. vllm/lora/request.py +97 -0
  253. vllm/lora/resolver.py +83 -0
  254. vllm/lora/utils.py +237 -0
  255. vllm/lora/worker_manager.py +251 -0
  256. vllm/model_executor/__init__.py +15 -0
  257. vllm/model_executor/custom_op.py +153 -0
  258. vllm/model_executor/guided_decoding/__init__.py +180 -0
  259. vllm/model_executor/guided_decoding/guidance_decoding.py +63 -0
  260. vllm/model_executor/guided_decoding/guidance_logits_processors.py +85 -0
  261. vllm/model_executor/guided_decoding/guided_fields.py +42 -0
  262. vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +66 -0
  263. vllm/model_executor/guided_decoding/outlines_decoding.py +154 -0
  264. vllm/model_executor/guided_decoding/outlines_logits_processors.py +271 -0
  265. vllm/model_executor/guided_decoding/reasoner/__init__.py +35 -0
  266. vllm/model_executor/guided_decoding/utils.py +241 -0
  267. vllm/model_executor/guided_decoding/xgrammar_decoding.py +425 -0
  268. vllm/model_executor/layers/__init__.py +0 -0
  269. vllm/model_executor/layers/activation.py +368 -0
  270. vllm/model_executor/layers/fused_moe/__init__.py +51 -0
  271. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  272. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  273. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  274. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  275. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  276. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  277. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  278. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  279. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  280. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  281. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  282. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  283. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  284. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  285. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  286. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  287. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  288. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  289. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  290. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  291. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  292. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  293. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  294. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  295. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  296. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  297. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  298. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  299. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  300. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  301. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  302. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  303. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  304. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  305. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  306. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  307. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  308. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  309. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  310. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  311. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  312. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  313. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  314. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  315. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  316. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  317. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  318. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  319. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  320. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  321. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  322. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  323. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  324. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  325. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  326. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  327. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  328. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  329. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  330. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  331. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  332. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  333. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  334. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  335. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  336. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  337. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  338. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  339. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  425. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  426. vllm/model_executor/layers/fused_moe/cutlass_moe.py +180 -0
  427. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +294 -0
  428. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +374 -0
  429. vllm/model_executor/layers/fused_moe/fused_moe.py +1539 -0
  430. vllm/model_executor/layers/fused_moe/layer.py +949 -0
  431. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +243 -0
  432. vllm/model_executor/layers/fused_moe/moe_pallas.py +64 -0
  433. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +59 -0
  434. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +416 -0
  435. vllm/model_executor/layers/fused_moe/utils.py +48 -0
  436. vllm/model_executor/layers/layernorm.py +277 -0
  437. vllm/model_executor/layers/lightning_attn.py +651 -0
  438. vllm/model_executor/layers/linear.py +1518 -0
  439. vllm/model_executor/layers/logits_processor.py +196 -0
  440. vllm/model_executor/layers/mamba/__init__.py +0 -0
  441. vllm/model_executor/layers/mamba/mamba2_metadata.py +109 -0
  442. vllm/model_executor/layers/mamba/mamba_mixer.py +244 -0
  443. vllm/model_executor/layers/mamba/mamba_mixer2.py +538 -0
  444. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  445. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +104 -0
  446. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +415 -0
  447. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +261 -0
  448. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +588 -0
  449. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +750 -0
  450. vllm/model_executor/layers/mamba/ops/ssd_combined.py +231 -0
  451. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +205 -0
  452. vllm/model_executor/layers/pooler.py +336 -0
  453. vllm/model_executor/layers/quantization/__init__.py +153 -0
  454. vllm/model_executor/layers/quantization/aqlm.py +374 -0
  455. vllm/model_executor/layers/quantization/awq.py +184 -0
  456. vllm/model_executor/layers/quantization/awq_marlin.py +518 -0
  457. vllm/model_executor/layers/quantization/awq_triton.py +319 -0
  458. vllm/model_executor/layers/quantization/base_config.py +145 -0
  459. vllm/model_executor/layers/quantization/bitblas.py +459 -0
  460. vllm/model_executor/layers/quantization/bitsandbytes.py +396 -0
  461. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  462. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +624 -0
  463. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1100 -0
  464. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +20 -0
  465. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +357 -0
  466. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +54 -0
  467. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +159 -0
  468. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +119 -0
  469. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +149 -0
  470. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +110 -0
  471. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +200 -0
  472. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +205 -0
  473. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +213 -0
  474. vllm/model_executor/layers/quantization/deepspeedfp.py +193 -0
  475. vllm/model_executor/layers/quantization/experts_int8.py +194 -0
  476. vllm/model_executor/layers/quantization/fbgemm_fp8.py +168 -0
  477. vllm/model_executor/layers/quantization/fp8.py +832 -0
  478. vllm/model_executor/layers/quantization/gguf.py +408 -0
  479. vllm/model_executor/layers/quantization/gptq.py +276 -0
  480. vllm/model_executor/layers/quantization/gptq_bitblas.py +438 -0
  481. vllm/model_executor/layers/quantization/gptq_marlin.py +643 -0
  482. vllm/model_executor/layers/quantization/gptq_marlin_24.py +295 -0
  483. vllm/model_executor/layers/quantization/hqq_marlin.py +328 -0
  484. vllm/model_executor/layers/quantization/ipex_quant.py +250 -0
  485. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  486. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +89 -0
  487. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +82 -0
  488. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
  489. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +299 -0
  490. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +142 -0
  491. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +119 -0
  492. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +132 -0
  493. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +66 -0
  494. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +86 -0
  495. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +119 -0
  496. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +136 -0
  497. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +40 -0
  498. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +104 -0
  499. vllm/model_executor/layers/quantization/kv_cache.py +137 -0
  500. vllm/model_executor/layers/quantization/marlin.py +259 -0
  501. vllm/model_executor/layers/quantization/modelopt.py +410 -0
  502. vllm/model_executor/layers/quantization/moe_wna16.py +447 -0
  503. vllm/model_executor/layers/quantization/neuron_quant.py +67 -0
  504. vllm/model_executor/layers/quantization/ptpc_fp8.py +125 -0
  505. vllm/model_executor/layers/quantization/qqq.py +273 -0
  506. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  507. vllm/model_executor/layers/quantization/quark/quark.py +385 -0
  508. vllm/model_executor/layers/quantization/quark/quark_moe.py +236 -0
  509. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +7 -0
  510. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +54 -0
  511. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +142 -0
  512. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +121 -0
  513. vllm/model_executor/layers/quantization/quark/utils.py +102 -0
  514. vllm/model_executor/layers/quantization/schema.py +85 -0
  515. vllm/model_executor/layers/quantization/torchao.py +127 -0
  516. vllm/model_executor/layers/quantization/tpu_int8.py +119 -0
  517. vllm/model_executor/layers/quantization/utils/__init__.py +5 -0
  518. vllm/model_executor/layers/quantization/utils/allspark_utils.py +51 -0
  519. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +198 -0
  520. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  521. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  522. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  523. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  524. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  525. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  526. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  527. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  528. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  529. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  530. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  531. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  532. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  533. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  534. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  535. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  536. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  537. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  538. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  539. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  540. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  541. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  542. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  543. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  544. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  545. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  546. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  547. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  548. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  549. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  550. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  551. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  552. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  553. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  554. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  555. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  556. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  557. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  558. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  559. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  560. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  561. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  562. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  563. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  564. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  565. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  566. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  567. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  568. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  569. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  570. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  571. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  572. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  573. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  574. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  575. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  576. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  577. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  578. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  579. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  580. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  581. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  582. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  583. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  584. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  585. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  586. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  587. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  588. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  589. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  590. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  591. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  592. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  593. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  594. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  595. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  596. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  597. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  598. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  599. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  600. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  601. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  602. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  603. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  604. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  605. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  606. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  607. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  608. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  609. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  610. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  611. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  612. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  613. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  614. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  615. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  616. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  617. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  618. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  619. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  620. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  621. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  622. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  623. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  624. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  625. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  626. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  627. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  628. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  629. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  630. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  631. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  632. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  633. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  634. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  635. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  636. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  637. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  638. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  639. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  640. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  641. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  642. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  643. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  644. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  645. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  646. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  647. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  648. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  649. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  650. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  651. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  652. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  653. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  654. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  655. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  656. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  657. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  658. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  659. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  660. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  661. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  662. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  663. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  664. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  665. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  666. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  667. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  668. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  669. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  670. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  671. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  672. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  673. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  674. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  675. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  676. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  677. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  678. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  679. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  680. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  681. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  682. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  683. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  684. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  685. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  686. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  687. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  688. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  689. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  690. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  691. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  722. vllm/model_executor/layers/quantization/utils/fp8_utils.py +523 -0
  723. vllm/model_executor/layers/quantization/utils/gptq_utils.py +94 -0
  724. vllm/model_executor/layers/quantization/utils/int8_utils.py +459 -0
  725. vllm/model_executor/layers/quantization/utils/layer_utils.py +39 -0
  726. vllm/model_executor/layers/quantization/utils/machete_utils.py +32 -0
  727. vllm/model_executor/layers/quantization/utils/marlin_utils.py +413 -0
  728. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +110 -0
  729. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +164 -0
  730. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
  731. vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +127 -0
  732. vllm/model_executor/layers/quantization/utils/quant_utils.py +571 -0
  733. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +404 -0
  734. vllm/model_executor/layers/rejection_sampler.py +400 -0
  735. vllm/model_executor/layers/resampler.py +269 -0
  736. vllm/model_executor/layers/rotary_embedding.py +1598 -0
  737. vllm/model_executor/layers/sampler.py +1221 -0
  738. vllm/model_executor/layers/spec_decode_base_sampler.py +258 -0
  739. vllm/model_executor/layers/typical_acceptance_sampler.py +172 -0
  740. vllm/model_executor/layers/utils.py +99 -0
  741. vllm/model_executor/layers/vocab_parallel_embedding.py +485 -0
  742. vllm/model_executor/model_loader/__init__.py +20 -0
  743. vllm/model_executor/model_loader/loader.py +1542 -0
  744. vllm/model_executor/model_loader/neuron.py +243 -0
  745. vllm/model_executor/model_loader/tensorizer.py +468 -0
  746. vllm/model_executor/model_loader/utils.py +171 -0
  747. vllm/model_executor/model_loader/weight_utils.py +749 -0
  748. vllm/model_executor/models/__init__.py +27 -0
  749. vllm/model_executor/models/adapters.py +247 -0
  750. vllm/model_executor/models/arctic.py +559 -0
  751. vllm/model_executor/models/aria.py +656 -0
  752. vllm/model_executor/models/aya_vision.py +461 -0
  753. vllm/model_executor/models/baichuan.py +469 -0
  754. vllm/model_executor/models/bamba.py +542 -0
  755. vllm/model_executor/models/bart.py +936 -0
  756. vllm/model_executor/models/bert.py +725 -0
  757. vllm/model_executor/models/blip.py +337 -0
  758. vllm/model_executor/models/blip2.py +717 -0
  759. vllm/model_executor/models/bloom.py +358 -0
  760. vllm/model_executor/models/chameleon.py +1135 -0
  761. vllm/model_executor/models/chatglm.py +476 -0
  762. vllm/model_executor/models/clip.py +410 -0
  763. vllm/model_executor/models/commandr.py +466 -0
  764. vllm/model_executor/models/constant_size_cache.py +136 -0
  765. vllm/model_executor/models/dbrx.py +469 -0
  766. vllm/model_executor/models/deepseek.py +484 -0
  767. vllm/model_executor/models/deepseek_mtp.py +266 -0
  768. vllm/model_executor/models/deepseek_v2.py +830 -0
  769. vllm/model_executor/models/deepseek_vl2.py +647 -0
  770. vllm/model_executor/models/eagle.py +247 -0
  771. vllm/model_executor/models/exaone.py +548 -0
  772. vllm/model_executor/models/fairseq2_llama.py +153 -0
  773. vllm/model_executor/models/falcon.py +508 -0
  774. vllm/model_executor/models/florence2.py +1102 -0
  775. vllm/model_executor/models/fuyu.py +388 -0
  776. vllm/model_executor/models/gemma.py +423 -0
  777. vllm/model_executor/models/gemma2.py +423 -0
  778. vllm/model_executor/models/gemma3.py +531 -0
  779. vllm/model_executor/models/gemma3_mm.py +716 -0
  780. vllm/model_executor/models/glm.py +22 -0
  781. vllm/model_executor/models/glm4.py +303 -0
  782. vllm/model_executor/models/glm4v.py +647 -0
  783. vllm/model_executor/models/gpt2.py +313 -0
  784. vllm/model_executor/models/gpt_bigcode.py +336 -0
  785. vllm/model_executor/models/gpt_j.py +337 -0
  786. vllm/model_executor/models/gpt_neox.py +330 -0
  787. vllm/model_executor/models/granite.py +494 -0
  788. vllm/model_executor/models/granite_speech.py +777 -0
  789. vllm/model_executor/models/granitemoe.py +435 -0
  790. vllm/model_executor/models/granitemoeshared.py +339 -0
  791. vllm/model_executor/models/gritlm.py +245 -0
  792. vllm/model_executor/models/grok1.py +560 -0
  793. vllm/model_executor/models/h2ovl.py +542 -0
  794. vllm/model_executor/models/idefics2_vision_model.py +387 -0
  795. vllm/model_executor/models/idefics3.py +767 -0
  796. vllm/model_executor/models/interfaces.py +569 -0
  797. vllm/model_executor/models/interfaces_base.py +163 -0
  798. vllm/model_executor/models/intern_vit.py +476 -0
  799. vllm/model_executor/models/internlm2.py +453 -0
  800. vllm/model_executor/models/internlm2_ve.py +146 -0
  801. vllm/model_executor/models/internvl.py +945 -0
  802. vllm/model_executor/models/jais.py +371 -0
  803. vllm/model_executor/models/jamba.py +590 -0
  804. vllm/model_executor/models/kimi_vl.py +577 -0
  805. vllm/model_executor/models/llama.py +619 -0
  806. vllm/model_executor/models/llama4.py +530 -0
  807. vllm/model_executor/models/llama_eagle.py +152 -0
  808. vllm/model_executor/models/llama_eagle3.py +232 -0
  809. vllm/model_executor/models/llava.py +869 -0
  810. vllm/model_executor/models/llava_next.py +582 -0
  811. vllm/model_executor/models/llava_next_video.py +470 -0
  812. vllm/model_executor/models/llava_onevision.py +954 -0
  813. vllm/model_executor/models/mamba.py +271 -0
  814. vllm/model_executor/models/mamba2.py +302 -0
  815. vllm/model_executor/models/mamba_cache.py +76 -0
  816. vllm/model_executor/models/medusa.py +210 -0
  817. vllm/model_executor/models/minicpm.py +592 -0
  818. vllm/model_executor/models/minicpm3.py +229 -0
  819. vllm/model_executor/models/minicpmo.py +725 -0
  820. vllm/model_executor/models/minicpmv.py +1287 -0
  821. vllm/model_executor/models/minimax_cache.py +35 -0
  822. vllm/model_executor/models/minimax_text_01.py +1261 -0
  823. vllm/model_executor/models/mistral3.py +598 -0
  824. vllm/model_executor/models/mixtral.py +485 -0
  825. vllm/model_executor/models/mixtral_quant.py +447 -0
  826. vllm/model_executor/models/mllama.py +1623 -0
  827. vllm/model_executor/models/mllama4.py +838 -0
  828. vllm/model_executor/models/mlp_speculator.py +205 -0
  829. vllm/model_executor/models/modernbert.py +325 -0
  830. vllm/model_executor/models/module_mapping.py +71 -0
  831. vllm/model_executor/models/molmo.py +1567 -0
  832. vllm/model_executor/models/moonvit.py +628 -0
  833. vllm/model_executor/models/mpt.py +329 -0
  834. vllm/model_executor/models/nemotron.py +506 -0
  835. vllm/model_executor/models/nemotron_nas.py +446 -0
  836. vllm/model_executor/models/nvlm_d.py +212 -0
  837. vllm/model_executor/models/olmo.py +390 -0
  838. vllm/model_executor/models/olmo2.py +412 -0
  839. vllm/model_executor/models/olmoe.py +449 -0
  840. vllm/model_executor/models/opt.py +410 -0
  841. vllm/model_executor/models/orion.py +356 -0
  842. vllm/model_executor/models/paligemma.py +397 -0
  843. vllm/model_executor/models/persimmon.py +342 -0
  844. vllm/model_executor/models/phi.py +354 -0
  845. vllm/model_executor/models/phi3.py +18 -0
  846. vllm/model_executor/models/phi3_small.py +463 -0
  847. vllm/model_executor/models/phi3v.py +722 -0
  848. vllm/model_executor/models/phi4mm.py +1263 -0
  849. vllm/model_executor/models/phi4mm_audio.py +1232 -0
  850. vllm/model_executor/models/phi4mm_utils.py +1883 -0
  851. vllm/model_executor/models/phimoe.py +666 -0
  852. vllm/model_executor/models/pixtral.py +1281 -0
  853. vllm/model_executor/models/plamo2.py +736 -0
  854. vllm/model_executor/models/prithvi_geospatial_mae.py +231 -0
  855. vllm/model_executor/models/qwen.py +360 -0
  856. vllm/model_executor/models/qwen2.py +552 -0
  857. vllm/model_executor/models/qwen2_5_omni_thinker.py +901 -0
  858. vllm/model_executor/models/qwen2_5_vl.py +1136 -0
  859. vllm/model_executor/models/qwen2_audio.py +402 -0
  860. vllm/model_executor/models/qwen2_moe.py +531 -0
  861. vllm/model_executor/models/qwen2_rm.py +130 -0
  862. vllm/model_executor/models/qwen2_vl.py +1409 -0
  863. vllm/model_executor/models/qwen3.py +319 -0
  864. vllm/model_executor/models/qwen3_moe.py +528 -0
  865. vllm/model_executor/models/qwen_vl.py +784 -0
  866. vllm/model_executor/models/registry.py +611 -0
  867. vllm/model_executor/models/roberta.py +332 -0
  868. vllm/model_executor/models/siglip.py +522 -0
  869. vllm/model_executor/models/skyworkr1v.py +949 -0
  870. vllm/model_executor/models/smolvlm.py +51 -0
  871. vllm/model_executor/models/solar.py +504 -0
  872. vllm/model_executor/models/stablelm.py +349 -0
  873. vllm/model_executor/models/starcoder2.py +355 -0
  874. vllm/model_executor/models/telechat2.py +139 -0
  875. vllm/model_executor/models/teleflm.py +78 -0
  876. vllm/model_executor/models/transformers.py +442 -0
  877. vllm/model_executor/models/ultravox.py +655 -0
  878. vllm/model_executor/models/utils.py +714 -0
  879. vllm/model_executor/models/vision.py +149 -0
  880. vllm/model_executor/models/whisper.py +746 -0
  881. vllm/model_executor/models/zamba2.py +1008 -0
  882. vllm/model_executor/parameter.py +458 -0
  883. vllm/model_executor/pooling_metadata.py +71 -0
  884. vllm/model_executor/sampling_metadata.py +596 -0
  885. vllm/model_executor/utils.py +53 -0
  886. vllm/multimodal/__init__.py +31 -0
  887. vllm/multimodal/audio.py +105 -0
  888. vllm/multimodal/base.py +218 -0
  889. vllm/multimodal/hasher.py +103 -0
  890. vllm/multimodal/image.py +77 -0
  891. vllm/multimodal/inputs.py +843 -0
  892. vllm/multimodal/parse.py +454 -0
  893. vllm/multimodal/processing.py +1760 -0
  894. vllm/multimodal/profiling.py +274 -0
  895. vllm/multimodal/registry.py +321 -0
  896. vllm/multimodal/utils.py +386 -0
  897. vllm/multimodal/video.py +166 -0
  898. vllm/outputs.py +521 -0
  899. vllm/platforms/__init__.py +286 -0
  900. vllm/platforms/cpu.py +182 -0
  901. vllm/platforms/cuda.py +463 -0
  902. vllm/platforms/hpu.py +94 -0
  903. vllm/platforms/interface.py +427 -0
  904. vllm/platforms/neuron.py +69 -0
  905. vllm/platforms/rocm.py +346 -0
  906. vllm/platforms/tpu.py +174 -0
  907. vllm/platforms/xpu.py +142 -0
  908. vllm/plugins/__init__.py +82 -0
  909. vllm/pooling_params.py +53 -0
  910. vllm/profiler/__init__.py +7 -0
  911. vllm/profiler/layerwise_profile.py +374 -0
  912. vllm/profiler/utils.py +147 -0
  913. vllm/prompt_adapter/__init__.py +0 -0
  914. vllm/prompt_adapter/layers.py +82 -0
  915. vllm/prompt_adapter/models.py +357 -0
  916. vllm/prompt_adapter/request.py +36 -0
  917. vllm/prompt_adapter/utils.py +97 -0
  918. vllm/prompt_adapter/worker_manager.py +178 -0
  919. vllm/py.typed +2 -0
  920. vllm/reasoning/__init__.py +12 -0
  921. vllm/reasoning/abs_reasoning_parsers.py +189 -0
  922. vllm/reasoning/deepseek_r1_reasoning_parser.py +172 -0
  923. vllm/reasoning/granite_reasoning_parser.py +362 -0
  924. vllm/sampling_params.py +598 -0
  925. vllm/scalar_type.py +335 -0
  926. vllm/scripts.py +14 -0
  927. vllm/sequence.py +1486 -0
  928. vllm/spec_decode/__init__.py +0 -0
  929. vllm/spec_decode/batch_expansion.py +505 -0
  930. vllm/spec_decode/draft_model_runner.py +335 -0
  931. vllm/spec_decode/interfaces.py +98 -0
  932. vllm/spec_decode/medusa_worker.py +137 -0
  933. vllm/spec_decode/metrics.py +212 -0
  934. vllm/spec_decode/mlp_speculator_worker.py +93 -0
  935. vllm/spec_decode/mqa_scorer.py +159 -0
  936. vllm/spec_decode/multi_step_worker.py +416 -0
  937. vllm/spec_decode/ngram_worker.py +195 -0
  938. vllm/spec_decode/proposer_worker_base.py +58 -0
  939. vllm/spec_decode/smaller_tp_proposer_worker.py +194 -0
  940. vllm/spec_decode/spec_decode_worker.py +1324 -0
  941. vllm/spec_decode/target_model_runner.py +44 -0
  942. vllm/spec_decode/top1_proposer.py +274 -0
  943. vllm/spec_decode/util.py +276 -0
  944. vllm/test_utils.py +129 -0
  945. vllm/third_party/__init__.py +0 -0
  946. vllm/third_party/pynvml.py +6139 -0
  947. vllm/tracing.py +130 -0
  948. vllm/transformers_utils/__init__.py +19 -0
  949. vllm/transformers_utils/config.py +813 -0
  950. vllm/transformers_utils/configs/__init__.py +52 -0
  951. vllm/transformers_utils/configs/arctic.py +206 -0
  952. vllm/transformers_utils/configs/chatglm.py +71 -0
  953. vllm/transformers_utils/configs/cohere2.py +194 -0
  954. vllm/transformers_utils/configs/dbrx.py +280 -0
  955. vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
  956. vllm/transformers_utils/configs/eagle.py +65 -0
  957. vllm/transformers_utils/configs/exaone.py +191 -0
  958. vllm/transformers_utils/configs/falcon.py +89 -0
  959. vllm/transformers_utils/configs/h2ovl.py +15 -0
  960. vllm/transformers_utils/configs/internvl.py +53 -0
  961. vllm/transformers_utils/configs/jais.py +237 -0
  962. vllm/transformers_utils/configs/kimi_vl.py +36 -0
  963. vllm/transformers_utils/configs/medusa.py +62 -0
  964. vllm/transformers_utils/configs/mllama.py +30 -0
  965. vllm/transformers_utils/configs/mlp_speculator.py +67 -0
  966. vllm/transformers_utils/configs/moonvit.py +32 -0
  967. vllm/transformers_utils/configs/mpt.py +179 -0
  968. vllm/transformers_utils/configs/nemotron.py +204 -0
  969. vllm/transformers_utils/configs/nvlm_d.py +14 -0
  970. vllm/transformers_utils/configs/skyworkr1v.py +53 -0
  971. vllm/transformers_utils/configs/solar.py +246 -0
  972. vllm/transformers_utils/configs/telechat2.py +63 -0
  973. vllm/transformers_utils/configs/ultravox.py +107 -0
  974. vllm/transformers_utils/detokenizer.py +167 -0
  975. vllm/transformers_utils/detokenizer_utils.py +188 -0
  976. vllm/transformers_utils/processor.py +210 -0
  977. vllm/transformers_utils/processors/__init__.py +6 -0
  978. vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
  979. vllm/transformers_utils/s3_utils.py +161 -0
  980. vllm/transformers_utils/tokenizer.py +291 -0
  981. vllm/transformers_utils/tokenizer_base.py +146 -0
  982. vllm/transformers_utils/tokenizer_group.py +110 -0
  983. vllm/transformers_utils/tokenizers/__init__.py +9 -0
  984. vllm/transformers_utils/tokenizers/mistral.py +483 -0
  985. vllm/transformers_utils/utils.py +98 -0
  986. vllm/triton_utils/__init__.py +5 -0
  987. vllm/triton_utils/importing.py +53 -0
  988. vllm/usage/__init__.py +0 -0
  989. vllm/usage/usage_lib.py +255 -0
  990. vllm/utils.py +2692 -0
  991. vllm/v1/__init__.py +0 -0
  992. vllm/v1/attention/__init__.py +0 -0
  993. vllm/v1/attention/backends/__init__.py +0 -0
  994. vllm/v1/attention/backends/flash_attn.py +783 -0
  995. vllm/v1/attention/backends/flashinfer.py +638 -0
  996. vllm/v1/attention/backends/mla/__init__.py +0 -0
  997. vllm/v1/attention/backends/mla/common.py +974 -0
  998. vllm/v1/attention/backends/mla/flashmla.py +149 -0
  999. vllm/v1/attention/backends/mla/triton_mla.py +118 -0
  1000. vllm/v1/attention/backends/pallas.py +221 -0
  1001. vllm/v1/attention/backends/triton_attn.py +198 -0
  1002. vllm/v1/core/__init__.py +0 -0
  1003. vllm/v1/core/block_pool.py +281 -0
  1004. vllm/v1/core/encoder_cache_manager.py +149 -0
  1005. vllm/v1/core/kv_cache_manager.py +385 -0
  1006. vllm/v1/core/kv_cache_utils.py +744 -0
  1007. vllm/v1/core/sched/__init__.py +0 -0
  1008. vllm/v1/core/sched/interface.py +134 -0
  1009. vllm/v1/core/sched/output.py +126 -0
  1010. vllm/v1/core/sched/scheduler.py +838 -0
  1011. vllm/v1/core/sched/utils.py +22 -0
  1012. vllm/v1/core/specialized_manager.py +161 -0
  1013. vllm/v1/engine/__init__.py +166 -0
  1014. vllm/v1/engine/async_llm.py +532 -0
  1015. vllm/v1/engine/core.py +701 -0
  1016. vllm/v1/engine/core_client.py +942 -0
  1017. vllm/v1/engine/detokenizer.py +260 -0
  1018. vllm/v1/engine/exceptions.py +16 -0
  1019. vllm/v1/engine/llm_engine.py +285 -0
  1020. vllm/v1/engine/logprobs.py +198 -0
  1021. vllm/v1/engine/mm_input_cache.py +82 -0
  1022. vllm/v1/engine/output_processor.py +420 -0
  1023. vllm/v1/engine/parallel_sampling.py +132 -0
  1024. vllm/v1/engine/processor.py +387 -0
  1025. vllm/v1/executor/__init__.py +0 -0
  1026. vllm/v1/executor/abstract.py +112 -0
  1027. vllm/v1/executor/multiproc_executor.py +480 -0
  1028. vllm/v1/executor/ray_distributed_executor.py +61 -0
  1029. vllm/v1/kv_cache_interface.py +166 -0
  1030. vllm/v1/metrics/__init__.py +0 -0
  1031. vllm/v1/metrics/loggers.py +498 -0
  1032. vllm/v1/metrics/stats.py +238 -0
  1033. vllm/v1/outputs.py +111 -0
  1034. vllm/v1/request.py +178 -0
  1035. vllm/v1/sample/__init__.py +0 -0
  1036. vllm/v1/sample/metadata.py +43 -0
  1037. vllm/v1/sample/ops/__init__.py +0 -0
  1038. vllm/v1/sample/ops/bad_words.py +38 -0
  1039. vllm/v1/sample/ops/penalties.py +58 -0
  1040. vllm/v1/sample/ops/topk_topp_sampler.py +315 -0
  1041. vllm/v1/sample/rejection_sampler.py +631 -0
  1042. vllm/v1/sample/sampler.py +270 -0
  1043. vllm/v1/sample/tpu/__init__.py +0 -0
  1044. vllm/v1/sample/tpu/metadata.py +118 -0
  1045. vllm/v1/sample/tpu/sampler.py +154 -0
  1046. vllm/v1/serial_utils.py +274 -0
  1047. vllm/v1/spec_decode/__init__.py +0 -0
  1048. vllm/v1/spec_decode/eagle.py +318 -0
  1049. vllm/v1/spec_decode/metadata.py +61 -0
  1050. vllm/v1/spec_decode/metrics.py +164 -0
  1051. vllm/v1/spec_decode/ngram_proposer.py +131 -0
  1052. vllm/v1/spec_decode/utils.py +18 -0
  1053. vllm/v1/stats/__init__.py +0 -0
  1054. vllm/v1/stats/common.py +453 -0
  1055. vllm/v1/structured_output/__init__.py +113 -0
  1056. vllm/v1/structured_output/backend_guidance.py +215 -0
  1057. vllm/v1/structured_output/backend_types.py +96 -0
  1058. vllm/v1/structured_output/backend_xgrammar.py +299 -0
  1059. vllm/v1/structured_output/request.py +84 -0
  1060. vllm/v1/structured_output/utils.py +174 -0
  1061. vllm/v1/utils.py +249 -0
  1062. vllm/v1/worker/__init__.py +0 -0
  1063. vllm/v1/worker/block_table.py +87 -0
  1064. vllm/v1/worker/gpu_input_batch.py +677 -0
  1065. vllm/v1/worker/gpu_model_runner.py +1776 -0
  1066. vllm/v1/worker/gpu_worker.py +349 -0
  1067. vllm/v1/worker/lora_model_runner_mixin.py +145 -0
  1068. vllm/v1/worker/tpu_model_runner.py +1419 -0
  1069. vllm/v1/worker/tpu_worker.py +260 -0
  1070. vllm/v1/worker/utils.py +74 -0
  1071. vllm/v1/worker/worker_base.py +64 -0
  1072. vllm/version.py +40 -0
  1073. vllm/vllm_flash_attn/.gitkeep +0 -0
  1074. vllm/worker/__init__.py +0 -0
  1075. vllm/worker/cache_engine.py +144 -0
  1076. vllm/worker/cpu_enc_dec_model_runner.py +323 -0
  1077. vllm/worker/cpu_model_runner.py +668 -0
  1078. vllm/worker/cpu_pooling_model_runner.py +122 -0
  1079. vllm/worker/cpu_worker.py +400 -0
  1080. vllm/worker/enc_dec_model_runner.py +542 -0
  1081. vllm/worker/hpu_model_runner.py +2221 -0
  1082. vllm/worker/hpu_worker.py +483 -0
  1083. vllm/worker/model_runner.py +2056 -0
  1084. vllm/worker/model_runner_base.py +281 -0
  1085. vllm/worker/multi_step_hpu_worker.py +122 -0
  1086. vllm/worker/multi_step_model_runner.py +908 -0
  1087. vllm/worker/multi_step_tpu_worker.py +107 -0
  1088. vllm/worker/multi_step_worker.py +196 -0
  1089. vllm/worker/neuron_model_runner.py +336 -0
  1090. vllm/worker/neuron_worker.py +138 -0
  1091. vllm/worker/pooling_model_runner.py +200 -0
  1092. vllm/worker/tpu_model_runner.py +908 -0
  1093. vllm/worker/tpu_worker.py +332 -0
  1094. vllm/worker/utils.py +52 -0
  1095. vllm/worker/worker.py +570 -0
  1096. vllm/worker/worker_base.py +644 -0
  1097. vllm/worker/xpu_model_runner.py +603 -0
  1098. vllm/worker/xpu_worker.py +185 -0
  1099. vllm_cpu-0.8.5.post2.dist-info/METADATA +309 -0
  1100. vllm_cpu-0.8.5.post2.dist-info/RECORD +1103 -0
  1101. vllm_cpu-0.8.5.post2.dist-info/WHEEL +5 -0
  1102. vllm_cpu-0.8.5.post2.dist-info/entry_points.txt +2 -0
  1103. vllm_cpu-0.8.5.post2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1450 @@
# SPDX-License-Identifier: Apache-2.0

import itertools
import warnings
from collections.abc import Sequence
from contextlib import contextmanager
from typing import Any, Callable, ClassVar, Optional, Union, cast, overload

import cloudpickle
import torch.nn as nn
from tqdm.auto import tqdm
from typing_extensions import TypeVar, deprecated

from vllm.beam_search import (BeamSearchInstance, BeamSearchOutput,
                              BeamSearchSequence, get_beam_search_score)
from vllm.config import CompilationConfig
from vllm.engine.arg_utils import (EngineArgs, HfOverrides, PoolerConfig,
                                   TaskOption)
from vllm.engine.llm_engine import LLMEngine
from vllm.entrypoints.chat_utils import (ChatCompletionMessageParam,
                                         ChatTemplateContentFormatOption,
                                         apply_hf_chat_template,
                                         apply_mistral_chat_template,
                                         parse_chat_messages,
                                         resolve_chat_template_content_format)
from vllm.entrypoints.score_utils import (_cosine_similarity,
                                          _validate_score_input_lens)
from vllm.inputs import PromptType, SingletonPrompt, TextPrompt, TokensPrompt
from vllm.inputs.parse import is_token_prompt, parse_and_batch_prompt
from vllm.logger import init_logger
from vllm.lora.request import LoRARequest
from vllm.model_executor.guided_decoding.guided_fields import (
    GuidedDecodingRequest, LLMGuidedOptions)
from vllm.outputs import (ClassificationRequestOutput, EmbeddingRequestOutput,
                          PoolingRequestOutput, RequestOutput,
                          ScoringRequestOutput)
from vllm.pooling_params import PoolingParams
from vllm.prompt_adapter.request import PromptAdapterRequest
from vllm.sampling_params import (BeamSearchParams, GuidedDecodingParams,
                                  RequestOutputKind, SamplingParams)
from vllm.transformers_utils.tokenizer import (AnyTokenizer, MistralTokenizer,
                                               get_cached_tokenizer)
from vllm.usage.usage_lib import UsageContext
from vllm.utils import (Counter, Device, deprecate_args, deprecate_kwargs,
                        is_list_of)

logger = init_logger(__name__)

_R = TypeVar("_R", default=Any)


class LLM:
    """An LLM for generating texts from given prompts and sampling parameters.

    This class includes a tokenizer, a language model (possibly distributed
    across multiple GPUs), and GPU memory space allocated for intermediate
    states (aka KV cache). Given a batch of prompts and sampling parameters,
    this class generates texts from the model, using an intelligent batching
    mechanism and efficient memory management.

    Args:
        model: The name or path of a HuggingFace Transformers model.
        tokenizer: The name or path of a HuggingFace Transformers tokenizer.
        tokenizer_mode: The tokenizer mode. "auto" will use the fast tokenizer
            if available, and "slow" will always use the slow tokenizer.
        skip_tokenizer_init: If true, skip initialization of tokenizer and
            detokenizer. Expect valid prompt_token_ids and None for prompt
            from the input.
        trust_remote_code: Trust remote code (e.g., from HuggingFace) when
            downloading the model and tokenizer.
        allowed_local_media_path: Allow API requests to read local images
            or videos from directories specified by the server file system.
            This is a security risk and should only be enabled in trusted
            environments.
        tensor_parallel_size: The number of GPUs to use for distributed
            execution with tensor parallelism.
        dtype: The data type for the model weights and activations. Currently,
            we support `float32`, `float16`, and `bfloat16`. If `auto`, we use
            the `torch_dtype` attribute specified in the model config file.
            However, if the `torch_dtype` in the config is `float32`, we will
            use `float16` instead.
        quantization: The method used to quantize the model weights. Currently,
            we support "awq", "gptq", and "fp8" (experimental).
            If None, we first check the `quantization_config` attribute in the
            model config file. If that is None, we assume the model weights are
            not quantized and use `dtype` to determine the data type of
            the weights.
        revision: The specific model version to use. It can be a branch name,
            a tag name, or a commit id.
        tokenizer_revision: The specific tokenizer version to use. It can be a
            branch name, a tag name, or a commit id.
        seed: The seed to initialize the random number generator for sampling.
        gpu_memory_utilization: The ratio (between 0 and 1) of GPU memory to
            reserve for the model weights, activations, and KV cache. Higher
            values will increase the KV cache size and thus improve the model's
            throughput. However, if the value is too high, it may cause out-of-
            memory (OOM) errors.
        swap_space: The size (GiB) of CPU memory per GPU to use as swap space.
            This can be used for temporarily storing the states of the requests
            when their `best_of` sampling parameters are larger than 1. If all
            requests will have `best_of=1`, you can safely set this to 0.
            Note that `best_of` is only supported in V0. Otherwise, too small
            values may cause out-of-memory (OOM) errors.
        cpu_offload_gb: The size (GiB) of CPU memory to use for offloading
            the model weights. This virtually increases the GPU memory space
            you can use to hold the model weights, at the cost of CPU-GPU data
            transfer for every forward pass.
        enforce_eager: Whether to enforce eager execution. If True, we will
            disable CUDA graph and always execute the model in eager mode.
            If False, we will use CUDA graph and eager execution in hybrid
            mode.
        max_seq_len_to_capture: Maximum sequence len covered by CUDA graphs.
            When a sequence has context length larger than this, we fall back
            to eager mode. Additionally for encoder-decoder models, if the
            sequence length of the encoder input is larger than this, we fall
            back to the eager mode.
        disable_custom_all_reduce: See :class:`~vllm.config.ParallelConfig`
        disable_async_output_proc: Disable async output processing.
            This may result in lower performance.
        hf_token: The token to use as HTTP bearer authorization for remote
            files. If `True`, will use the token generated when running
            `huggingface-cli login` (stored in `~/.huggingface`).
        hf_overrides: If a dictionary, contains arguments to be forwarded to the
            HuggingFace config. If a callable, it is called to update the
            HuggingFace config.
        compilation_config: Either an integer or a dictionary. If it is an
            integer, it is used as the level of compilation optimization. If it
            is a dictionary, it can specify the full compilation configuration.
        **kwargs: Arguments for :class:`~vllm.EngineArgs`. (See
            :ref:`engine-args`)

    Note:
        This class is intended to be used for offline inference. For online
        serving, use the :class:`~vllm.AsyncLLMEngine` class instead.
    """

    DEPRECATE_LEGACY: ClassVar[bool] = True
    """A flag to toggle whether to deprecate the legacy generate/encode API."""

    DEPRECATE_INIT_POSARGS: ClassVar[bool] = True
    """
    A flag to toggle whether to deprecate positional arguments in
    :meth:`LLM.__init__`.
    """

    @classmethod
    @contextmanager
    def deprecate_legacy_api(cls):
        cls.DEPRECATE_LEGACY = True

        yield

        cls.DEPRECATE_LEGACY = False

    @deprecate_args(
        start_index=2,  # Ignore self and model
        is_deprecated=lambda: LLM.DEPRECATE_INIT_POSARGS,
        additional_message=(
            "All positional arguments other than `model` will be "
            "replaced with keyword arguments in an upcoming version."),
    )
    def __init__(
        self,
        model: str,
        tokenizer: Optional[str] = None,
        tokenizer_mode: str = "auto",
        skip_tokenizer_init: bool = False,
        trust_remote_code: bool = False,
        allowed_local_media_path: str = "",
        tensor_parallel_size: int = 1,
        dtype: str = "auto",
        quantization: Optional[str] = None,
        revision: Optional[str] = None,
        tokenizer_revision: Optional[str] = None,
        seed: Optional[int] = None,
        gpu_memory_utilization: float = 0.9,
        swap_space: float = 4,
        cpu_offload_gb: float = 0,
        enforce_eager: Optional[bool] = None,
        max_seq_len_to_capture: int = 8192,
        disable_custom_all_reduce: bool = False,
        disable_async_output_proc: bool = False,
        hf_token: Optional[Union[bool, str]] = None,
        hf_overrides: Optional[HfOverrides] = None,
        mm_processor_kwargs: Optional[dict[str, Any]] = None,
        # After positional args are removed, move this right below `model`
        task: TaskOption = "auto",
        override_pooler_config: Optional[PoolerConfig] = None,
        compilation_config: Optional[Union[int, dict[str, Any]]] = None,
        **kwargs,
    ) -> None:
        '''
        LLM constructor.

        Note: if enforce_eager is unset (enforce_eager is None),
        it defaults to False.
        '''

        if "disable_log_stats" not in kwargs:
            kwargs["disable_log_stats"] = True

        if "worker_cls" in kwargs:
            worker_cls = kwargs["worker_cls"]
            # If the worker_cls is not a qualified string name,
            # serialize it using cloudpickle to avoid pickling issues.
            if isinstance(worker_cls, type):
                kwargs["worker_cls"] = cloudpickle.dumps(worker_cls)

        if compilation_config is not None:
            if isinstance(compilation_config, (int, dict)):
                compilation_config_instance = CompilationConfig.from_cli(
                    str(compilation_config))
            else:
                compilation_config_instance = compilation_config
        else:
            compilation_config_instance = None

        engine_args = EngineArgs(
            model=model,
            task=task,
            tokenizer=tokenizer,
            tokenizer_mode=tokenizer_mode,
            skip_tokenizer_init=skip_tokenizer_init,
            trust_remote_code=trust_remote_code,
            allowed_local_media_path=allowed_local_media_path,
            tensor_parallel_size=tensor_parallel_size,
            dtype=dtype,
            quantization=quantization,
            revision=revision,
            tokenizer_revision=tokenizer_revision,
            seed=seed,
            gpu_memory_utilization=gpu_memory_utilization,
            swap_space=swap_space,
            cpu_offload_gb=cpu_offload_gb,
            enforce_eager=enforce_eager,
            max_seq_len_to_capture=max_seq_len_to_capture,
            disable_custom_all_reduce=disable_custom_all_reduce,
            disable_async_output_proc=disable_async_output_proc,
            hf_token=hf_token,
            hf_overrides=hf_overrides,
            mm_processor_kwargs=mm_processor_kwargs,
            override_pooler_config=override_pooler_config,
            compilation_config=compilation_config_instance,
            **kwargs,
        )

        # Create the Engine (autoselects V0 vs V1)
        self.llm_engine = LLMEngine.from_engine_args(
            engine_args=engine_args, usage_context=UsageContext.LLM_CLASS)
        self.engine_class = type(self.llm_engine)

        self.request_counter = Counter()
        self.default_sampling_params: Union[dict[str, Any], None] = None

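    # Example constructor usage (illustrative only; the model name and the
    # argument values below are placeholders, not additional defaults):
    #
    #     from vllm import LLM
    #
    #     llm = LLM(
    #         model="facebook/opt-125m",   # any HF model name or local path
    #         dtype="auto",
    #         gpu_memory_utilization=0.9,
    #         enforce_eager=False,
    #     )
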
    def get_tokenizer(
        self,
        lora_request: Optional[LoRARequest] = None,
    ) -> AnyTokenizer:
        return self.llm_engine.get_tokenizer_group().get_lora_tokenizer(
            lora_request)

    def set_tokenizer(self, tokenizer: AnyTokenizer) -> None:
        tokenizer_group = self.llm_engine.get_tokenizer_group()

        # While CachedTokenizer is dynamic, we have no choice but to
        # compare the class name. A misjudgment can arise if a
        # user-defined tokenizer class name starts with 'Cached'.
        if tokenizer.__class__.__name__.startswith("Cached"):
            tokenizer_group.tokenizer = tokenizer
        else:
            tokenizer_group.tokenizer = get_cached_tokenizer(tokenizer)

    def get_default_sampling_params(self) -> SamplingParams:
        if self.default_sampling_params is None:
            self.default_sampling_params = (
                self.llm_engine.model_config.get_diff_sampling_param())
        if self.default_sampling_params:
            return SamplingParams.from_optional(**self.default_sampling_params)
        return SamplingParams()

    @overload
    def generate(
        self,
        prompts: Union[PromptType, Sequence[PromptType]],
        /,
        sampling_params: Optional[Union[SamplingParams,
                                        Sequence[SamplingParams]]] = None,
        *,
        use_tqdm: bool = True,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
        guided_options_request: Optional[Union[LLMGuidedOptions,
                                               GuidedDecodingRequest]] = None,
    ) -> list[RequestOutput]:
        ...

    @overload  # LEGACY: single (prompt + optional token ids)
    @deprecated("'prompt_token_ids' will become part of 'prompts'")
    def generate(
        self,
        prompts: str,
        sampling_params: Optional[Union[SamplingParams,
                                        list[SamplingParams]]] = None,
        prompt_token_ids: Optional[list[int]] = None,
        use_tqdm: bool = True,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
        guided_options_request: Optional[Union[LLMGuidedOptions,
                                               GuidedDecodingRequest]] = None,
    ) -> list[RequestOutput]:
        ...

    @overload  # LEGACY: multi (prompt + optional token ids)
    @deprecated("'prompt_token_ids' will become part of 'prompts'")
    def generate(
        self,
        prompts: list[str],
        sampling_params: Optional[Union[SamplingParams,
                                        list[SamplingParams]]] = None,
        prompt_token_ids: Optional[list[list[int]]] = None,
        use_tqdm: bool = True,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
        guided_options_request: Optional[Union[LLMGuidedOptions,
                                               GuidedDecodingRequest]] = None,
    ) -> list[RequestOutput]:
        ...

    @overload  # LEGACY: single (token ids + optional prompt)
    @deprecated("'prompt_token_ids' will become part of 'prompts'")
    def generate(
        self,
        prompts: Optional[str] = None,
        sampling_params: Optional[Union[SamplingParams,
                                        list[SamplingParams]]] = None,
        *,
        prompt_token_ids: list[int],
        use_tqdm: bool = True,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
        guided_options_request: Optional[Union[LLMGuidedOptions,
                                               GuidedDecodingRequest]] = None,
    ) -> list[RequestOutput]:
        ...

    @overload  # LEGACY: multi (token ids + optional prompt)
    @deprecated("'prompt_token_ids' will become part of 'prompts'")
    def generate(
        self,
        prompts: Optional[list[str]] = None,
        sampling_params: Optional[Union[SamplingParams,
                                        list[SamplingParams]]] = None,
        *,
        prompt_token_ids: list[list[int]],
        use_tqdm: bool = True,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
        guided_options_request: Optional[Union[LLMGuidedOptions,
                                               GuidedDecodingRequest]] = None,
    ) -> list[RequestOutput]:
        ...

    @overload  # LEGACY: single or multi token ids [pos-only]
    @deprecated("'prompt_token_ids' will become part of 'prompts'")
    def generate(
        self,
        prompts: None,
        sampling_params: None,
        prompt_token_ids: Union[list[int], list[list[int]]],
        use_tqdm: bool = True,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
        guided_options_request: Optional[Union[LLMGuidedOptions,
                                               GuidedDecodingRequest]] = None,
    ) -> list[RequestOutput]:
        ...

    @deprecate_kwargs(
        "prompt_token_ids",
        is_deprecated=lambda: LLM.DEPRECATE_LEGACY,
        additional_message="Please use the 'prompts' parameter instead.",
    )
    def generate(
        self,
        prompts: Union[Union[PromptType, Sequence[PromptType]],
                       Optional[Union[str, list[str]]]] = None,
        sampling_params: Optional[Union[SamplingParams,
                                        Sequence[SamplingParams]]] = None,
        prompt_token_ids: Optional[Union[list[int], list[list[int]]]] = None,
        use_tqdm: bool = True,
        lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
        prompt_adapter_request: Optional[PromptAdapterRequest] = None,
        guided_options_request: Optional[Union[LLMGuidedOptions,
                                               GuidedDecodingRequest]] = None,
        priority: Optional[list[int]] = None,
    ) -> list[RequestOutput]:
        """Generates the completions for the input prompts.

        This class automatically batches the given prompts, considering
        the memory constraint. For the best performance, put all of your prompts
        into a single list and pass it to this method.

        Args:
            prompts: The prompts to the LLM. You may pass a sequence of prompts
                for batch inference. See :class:`~vllm.inputs.PromptType`
                for more details about the format of each prompt.
            sampling_params: The sampling parameters for text generation. If
                None, we use the default sampling parameters.
                When it is a single value, it is applied to every prompt.
                When it is a list, the list must have the same length as the
                prompts and it is paired one-to-one with the prompts.
            use_tqdm: Whether to use tqdm to display the progress bar.
            lora_request: LoRA request to use for generation, if any.
            prompt_adapter_request: Prompt Adapter request to use for
                generation, if any.
            priority: The priority of the requests, if any.
                Only applicable when the priority scheduling policy is enabled.

        Returns:
            A list of ``RequestOutput`` objects containing the
            generated completions in the same order as the input prompts.

        Note:
            Using ``prompts`` and ``prompt_token_ids`` as keyword parameters is
            considered legacy and may be deprecated in the future. You should
            instead pass them via the ``inputs`` parameter.
        """
        runner_type = self.llm_engine.model_config.runner_type
        if runner_type not in ["generate", "transcription"]:
            messages = [
                "LLM.generate() is only supported for (conditional) generation "
                "models (XForCausalLM, XForConditionalGeneration).",
            ]

            supported_runner_types = self.llm_engine.model_config \
                .supported_runner_types
            if "generate" in supported_runner_types:
                messages.append(
                    "Your model supports the 'generate' runner, but is "
                    f"currently initialized for the '{runner_type}' runner. "
                    "Please initialize vLLM using `--task generate`.")

            raise ValueError(" ".join(messages))

        if prompt_token_ids is not None:
            parsed_prompts = self._convert_v1_inputs(
                prompts=cast(Optional[Union[str, list[str]]], prompts),
                prompt_token_ids=prompt_token_ids,
            )
        else:
            parsed_prompts = cast(Union[PromptType, Sequence[PromptType]],
                                  prompts)

        if isinstance(guided_options_request, dict):
            if len(guided_options_request) > 1:
                raise ValueError(
                    "You can only use one guided decoding but multiple is "
                    f"specified: {guided_options_request}")
            guided_options_request = GuidedDecodingRequest(
                **guided_options_request)

        if sampling_params is None:
            # Use default sampling params.
            sampling_params = self.get_default_sampling_params()

        self._validate_and_add_requests(
            prompts=parsed_prompts,
            params=sampling_params,
            lora_request=lora_request,
            prompt_adapter_request=prompt_adapter_request,
            guided_options=guided_options_request,
            priority=priority)

        outputs = self._run_engine(use_tqdm=use_tqdm)
        return self.engine_class.validate_outputs(outputs, RequestOutput)

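    # Example usage of generate() (illustrative only; the model name, prompts,
    # and sampling values are placeholders):
    #
    #     from vllm import LLM, SamplingParams
    #
    #     llm = LLM(model="facebook/opt-125m")
    #     params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=64)
    #     outputs = llm.generate(["Hello, my name is"], sampling_params=params)
    #     for out in outputs:
    #         print(out.outputs[0].text)
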
    def collective_rpc(self,
                       method: Union[str, Callable[..., _R]],
                       timeout: Optional[float] = None,
                       args: tuple = (),
                       kwargs: Optional[dict[str, Any]] = None) -> list[_R]:
        """
        Execute an RPC call on all workers.

        Args:
            method: Name of the worker method to execute, or a callable that
                is serialized and sent to all workers to execute.

                If the method is a callable, it should accept an additional
                `self` argument, in addition to the arguments passed in `args`
                and `kwargs`. The `self` argument will be the worker object.
            timeout: Maximum time in seconds to wait for execution. Raises a
                :exc:`TimeoutError` on timeout. `None` means wait indefinitely.
            args: Positional arguments to pass to the worker method.
            kwargs: Keyword arguments to pass to the worker method.

        Returns:
            A list containing the results from each worker.

        Note:
            It is recommended to use this API only to pass control messages,
            and to set up data-plane communication to pass data.
        """

        return self.llm_engine.collective_rpc(method, timeout, args, kwargs)

    def apply_model(self, func: Callable[[nn.Module], _R]) -> list[_R]:
        """
        Run a function directly on the model inside each worker,
        returning the result for each of them.
        """
        executor = self.llm_engine.model_executor
        return executor.apply_model(func)

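    # Example usage of apply_model() (illustrative only): the callable runs on
    # each worker's nn.Module, so e.g. counting parameters returns one value
    # per worker.
    #
    #     num_params = llm.apply_model(
    #         lambda model: sum(p.numel() for p in model.parameters()))
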
    def beam_search(
        self,
        prompts: list[Union[TokensPrompt, TextPrompt]],
        params: BeamSearchParams,
    ) -> list[BeamSearchOutput]:
        """
        Generate sequences using beam search.

        Args:
            prompts: A list of prompts. Each prompt can be a string or a list
                of token IDs.
            params: The beam search parameters.
        """
        # TODO: how does beam search work together with length penalty,
        # frequency penalty, and stopping criteria, etc.?
        beam_width = params.beam_width
        max_tokens = params.max_tokens
        temperature = params.temperature
        ignore_eos = params.ignore_eos
        length_penalty = params.length_penalty

        def sort_beams_key(x: BeamSearchSequence) -> float:
            return get_beam_search_score(x.tokens, x.cum_logprob,
                                         tokenizer.eos_token_id,
                                         length_penalty)

        def create_tokens_prompt_from_beam(
                beam: BeamSearchSequence) -> TokensPrompt:
            token_prompt_kwargs: TokensPrompt = {
                "prompt_token_ids": beam.tokens
            }
            if beam.multi_modal_data is not None:
                token_prompt_kwargs["multi_modal_data"] = beam.multi_modal_data

            if beam.mm_processor_kwargs is not None:
                token_prompt_kwargs[
                    "mm_processor_kwargs"] = beam.mm_processor_kwargs
            return TokensPrompt(**token_prompt_kwargs)

        tokenizer = self.get_tokenizer()
        # generate 2 * beam_width candidates at each step
        # following the huggingface transformers implementation
        # at https://github.com/huggingface/transformers/blob/e15687fffe5c9d20598a19aeab721ae0a7580f8a/src/transformers/generation/beam_search.py#L534 # noqa
        beam_search_params = SamplingParams(logprobs=2 * beam_width,
                                            max_tokens=1,
                                            temperature=temperature)
        instances: list[BeamSearchInstance] = []

        for prompt in prompts:
            # Add multimodal processor kwargs & data
            mm_kwargs = {}
            if "multi_modal_data" in prompt:
                mm_kwargs["multi_modal_data"] = prompt["multi_modal_data"]
            if "mm_processor_kwargs" in prompt:
                mm_kwargs["mm_processor_kwargs"] = prompt[
                    "mm_processor_kwargs"]

            if is_token_prompt(prompt):
                prompt_tokens = prompt["prompt_token_ids"]
            else:
                prompt_tokens = tokenizer.encode(prompt["prompt"])
            instances.append(
                BeamSearchInstance(prompt_tokens, logprobs=None, **mm_kwargs))

        for _ in range(max_tokens):
            all_beams: list[BeamSearchSequence] = list(
                sum((instance.beams for instance in instances), []))
            pos = [0] + list(
                itertools.accumulate(
                    len(instance.beams) for instance in instances))
            instance_start_and_end: list[tuple[int, int]] = list(
                zip(pos[:-1], pos[1:]))

            if len(all_beams) == 0:
                break

            prompts_batch = [
                create_tokens_prompt_from_beam(beam) for beam in all_beams
            ]

            # only runs for one step
            # we don't need to use tqdm here
            output = self.generate(prompts_batch,
                                   sampling_params=beam_search_params,
                                   use_tqdm=False)

            for (start, end), instance in zip(instance_start_and_end,
                                              instances):
                instance_new_beams = []
                for i in range(start, end):
                    current_beam = all_beams[i]
                    result = output[i]

                    if result.outputs[0].logprobs is not None:
                        # If `result.outputs[0].logprobs` is None, it means
                        # the sequence completed because of max-model-len or
                        # was aborted; we don't need to add it to the new beams.
                        logprobs = result.outputs[0].logprobs[0]
                        for token_id, logprob_obj in logprobs.items():
                            new_beam = BeamSearchSequence(
                                tokens=current_beam.tokens + [token_id],
                                logprobs=current_beam.logprobs + [logprobs],
                                cum_logprob=current_beam.cum_logprob +
                                logprob_obj.logprob,
                                multi_modal_data=current_beam.multi_modal_data,
                                mm_processor_kwargs=current_beam.
                                mm_processor_kwargs)

                            if token_id == tokenizer.eos_token_id and \
                                    not ignore_eos:
                                instance.completed.append(new_beam)
                            else:
                                instance_new_beams.append(new_beam)
                sorted_beams = sorted(instance_new_beams,
                                      key=sort_beams_key,
                                      reverse=True)
                instance.beams = sorted_beams[:beam_width]

        outputs = []
        for instance in instances:
            instance.completed.extend(instance.beams)
            sorted_completed = sorted(instance.completed,
                                      key=sort_beams_key,
                                      reverse=True)
            best_beams = sorted_completed[:beam_width]

            for beam in best_beams:
                beam.text = tokenizer.decode(beam.tokens)
            outputs.append(BeamSearchOutput(sequences=best_beams))

        return outputs

646
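# --- Illustrative sketch (editorial note, not part of the packaged file):
# calling beam_search() as defined above. The import path for BeamSearchParams
# and the prompt text are assumptions for illustration.
from vllm.sampling_params import BeamSearchParams

beam_params = BeamSearchParams(beam_width=4, max_tokens=32)
beam_outputs = llm.beam_search([{"prompt": "The capital of France is"}],
                               beam_params)
for seq in beam_outputs[0].sequences:
    # Each BeamSearchSequence carries the decoded text and cumulative logprob.
    print(seq.cum_logprob, seq.text)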
+ def chat(
+ self,
+ messages: Union[list[ChatCompletionMessageParam],
+ list[list[ChatCompletionMessageParam]]],
+ sampling_params: Optional[Union[SamplingParams,
+ list[SamplingParams]]] = None,
+ use_tqdm: bool = True,
+ lora_request: Optional[LoRARequest] = None,
+ chat_template: Optional[str] = None,
+ chat_template_content_format: ChatTemplateContentFormatOption = "auto",
+ add_generation_prompt: bool = True,
+ continue_final_message: bool = False,
+ tools: Optional[list[dict[str, Any]]] = None,
+ mm_processor_kwargs: Optional[dict[str, Any]] = None,
+ ) -> list[RequestOutput]:
+ """
+ Generate responses for a chat conversation.
+
+ The chat conversation is converted into a text prompt using the
+ tokenizer, and the :meth:`generate` method is called to generate the
+ responses.
+
+ Multi-modal inputs can be passed in the same way you would pass them
+ to the OpenAI API.
+
+ Args:
+ messages: A list of conversations or a single conversation.
+
+ - Each conversation is represented as a list of messages.
+ - Each message is a dictionary with 'role' and 'content' keys.
+
+ sampling_params: The sampling parameters for text generation.
+ If None, we use the default sampling parameters. When it
+ is a single value, it is applied to every prompt. When it
+ is a list, the list must have the same length as the
+ prompts and it is paired one by one with the prompt.
+ use_tqdm: Whether to use tqdm to display the progress bar.
+ lora_request: LoRA request to use for generation, if any.
+ chat_template: The template to use for structuring the chat.
+ If not provided, the model's default chat template will be used.
+ chat_template_content_format: The format to render message content.
+
+ - "string" will render the content as a string.
+ Example: ``"Who are you?"``
+ - "openai" will render the content as a list of dictionaries,
+ similar to OpenAI schema.
+ Example: ``[{"type": "text", "text": "Who are you?"}]``
+
+ add_generation_prompt: If True, adds a generation template
+ to each message.
+ continue_final_message: If True, continues the final message in
+ the conversation instead of starting a new one. Cannot be
+ ``True`` if ``add_generation_prompt`` is also ``True``.
+ mm_processor_kwargs: Multimodal processor kwarg overrides for this
+ chat request. Only used for offline requests.
+
+ Returns:
+ A list of ``RequestOutput`` objects containing the generated
+ responses in the same order as the input messages.
+ """
+ list_of_messages: list[list[ChatCompletionMessageParam]]
+
+ # Handle multi and single conversations
+ if is_list_of(messages, list):
+ # messages is list[list[...]]
+ list_of_messages = cast(list[list[ChatCompletionMessageParam]],
+ messages)
+ else:
+ # messages is list[...]
+ list_of_messages = [
+ cast(list[ChatCompletionMessageParam], messages)
+ ]
+
+ tokenizer = self.get_tokenizer(lora_request)
+ model_config = self.llm_engine.get_model_config()
+ resolved_content_format = resolve_chat_template_content_format(
+ chat_template,
+ tools,
+ chat_template_content_format,
+ tokenizer,
+ trust_remote_code=model_config.trust_remote_code,
+ )
+
+ prompts: list[Union[TokensPrompt, TextPrompt]] = []
+
+ for msgs in list_of_messages:
+ # NOTE: _parse_chat_message_content_parts() currently doesn't
+ # handle mm_processor_kwargs, since there is no implementation in
+ # the chat message parsing for it.
+ conversation, mm_data = parse_chat_messages(
+ msgs,
+ model_config,
+ tokenizer,
+ content_format=resolved_content_format,
+ )
+
+ if isinstance(tokenizer, MistralTokenizer):
+ prompt_token_ids = apply_mistral_chat_template(
+ tokenizer,
+ messages=msgs,
+ chat_template=chat_template,
+ tools=tools,
+ add_generation_prompt=add_generation_prompt,
+ continue_final_message=continue_final_message,
+ )
+ else:
+ prompt_str = apply_hf_chat_template(
+ tokenizer,
+ trust_remote_code=model_config.trust_remote_code,
+ conversation=conversation,
+ chat_template=chat_template,
+ tools=tools,
+ add_generation_prompt=add_generation_prompt,
+ continue_final_message=continue_final_message,
+ )
+ # Special tokens are already included in chat templates so
+ # should not be added by the tokenizer in this case.
+ prompt_token_ids = tokenizer.encode(prompt_str,
+ add_special_tokens=False)
+
+ prompt = TokensPrompt(prompt_token_ids=prompt_token_ids)
+
+ if mm_data is not None:
+ prompt["multi_modal_data"] = mm_data
+
+ if mm_processor_kwargs is not None:
+ prompt["mm_processor_kwargs"] = mm_processor_kwargs
+
+ prompts.append(prompt)
+
+ return self.generate(
+ prompts,
+ sampling_params=sampling_params,
+ use_tqdm=use_tqdm,
+ lora_request=lora_request,
+ )
+
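# --- Illustrative sketch (editorial note, not part of the packaged file):
# a single conversation passed to chat(); message dicts follow the
# OpenAI-style schema described in the docstring above.
from vllm import SamplingParams

conversation = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Write a haiku about the sea."},
]
chat_outputs = llm.chat(conversation,
                        sampling_params=SamplingParams(max_tokens=64))
print(chat_outputs[0].outputs[0].text)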
+ @overload
+ def encode(
+ self,
+ prompts: Union[PromptType, Sequence[PromptType]],
+ /,
+ pooling_params: Optional[Union[PoolingParams,
+ Sequence[PoolingParams]]] = None,
+ *,
+ use_tqdm: bool = True,
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
+ ) -> list[PoolingRequestOutput]:
+ ...
+
+ @overload # LEGACY: single (prompt + optional token ids)
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
+ def encode(
+ self,
+ prompts: str,
+ pooling_params: Optional[Union[PoolingParams,
+ Sequence[PoolingParams]]] = None,
+ prompt_token_ids: Optional[list[int]] = None,
+ use_tqdm: bool = True,
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
+ ) -> list[PoolingRequestOutput]:
+ ...
+
+ @overload # LEGACY: multi (prompt + optional token ids)
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
+ def encode(
+ self,
+ prompts: list[str],
+ pooling_params: Optional[Union[PoolingParams,
+ Sequence[PoolingParams]]] = None,
+ prompt_token_ids: Optional[list[list[int]]] = None,
+ use_tqdm: bool = True,
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
+ ) -> list[PoolingRequestOutput]:
+ ...
+
+ @overload # LEGACY: single (token ids + optional prompt)
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
+ def encode(
+ self,
+ prompts: Optional[str] = None,
+ pooling_params: Optional[Union[PoolingParams,
+ Sequence[PoolingParams]]] = None,
+ *,
+ prompt_token_ids: list[int],
+ use_tqdm: bool = True,
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
+ ) -> list[PoolingRequestOutput]:
+ ...
+
+ @overload # LEGACY: multi (token ids + optional prompt)
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
+ def encode(
+ self,
+ prompts: Optional[list[str]] = None,
+ pooling_params: Optional[Union[PoolingParams,
+ Sequence[PoolingParams]]] = None,
+ *,
+ prompt_token_ids: list[list[int]],
+ use_tqdm: bool = True,
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
+ ) -> list[PoolingRequestOutput]:
+ ...
+
+ @overload # LEGACY: single or multi token ids [pos-only]
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
+ def encode(
+ self,
+ prompts: None,
+ pooling_params: None,
+ prompt_token_ids: Union[list[int], list[list[int]]],
+ use_tqdm: bool = True,
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
+ ) -> list[PoolingRequestOutput]:
+ ...
+
+ @deprecate_kwargs(
+ "prompt_token_ids",
+ is_deprecated=lambda: LLM.DEPRECATE_LEGACY,
+ additional_message="Please use the 'prompts' parameter instead.",
+ )
+ def encode(
+ self,
+ prompts: Union[Union[PromptType, Sequence[PromptType]],
+ Optional[Union[str, list[str]]]] = None,
+ pooling_params: Optional[Union[PoolingParams,
+ Sequence[PoolingParams]]] = None,
+ prompt_token_ids: Optional[Union[list[int], list[list[int]]]] = None,
+ use_tqdm: bool = True,
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
+ ) -> list[PoolingRequestOutput]:
+ """Apply pooling to the hidden states corresponding to the input
+ prompts.
+
+ This class automatically batches the given prompts, considering
+ the memory constraint. For the best performance, put all of your prompts
+ into a single list and pass it to this method.
+
+ Args:
+ prompts: The prompts to the LLM. You may pass a sequence of prompts
+ for batch inference. See :class:`~vllm.inputs.PromptType`
+ for more details about the format of each prompt.
+ pooling_params: The pooling parameters for pooling. If None, we
+ use the default pooling parameters.
+ use_tqdm: Whether to use tqdm to display the progress bar.
+ lora_request: LoRA request to use for generation, if any.
+ prompt_adapter_request: Prompt Adapter request to use for
+ generation, if any.
+
+ Returns:
+ A list of ``PoolingRequestOutput`` objects containing the
+ pooled hidden states in the same order as the input prompts.
+
+ Note:
+ Using ``prompts`` and ``prompt_token_ids`` as keyword parameters is
+ considered legacy and may be deprecated in the future. You should
+ instead pass them via the ``inputs`` parameter.
+ """
+ runner_type = self.llm_engine.model_config.runner_type
+ if runner_type != "pooling":
+ messages = ["LLM.encode() is only supported for pooling models."]
+
+ supported_runner_types = self.llm_engine.model_config \
+ .supported_runner_types
+ if "pooling" in supported_runner_types:
+ messages.append(
+ "Your model supports the 'pooling' runner, but is "
+ f"currently initialized for the '{runner_type}' runner. "
+ "Please initialize vLLM using `--task embed`, "
+ "`--task classify`, `--task score` etc.")
+
+ raise ValueError(" ".join(messages))
+
+ if prompt_token_ids is not None:
+ parsed_prompts = self._convert_v1_inputs(
+ prompts=cast(Optional[Union[str, list[str]]], prompts),
+ prompt_token_ids=prompt_token_ids,
+ )
+ else:
+ parsed_prompts = cast(Union[PromptType, Sequence[PromptType]],
+ prompts)
+
+ if pooling_params is None:
+ # Use default pooling params.
+ pooling_params = PoolingParams()
+ elif isinstance(pooling_params, PoolingParams):
+ pooling_params.verify(self.llm_engine.model_config)
+ else:
+ for pooling_param in pooling_params:
+ pooling_param.verify(self.llm_engine.model_config)
+
+ self._validate_and_add_requests(
+ prompts=parsed_prompts,
+ params=pooling_params,
+ lora_request=lora_request,
+ prompt_adapter_request=prompt_adapter_request,
+ )
+
+ outputs = self._run_engine(use_tqdm=use_tqdm)
+ return self.engine_class.validate_outputs(outputs,
+ PoolingRequestOutput)
+
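# --- Illustrative sketch (editorial note, not part of the packaged file):
# encode() requires a model initialized for the 'pooling' runner. The model
# name and task value are placeholders; PoolingRequestOutput exposing the
# pooled tensor via .outputs.data is an assumption about this release.
pooling_llm = LLM(model="intfloat/e5-small-v2", task="embed")  # placeholder
pooled = pooling_llm.encode(["Hello, world!"])
print(pooled[0].outputs.data.shape)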
+ def embed(
+ self,
+ prompts: Union[PromptType, Sequence[PromptType]],
+ /,
+ *,
+ use_tqdm: bool = True,
+ pooling_params: Optional[Union[PoolingParams,
+ Sequence[PoolingParams]]] = None,
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
+ ) -> list[EmbeddingRequestOutput]:
+ """
+ Generate an embedding vector for each prompt.
+
+ This class automatically batches the given prompts, considering
+ the memory constraint. For the best performance, put all of your prompts
+ into a single list and pass it to this method.
+
+ Args:
+ prompts: The prompts to the LLM. You may pass a sequence of prompts
+ for batch inference. See :class:`~vllm.inputs.PromptType`
+ for more details about the format of each prompt.
+ pooling_params: The pooling parameters for pooling. If None, we
+ use the default pooling parameters.
+ use_tqdm: Whether to use tqdm to display the progress bar.
+ lora_request: LoRA request to use for generation, if any.
+ prompt_adapter_request: Prompt Adapter request to use for
+ generation, if any.
+
+ Returns:
+ A list of ``EmbeddingRequestOutput`` objects containing the
+ embedding vectors in the same order as the input prompts.
+ """
+ if self.llm_engine.model_config.task != "embed":
+ raise ValueError(
+ "Embedding API is only enabled for `--task embed`")
+
+ items = self.encode(prompts,
+ use_tqdm=use_tqdm,
+ pooling_params=pooling_params,
+ lora_request=lora_request,
+ prompt_adapter_request=prompt_adapter_request)
+
+ return [EmbeddingRequestOutput.from_base(item) for item in items]
+
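# --- Illustrative sketch (editorial note, not part of the packaged file):
# embed() is a thin wrapper over encode() that converts the results to
# EmbeddingRequestOutput; .outputs.embedding holding a list of floats is an
# assumption, and `pooling_llm` comes from the sketch above.
vectors = pooling_llm.embed(["A quick brown fox"])
print(len(vectors[0].outputs.embedding))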
+ def classify(
+ self,
+ prompts: Union[PromptType, Sequence[PromptType]],
+ /,
+ *,
+ use_tqdm: bool = True,
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
+ ) -> list[ClassificationRequestOutput]:
+ """
+ Generate class logits for each prompt.
+
+ This class automatically batches the given prompts, considering
+ the memory constraint. For the best performance, put all of your prompts
+ into a single list and pass it to this method.
+
+ Args:
+ prompts: The prompts to the LLM. You may pass a sequence of prompts
+ for batch inference. See :class:`~vllm.inputs.PromptType`
+ for more details about the format of each prompt.
+ use_tqdm: Whether to use tqdm to display the progress bar.
+ lora_request: LoRA request to use for generation, if any.
+ prompt_adapter_request: Prompt Adapter request to use for
+ generation, if any.
+
+ Returns:
+ A list of ``ClassificationRequestOutput`` objects containing the
+ class probabilities in the same order as the input prompts.
+ """
+ if self.llm_engine.model_config.task != "classify":
+ raise ValueError(
+ "Classification API is only enabled for `--task classify`")
+
+ items = self.encode(prompts,
+ use_tqdm=use_tqdm,
+ lora_request=lora_request,
+ prompt_adapter_request=prompt_adapter_request)
+
+ return [ClassificationRequestOutput.from_base(item) for item in items]
+
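# --- Illustrative sketch (editorial note, not part of the packaged file):
# classify() needs a model started with `--task classify`; the model name is a
# placeholder, and .outputs.probs holding class probabilities is an assumption.
classifier = LLM(model="some-org/sequence-classifier", task="classify")
result = classifier.classify(["vLLM makes offline inference easy."])
print(result[0].outputs.probs)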
+ def _embedding_score(
+ self,
+ tokenizer: AnyTokenizer,
+ text_1: list[Union[str, TextPrompt, TokensPrompt]],
+ text_2: list[Union[str, TextPrompt, TokensPrompt]],
+ truncate_prompt_tokens: Optional[int] = None,
+ use_tqdm: bool = True,
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
+ ) -> list[ScoringRequestOutput]:
+
+ encoded_output: list[PoolingRequestOutput] = self.encode(
+ text_1 + text_2,
+ use_tqdm=use_tqdm,
+ lora_request=lora_request,
+ prompt_adapter_request=prompt_adapter_request)
+
+ encoded_output_1: list[PoolingRequestOutput] = encoded_output[
+ 0:len(text_1)]
+ encoded_output_2: list[PoolingRequestOutput] = encoded_output[
+ len(text_1):]
+
+ if len(encoded_output_1) == 1:
+ encoded_output_1 = encoded_output_1 * len(encoded_output_2)
+
+ scores = _cosine_similarity(tokenizer=tokenizer,
+ embed_1=encoded_output_1,
+ embed_2=encoded_output_2)
+
+ items = self.engine_class.validate_outputs(scores,
+ PoolingRequestOutput)
+ return [ScoringRequestOutput.from_base(item) for item in items]
+
+ def _cross_encoding_score(
+ self,
+ tokenizer: AnyTokenizer,
+ text_1: list[str],
+ text_2: list[str],
+ truncate_prompt_tokens: Optional[int] = None,
+ use_tqdm: bool = True,
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
+ ) -> list[ScoringRequestOutput]:
+
+ if isinstance(tokenizer, MistralTokenizer):
+ raise ValueError(
+ "Score API is only enabled for `--task embed or score`")
+
+ if len(text_1) == 1:
+ text_1 = text_1 * len(text_2)
+
+ input_pairs = [(t1, t2) for t1, t2 in zip(text_1, text_2)]
+
+ pooling_params = PoolingParams()
+
+ tokenization_kwargs: dict[str, Any] = {}
+ if truncate_prompt_tokens is not None:
+ tokenization_kwargs["truncation"] = True
+ tokenization_kwargs["max_length"] = truncate_prompt_tokens
+
+ parsed_prompts = []
+
+ for q, t in input_pairs:
+ prompt_inputs = tokenizer(text=q,
+ text_pair=t,
+ **tokenization_kwargs)
+ engine_prompt = TokensPrompt(
+ prompt_token_ids=prompt_inputs["input_ids"],
+ token_type_ids=prompt_inputs.get("token_type_ids"))
+ parsed_prompts.append(engine_prompt)
+
+ self._validate_and_add_requests(
+ prompts=parsed_prompts,
+ params=pooling_params,
+ lora_request=lora_request,
+ prompt_adapter_request=prompt_adapter_request,
+ )
+
+ outputs = self._run_engine(use_tqdm=use_tqdm)
+ items = self.engine_class.validate_outputs(outputs,
+ PoolingRequestOutput)
+
+ return [ScoringRequestOutput.from_base(item) for item in items]
+
+ def score(
+ self,
+ text_1: Union[SingletonPrompt, Sequence[SingletonPrompt]],
+ text_2: Union[SingletonPrompt, Sequence[SingletonPrompt]],
+ /,
+ *,
+ truncate_prompt_tokens: Optional[int] = None,
+ use_tqdm: bool = True,
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
+ ) -> list[ScoringRequestOutput]:
+ """Generate similarity scores for all pairs ``<text,text_pair>``.
+
+ The inputs can be ``1 -> 1``, ``1 -> N`` or ``N -> N``.
+ In the ``1 -> N`` case the ``text_1`` sentence will be replicated ``N``
+ times to pair with the ``text_2`` sentences.
+ The input pairs are used to build a list of prompts for the
+ cross encoder model. This class automatically batches the prompts,
+ considering the memory constraint. For the best performance, put all
+ of your texts into a single list and pass it to this method.
+
+ Args:
+ text_1: can be a single prompt or a list of prompts, in which
+ case it has to have the same length as the ``text_2`` list
+ text_2: The texts to pair with the query to form the input
+ to the LLM. See :class:`~vllm.inputs.PromptType` for
+ more details about the format of each prompt.
+ use_tqdm: Whether to use tqdm to display the progress bar.
+ lora_request: LoRA request to use for generation, if any.
+ prompt_adapter_request: Prompt Adapter request to use for
+ generation, if any.
+
+ Returns:
+ A list of ``ScoringRequestOutput`` objects containing the
+ generated scores in the same order as the input prompts.
+ """
+ runner_type = self.llm_engine.model_config.runner_type
+ if runner_type != "pooling":
+ messages = ["LLM.score() is only supported for pooling models."]
+
+ supported_runner_types = self.llm_engine.model_config \
+ .supported_runner_types
+ if "pooling" in supported_runner_types:
+ messages.append(
+ "Your model supports the 'pooling' runner, but is "
+ f"currently initialized for the '{runner_type}' runner. "
+ "Please initialize vLLM using `--task embed`, "
+ "`--task classify`, `--task score` etc.")
+
+ raise ValueError(" ".join(messages))
+
+ if self.llm_engine.model_config.task not in ("embed", "score"):
+ raise ValueError(
+ "Score API is only enabled for `--task embed or --task score`")
+
+ # the tokenizer for models such as
+ # "cross-encoder/ms-marco-MiniLM-L-6-v2" doesn't support passing
+ # lists of tokens to the `text` and `text_pair` kwargs
+ tokenizer = self.llm_engine.get_tokenizer()
+
+ def ensure_str(prompt: SingletonPrompt):
+ if isinstance(prompt, dict):
+ if "multi_modal_data" in prompt:
+ raise ValueError("Multi-modal prompt is not "
+ "supported for scoring")
+ elif "prompt_token_ids" in prompt:
+ prompt = tokenizer.decode(
+ cast(TokensPrompt, prompt)["prompt_token_ids"])
+ elif "prompt" in prompt:
+ prompt = cast(TextPrompt, prompt)["prompt"]
+ assert type(prompt) is str
+ return prompt
+
+ if isinstance(text_1, (str, dict)):
+ # Convert a single prompt to a list.
+ text_1 = [text_1]
+ input_text_1: list[str] = [ensure_str(t) for t in text_1]
+
+ if isinstance(text_2, (str, dict)):
+ # Convert a single prompt to a list.
+ text_2 = [text_2]
+ input_text_2: list[str] = [ensure_str(t) for t in text_2]
+
+ _validate_score_input_lens(input_text_1, input_text_2)
+
+ if self.llm_engine.model_config.is_cross_encoder:
+ return self._cross_encoding_score(tokenizer, input_text_1,
+ input_text_2,
+ truncate_prompt_tokens, use_tqdm,
+ lora_request,
+ prompt_adapter_request)
+ else:
+ return self._embedding_score(
+ tokenizer,
+ input_text_1, # type: ignore[arg-type]
+ input_text_2, # type: ignore[arg-type]
+ truncate_prompt_tokens,
+ use_tqdm,
+ lora_request,
+ prompt_adapter_request)
+
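# --- Illustrative sketch (editorial note, not part of the packaged file):
# 1 -> N scoring. With a cross-encoder the pair is scored directly
# (_cross_encoding_score above); otherwise cosine similarity of embeddings is
# used (_embedding_score). The model name is taken from the comment above;
# .outputs.score on ScoringRequestOutput is an assumption.
scorer = LLM(model="cross-encoder/ms-marco-MiniLM-L-6-v2", task="score")
score_outputs = scorer.score(
    "What is the capital of France?",
    ["Paris is the capital of France.", "The sky is blue."])
print([o.outputs.score for o in score_outputs])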
+ def start_profile(self) -> None:
+ self.llm_engine.start_profile()
+
+ def stop_profile(self) -> None:
+ self.llm_engine.stop_profile()
+
+ def reset_prefix_cache(self, device: Optional[Device] = None) -> bool:
+ return self.llm_engine.reset_prefix_cache(device)
+
+ def sleep(self, level: int = 1):
+ """
+ Put the engine to sleep. The engine should not process any requests.
+ The caller should guarantee that no requests are being processed
+ during the sleep period, before `wake_up` is called.
+
+ Args:
+ level: The sleep level. Level 1 sleep will offload the model
+ weights and discard the kv cache. The content of kv cache
+ is forgotten. Level 1 sleep is good for sleeping and waking
+ up the engine to run the same model again. The model weights
+ are backed up in CPU memory. Please make sure there's enough
+ CPU memory to store the model weights. Level 2 sleep will
+ discard both the model weights and the kv cache. The content
+ of both the model weights and kv cache is forgotten. Level 2
+ sleep is good for sleeping and waking up the engine to run a
+ different model or update the model, where previous model
+ weights are not needed. It reduces CPU memory pressure.
+ """
+ self.reset_prefix_cache()
+ self.llm_engine.sleep(level=level)
+
+ def wake_up(self, tags: Optional[list[str]] = None):
+ """
+ Wake up the engine from sleep mode. See the :meth:`sleep` method
+ for more details.
+
+ Args:
+ tags: An optional list of tags to reallocate the engine memory
+ for specific memory allocations. Values must be in
+ ("weights", "kv_cache",). If None, all memory is reallocated.
+ wake_up should be called with all tags (or None) before the
+ engine is used again.
+ """
+ self.llm_engine.wake_up(tags)
+
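# --- Illustrative sketch (editorial note, not part of the packaged file):
# level-1 sleep offloads weights to CPU and drops the KV cache; wake_up()
# restores the engine before it is used again, per the docstrings above.
llm.sleep(level=1)
# ... no requests may be submitted while the engine is asleep ...
llm.wake_up()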
+ # LEGACY
+ def _convert_v1_inputs(
+ self,
+ prompts: Optional[Union[str, list[str]]],
+ prompt_token_ids: Optional[Union[list[int], list[list[int]]]],
+ ):
+ # skip_tokenizer_init is now checked in engine
+
+ if prompts is not None:
+ prompts = [p["content"] for p in parse_and_batch_prompt(prompts)]
+ if prompt_token_ids is not None:
+ prompt_token_ids = [
+ p["content"] for p in parse_and_batch_prompt(prompt_token_ids)
+ ]
+
+ num_requests = None
+ if prompts is not None:
+ num_requests = len(prompts)
+ if prompt_token_ids is not None:
+ if (num_requests is not None
+ and num_requests != len(prompt_token_ids)):
+ raise ValueError("The lengths of prompts and prompt_token_ids "
+ "must be the same.")
+
+ num_requests = len(prompt_token_ids)
+ if num_requests is None:
+ raise ValueError("Either prompts or prompt_token_ids must be "
+ "provided.")
+
+ parsed_prompts: list[PromptType] = []
+ for i in range(num_requests):
+ item: PromptType
+
+ if prompts is not None:
+ item = TextPrompt(prompt=prompts[i])
+ elif prompt_token_ids is not None:
+ item = TokensPrompt(prompt_token_ids=prompt_token_ids[i])
+ else:
+ raise AssertionError
+
+ parsed_prompts.append(item)
+
+ return parsed_prompts
+
+ def _validate_and_add_requests(
+ self,
+ prompts: Union[PromptType, Sequence[PromptType]],
+ params: Union[SamplingParams, Sequence[SamplingParams], PoolingParams,
+ Sequence[PoolingParams]],
+ lora_request: Optional[Union[Sequence[LoRARequest], LoRARequest]],
+ prompt_adapter_request: Optional[PromptAdapterRequest],
+ guided_options: Optional[GuidedDecodingRequest] = None,
+ priority: Optional[list[int]] = None,
+ ) -> None:
+ if guided_options is not None:
+ warnings.warn(
+ "guided_options_request is deprecated, use "
+ "SamplingParams.guided_decoding instead",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ if isinstance(prompts, (str, dict)):
+ # Convert a single prompt to a list.
+ prompts = [prompts]
+
+ num_requests = len(prompts)
+ if isinstance(params, list) and len(params) != num_requests:
+ raise ValueError("The lengths of prompts and params "
+ "must be the same.")
+ if isinstance(lora_request,
+ list) and len(lora_request) != num_requests:
+ raise ValueError("The lengths of prompts and lora_request "
+ "must be the same.")
+
+ for sp in params if isinstance(params, list) else (params, ):
+ if isinstance(sp, SamplingParams):
+ self._add_guided_params(sp, guided_options)
+
+ # We only care about the final output
+ sp.output_kind = RequestOutputKind.FINAL_ONLY
+
+ # Add requests to the engine.
+ for i, prompt in enumerate(prompts):
+ self._add_request(
+ prompt,
+ params[i] if isinstance(params, Sequence) else params,
+ lora_request=lora_request[i] if isinstance(
+ lora_request, Sequence) else lora_request,
+ prompt_adapter_request=prompt_adapter_request,
+ priority=priority[i] if priority else 0,
+ )
+
+ def _add_request(
+ self,
+ prompt: PromptType,
+ params: Union[SamplingParams, PoolingParams],
+ lora_request: Optional[LoRARequest] = None,
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
+ priority: int = 0,
+ ) -> None:
+ request_id = str(next(self.request_counter))
+ self.llm_engine.add_request(
+ request_id,
+ prompt,
+ params,
+ lora_request=lora_request,
+ prompt_adapter_request=prompt_adapter_request,
+ priority=priority,
+ )
+
+ def _add_guided_params(
+ self,
+ params: SamplingParams,
+ guided_options: Optional[GuidedDecodingRequest] = None):
+ if guided_options is None:
+ return params
+
+ if params.guided_decoding is not None:
+ raise ValueError("Cannot set both guided_options_request and "
+ "params.guided_decoding.")
+
+ params.guided_decoding = GuidedDecodingParams(
+ json=guided_options.guided_json,
+ regex=guided_options.guided_regex,
+ choice=guided_options.guided_choice,
+ grammar=guided_options.guided_grammar,
+ json_object=guided_options.guided_json_object,
+ backend=guided_options.guided_decoding_backend,
+ whitespace_pattern=guided_options.guided_whitespace_pattern,
+ structural_tag=guided_options.structural_tag,
+ )
+ return params
+
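# --- Illustrative sketch (editorial note, not part of the packaged file):
# the non-deprecated way to request guided decoding is through
# SamplingParams.guided_decoding, which _add_guided_params above would
# otherwise populate from a legacy GuidedDecodingRequest.
from vllm.sampling_params import GuidedDecodingParams, SamplingParams

guided = SamplingParams(
    max_tokens=16,
    guided_decoding=GuidedDecodingParams(choice=["positive", "negative"]),
)
sentiment = llm.generate("Sentiment of 'I love this library':", guided)
print(sentiment[0].outputs[0].text)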
+ def _run_engine(
+ self, *, use_tqdm: bool
+ ) -> list[Union[RequestOutput, PoolingRequestOutput]]:
+ # Initialize tqdm.
+ if use_tqdm:
+ num_requests = self.llm_engine.get_num_unfinished_requests()
+ pbar = tqdm(
+ total=num_requests,
+ desc="Processed prompts",
+ dynamic_ncols=True,
+ postfix=(f"est. speed input: {0:.2f} toks/s, "
+ f"output: {0:.2f} toks/s"),
+ )
+
+ # Run the engine.
+ outputs: list[Union[RequestOutput, PoolingRequestOutput]] = []
+ total_in_toks = 0
+ total_out_toks = 0
+ while self.llm_engine.has_unfinished_requests():
+ step_outputs = self.llm_engine.step()
+ for output in step_outputs:
+ if output.finished:
+ outputs.append(output)
+ if use_tqdm:
+ if isinstance(output, RequestOutput):
+ # Calculate tokens only for RequestOutput
+ n = len(output.outputs)
+ assert output.prompt_token_ids is not None
+ total_in_toks += len(output.prompt_token_ids) * n
+ in_spd = total_in_toks / pbar.format_dict["elapsed"]
+ total_out_toks += sum(
+ len(stp.token_ids) for stp in output.outputs)
+ out_spd = (total_out_toks /
+ pbar.format_dict["elapsed"])
+ pbar.postfix = (
+ f"est. speed input: {in_spd:.2f} toks/s, "
+ f"output: {out_spd:.2f} toks/s")
+ pbar.update(n)
+ else:
+ pbar.update(1)
+
+ if use_tqdm:
+ pbar.close()
+ # Sort the outputs by request ID.
+ # This is necessary because some requests may be finished earlier than
+ # their previous requests.
+ return sorted(outputs, key=lambda x: int(x.request_id))