vllm_cpu-0.11.0.post2-cp312-cp312-manylinux_2_17_x86_64.whl

This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (1398)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +220 -0
  3. vllm/_bc_linter.py +59 -0
  4. vllm/_custom_ops.py +2044 -0
  5. vllm/_ipex_ops.py +393 -0
  6. vllm/_version.py +34 -0
  7. vllm/assets/__init__.py +0 -0
  8. vllm/assets/audio.py +45 -0
  9. vllm/assets/base.py +41 -0
  10. vllm/assets/image.py +50 -0
  11. vllm/assets/video.py +145 -0
  12. vllm/attention/__init__.py +15 -0
  13. vllm/attention/backends/__init__.py +0 -0
  14. vllm/attention/backends/abstract.py +204 -0
  15. vllm/attention/backends/utils.py +33 -0
  16. vllm/attention/layer.py +645 -0
  17. vllm/attention/layers/__init__.py +0 -0
  18. vllm/attention/layers/chunked_local_attention.py +93 -0
  19. vllm/attention/layers/cross_attention.py +162 -0
  20. vllm/attention/layers/encoder_only_attention.py +86 -0
  21. vllm/attention/ops/__init__.py +0 -0
  22. vllm/attention/ops/chunked_prefill_paged_decode.py +405 -0
  23. vllm/attention/ops/common.py +345 -0
  24. vllm/attention/ops/flashmla.py +192 -0
  25. vllm/attention/ops/merge_attn_states.py +43 -0
  26. vllm/attention/ops/paged_attn.py +262 -0
  27. vllm/attention/ops/pallas_kv_cache_update.py +124 -0
  28. vllm/attention/ops/prefix_prefill.py +928 -0
  29. vllm/attention/ops/rocm_aiter_mla.py +104 -0
  30. vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
  31. vllm/attention/ops/triton_decode_attention.py +691 -0
  32. vllm/attention/ops/triton_flash_attention.py +984 -0
  33. vllm/attention/ops/triton_merge_attn_states.py +97 -0
  34. vllm/attention/ops/triton_reshape_and_cache_flash.py +175 -0
  35. vllm/attention/ops/triton_unified_attention.py +894 -0
  36. vllm/attention/selector.py +245 -0
  37. vllm/attention/utils/__init__.py +0 -0
  38. vllm/attention/utils/fa_utils.py +85 -0
  39. vllm/attention/utils/kv_sharing_utils.py +33 -0
  40. vllm/beam_search.py +87 -0
  41. vllm/benchmarks/__init__.py +0 -0
  42. vllm/benchmarks/datasets.py +2723 -0
  43. vllm/benchmarks/latency.py +170 -0
  44. vllm/benchmarks/lib/__init__.py +3 -0
  45. vllm/benchmarks/lib/endpoint_request_func.py +533 -0
  46. vllm/benchmarks/lib/ready_checker.py +73 -0
  47. vllm/benchmarks/lib/utils.py +80 -0
  48. vllm/benchmarks/serve.py +1358 -0
  49. vllm/benchmarks/throughput.py +696 -0
  50. vllm/collect_env.py +823 -0
  51. vllm/compilation/__init__.py +0 -0
  52. vllm/compilation/activation_quant_fusion.py +189 -0
  53. vllm/compilation/backends.py +650 -0
  54. vllm/compilation/base_static_graph.py +56 -0
  55. vllm/compilation/collective_fusion.py +1188 -0
  56. vllm/compilation/compiler_interface.py +573 -0
  57. vllm/compilation/counter.py +47 -0
  58. vllm/compilation/cuda_graph.py +199 -0
  59. vllm/compilation/cuda_piecewise_backend.py +117 -0
  60. vllm/compilation/decorators.py +400 -0
  61. vllm/compilation/fix_functionalization.py +205 -0
  62. vllm/compilation/fusion.py +383 -0
  63. vllm/compilation/fusion_attn.py +295 -0
  64. vllm/compilation/fx_utils.py +84 -0
  65. vllm/compilation/inductor_pass.py +136 -0
  66. vllm/compilation/monitor.py +57 -0
  67. vllm/compilation/noop_elimination.py +158 -0
  68. vllm/compilation/pass_manager.py +125 -0
  69. vllm/compilation/post_cleanup.py +20 -0
  70. vllm/compilation/sequence_parallelism.py +478 -0
  71. vllm/compilation/torch25_custom_graph_pass.py +42 -0
  72. vllm/compilation/vllm_inductor_pass.py +156 -0
  73. vllm/compilation/wrapper.py +136 -0
  74. vllm/config/__init__.py +814 -0
  75. vllm/config/cache.py +220 -0
  76. vllm/config/compilation.py +673 -0
  77. vllm/config/device.py +74 -0
  78. vllm/config/kv_events.py +50 -0
  79. vllm/config/kv_transfer.py +111 -0
  80. vllm/config/load.py +113 -0
  81. vllm/config/lora.py +132 -0
  82. vllm/config/model.py +1912 -0
  83. vllm/config/multimodal.py +129 -0
  84. vllm/config/observability.py +99 -0
  85. vllm/config/parallel.py +524 -0
  86. vllm/config/pooler.py +97 -0
  87. vllm/config/scheduler.py +287 -0
  88. vllm/config/speculative.py +568 -0
  89. vllm/config/speech_to_text.py +39 -0
  90. vllm/config/structured_outputs.py +64 -0
  91. vllm/config/utils.py +145 -0
  92. vllm/connections.py +186 -0
  93. vllm/device_allocator/__init__.py +0 -0
  94. vllm/device_allocator/cumem.py +311 -0
  95. vllm/distributed/__init__.py +6 -0
  96. vllm/distributed/communication_op.py +41 -0
  97. vllm/distributed/device_communicators/__init__.py +0 -0
  98. vllm/distributed/device_communicators/all2all.py +440 -0
  99. vllm/distributed/device_communicators/all_reduce_utils.py +317 -0
  100. vllm/distributed/device_communicators/base_device_communicator.py +295 -0
  101. vllm/distributed/device_communicators/cpu_communicator.py +201 -0
  102. vllm/distributed/device_communicators/cuda_communicator.py +323 -0
  103. vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
  104. vllm/distributed/device_communicators/custom_all_reduce.py +311 -0
  105. vllm/distributed/device_communicators/mnnvl_compat.py +28 -0
  106. vllm/distributed/device_communicators/pynccl.py +340 -0
  107. vllm/distributed/device_communicators/pynccl_allocator.py +186 -0
  108. vllm/distributed/device_communicators/pynccl_wrapper.py +416 -0
  109. vllm/distributed/device_communicators/quick_all_reduce.py +278 -0
  110. vllm/distributed/device_communicators/ray_communicator.py +258 -0
  111. vllm/distributed/device_communicators/shm_broadcast.py +589 -0
  112. vllm/distributed/device_communicators/shm_object_storage.py +635 -0
  113. vllm/distributed/device_communicators/symm_mem.py +136 -0
  114. vllm/distributed/device_communicators/tpu_communicator.py +102 -0
  115. vllm/distributed/device_communicators/xpu_communicator.py +94 -0
  116. vllm/distributed/eplb/__init__.py +8 -0
  117. vllm/distributed/eplb/eplb_state.py +620 -0
  118. vllm/distributed/eplb/rebalance_algo.py +239 -0
  119. vllm/distributed/eplb/rebalance_execute.py +424 -0
  120. vllm/distributed/kv_events.py +362 -0
  121. vllm/distributed/kv_transfer/README.md +29 -0
  122. vllm/distributed/kv_transfer/__init__.py +13 -0
  123. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  124. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  125. vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
  126. vllm/distributed/kv_transfer/kv_connector/factory.py +113 -0
  127. vllm/distributed/kv_transfer/kv_connector/utils.py +261 -0
  128. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
  129. vllm/distributed/kv_transfer/kv_connector/v1/base.py +388 -0
  130. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +168 -0
  131. vllm/distributed/kv_transfer/kv_connector/v1/metrics.py +100 -0
  132. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +328 -0
  133. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1473 -0
  134. vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py +485 -0
  135. vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
  136. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +488 -0
  137. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +550 -0
  138. vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +267 -0
  139. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +418 -0
  140. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  141. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
  142. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
  143. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
  144. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  145. vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
  146. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +290 -0
  147. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
  148. vllm/distributed/kv_transfer/kv_transfer_state.py +73 -0
  149. vllm/distributed/parallel_state.py +1532 -0
  150. vllm/distributed/tpu_distributed_utils.py +178 -0
  151. vllm/distributed/utils.py +536 -0
  152. vllm/engine/__init__.py +0 -0
  153. vllm/engine/arg_utils.py +1778 -0
  154. vllm/engine/async_llm_engine.py +6 -0
  155. vllm/engine/llm_engine.py +6 -0
  156. vllm/engine/metrics.py +577 -0
  157. vllm/engine/metrics_types.py +84 -0
  158. vllm/engine/protocol.py +333 -0
  159. vllm/entrypoints/__init__.py +0 -0
  160. vllm/entrypoints/api_server.py +178 -0
  161. vllm/entrypoints/chat_utils.py +1705 -0
  162. vllm/entrypoints/cli/__init__.py +12 -0
  163. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  164. vllm/entrypoints/cli/benchmark/base.py +25 -0
  165. vllm/entrypoints/cli/benchmark/latency.py +21 -0
  166. vllm/entrypoints/cli/benchmark/main.py +55 -0
  167. vllm/entrypoints/cli/benchmark/serve.py +21 -0
  168. vllm/entrypoints/cli/benchmark/throughput.py +21 -0
  169. vllm/entrypoints/cli/collect_env.py +36 -0
  170. vllm/entrypoints/cli/main.py +60 -0
  171. vllm/entrypoints/cli/openai.py +233 -0
  172. vllm/entrypoints/cli/run_batch.py +67 -0
  173. vllm/entrypoints/cli/serve.py +232 -0
  174. vllm/entrypoints/cli/types.py +29 -0
  175. vllm/entrypoints/constants.py +10 -0
  176. vllm/entrypoints/context.py +481 -0
  177. vllm/entrypoints/harmony_utils.py +436 -0
  178. vllm/entrypoints/launcher.py +164 -0
  179. vllm/entrypoints/llm.py +1629 -0
  180. vllm/entrypoints/logger.py +79 -0
  181. vllm/entrypoints/openai/__init__.py +0 -0
  182. vllm/entrypoints/openai/api_server.py +1953 -0
  183. vllm/entrypoints/openai/cli_args.py +288 -0
  184. vllm/entrypoints/openai/logits_processors.py +90 -0
  185. vllm/entrypoints/openai/protocol.py +2757 -0
  186. vllm/entrypoints/openai/run_batch.py +491 -0
  187. vllm/entrypoints/openai/serving_chat.py +1597 -0
  188. vllm/entrypoints/openai/serving_classification.py +173 -0
  189. vllm/entrypoints/openai/serving_completion.py +692 -0
  190. vllm/entrypoints/openai/serving_embedding.py +631 -0
  191. vllm/entrypoints/openai/serving_engine.py +992 -0
  192. vllm/entrypoints/openai/serving_models.py +288 -0
  193. vllm/entrypoints/openai/serving_pooling.py +276 -0
  194. vllm/entrypoints/openai/serving_responses.py +1709 -0
  195. vllm/entrypoints/openai/serving_score.py +479 -0
  196. vllm/entrypoints/openai/serving_tokenization.py +196 -0
  197. vllm/entrypoints/openai/serving_transcription.py +136 -0
  198. vllm/entrypoints/openai/speech_to_text.py +388 -0
  199. vllm/entrypoints/openai/tool_parsers/__init__.py +55 -0
  200. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
  201. vllm/entrypoints/openai/tool_parsers/deepseekv31_tool_parser.py +367 -0
  202. vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
  203. vllm/entrypoints/openai/tool_parsers/glm4_moe_tool_parser.py +185 -0
  204. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
  205. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
  206. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +455 -0
  207. vllm/entrypoints/openai/tool_parsers/hunyuan_a13b_tool_parser.py +372 -0
  208. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
  209. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
  210. vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py +377 -0
  211. vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
  212. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +269 -0
  213. vllm/entrypoints/openai/tool_parsers/longcat_tool_parser.py +39 -0
  214. vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +816 -0
  215. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
  216. vllm/entrypoints/openai/tool_parsers/openai_tool_parser.py +93 -0
  217. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
  218. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
  219. vllm/entrypoints/openai/tool_parsers/qwen3coder_tool_parser.py +707 -0
  220. vllm/entrypoints/openai/tool_parsers/qwen3xml_tool_parser.py +1137 -0
  221. vllm/entrypoints/openai/tool_parsers/seed_oss_tool_parser.py +679 -0
  222. vllm/entrypoints/openai/tool_parsers/step3_tool_parser.py +296 -0
  223. vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
  224. vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +524 -0
  225. vllm/entrypoints/renderer.py +395 -0
  226. vllm/entrypoints/score_utils.py +232 -0
  227. vllm/entrypoints/ssl.py +75 -0
  228. vllm/entrypoints/tool.py +139 -0
  229. vllm/entrypoints/tool_server.py +206 -0
  230. vllm/entrypoints/utils.py +233 -0
  231. vllm/env_override.py +23 -0
  232. vllm/envs.py +1590 -0
  233. vllm/executor/__init__.py +0 -0
  234. vllm/executor/executor_base.py +381 -0
  235. vllm/executor/msgspec_utils.py +35 -0
  236. vllm/executor/ray_distributed_executor.py +699 -0
  237. vllm/executor/ray_utils.py +410 -0
  238. vllm/executor/uniproc_executor.py +176 -0
  239. vllm/forward_context.py +402 -0
  240. vllm/inputs/__init__.py +30 -0
  241. vllm/inputs/data.py +356 -0
  242. vllm/inputs/parse.py +151 -0
  243. vllm/inputs/preprocess.py +664 -0
  244. vllm/logger.py +229 -0
  245. vllm/logging_utils/__init__.py +10 -0
  246. vllm/logging_utils/dump_input.py +81 -0
  247. vllm/logging_utils/formatter.py +79 -0
  248. vllm/logging_utils/log_time.py +32 -0
  249. vllm/logits_process.py +119 -0
  250. vllm/logprobs.py +28 -0
  251. vllm/lora/__init__.py +0 -0
  252. vllm/lora/layers/__init__.py +34 -0
  253. vllm/lora/layers/base.py +69 -0
  254. vllm/lora/layers/base_linear.py +185 -0
  255. vllm/lora/layers/column_parallel_linear.py +609 -0
  256. vllm/lora/layers/logits_processor.py +247 -0
  257. vllm/lora/layers/qkv_x_parallel_linear.py +8 -0
  258. vllm/lora/layers/replicated_linear.py +60 -0
  259. vllm/lora/layers/row_parallel_linear.py +196 -0
  260. vllm/lora/layers/utils.py +65 -0
  261. vllm/lora/layers/vocal_parallel_embedding.py +174 -0
  262. vllm/lora/lora_weights.py +199 -0
  263. vllm/lora/models.py +816 -0
  264. vllm/lora/ops/__init__.py +0 -0
  265. vllm/lora/ops/ipex_ops/__init__.py +7 -0
  266. vllm/lora/ops/ipex_ops/lora_ops.py +44 -0
  267. vllm/lora/ops/torch_ops/__init__.py +16 -0
  268. vllm/lora/ops/torch_ops/lora_ops.py +119 -0
  269. vllm/lora/ops/triton_ops/__init__.py +12 -0
  270. vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
  271. vllm/lora/ops/triton_ops/lora_expand_op.py +289 -0
  272. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
  273. vllm/lora/ops/triton_ops/lora_shrink_op.py +243 -0
  274. vllm/lora/ops/triton_ops/utils.py +126 -0
  275. vllm/lora/ops/xla_ops/__init__.py +7 -0
  276. vllm/lora/ops/xla_ops/lora_ops.py +144 -0
  277. vllm/lora/peft_helper.py +127 -0
  278. vllm/lora/punica_wrapper/__init__.py +10 -0
  279. vllm/lora/punica_wrapper/punica_base.py +458 -0
  280. vllm/lora/punica_wrapper/punica_cpu.py +349 -0
  281. vllm/lora/punica_wrapper/punica_gpu.py +272 -0
  282. vllm/lora/punica_wrapper/punica_selector.py +20 -0
  283. vllm/lora/punica_wrapper/punica_tpu.py +391 -0
  284. vllm/lora/punica_wrapper/punica_xpu.py +276 -0
  285. vllm/lora/punica_wrapper/utils.py +136 -0
  286. vllm/lora/request.py +97 -0
  287. vllm/lora/resolver.py +85 -0
  288. vllm/lora/utils.py +246 -0
  289. vllm/lora/worker_manager.py +267 -0
  290. vllm/model_executor/__init__.py +12 -0
  291. vllm/model_executor/custom_op.py +194 -0
  292. vllm/model_executor/layers/__init__.py +0 -0
  293. vllm/model_executor/layers/activation.py +575 -0
  294. vllm/model_executor/layers/attention_layer_base.py +23 -0
  295. vllm/model_executor/layers/fla/__init__.py +8 -0
  296. vllm/model_executor/layers/fla/ops/__init__.py +17 -0
  297. vllm/model_executor/layers/fla/ops/chunk.py +225 -0
  298. vllm/model_executor/layers/fla/ops/chunk_delta_h.py +290 -0
  299. vllm/model_executor/layers/fla/ops/chunk_o.py +177 -0
  300. vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +140 -0
  301. vllm/model_executor/layers/fla/ops/cumsum.py +226 -0
  302. vllm/model_executor/layers/fla/ops/fused_recurrent.py +366 -0
  303. vllm/model_executor/layers/fla/ops/index.py +39 -0
  304. vllm/model_executor/layers/fla/ops/l2norm.py +143 -0
  305. vllm/model_executor/layers/fla/ops/layernorm_guard.py +337 -0
  306. vllm/model_executor/layers/fla/ops/op.py +39 -0
  307. vllm/model_executor/layers/fla/ops/solve_tril.py +365 -0
  308. vllm/model_executor/layers/fla/ops/utils.py +180 -0
  309. vllm/model_executor/layers/fla/ops/wy_fast.py +114 -0
  310. vllm/model_executor/layers/fused_moe/__init__.py +89 -0
  311. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +322 -0
  312. vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +141 -0
  313. vllm/model_executor/layers/fused_moe/config.py +804 -0
  314. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  315. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  316. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  317. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  318. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  319. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  320. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  321. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  322. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  323. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  324. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  325. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  326. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  327. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H100,dtype=fp8_w8a8.json +123 -0
  328. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  329. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  330. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  331. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  332. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  333. vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
  334. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  335. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  336. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  337. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  338. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  339. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +147 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=62,N=128,device_name=AMD_Instinct_MI300X.json +200 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=AMD_Instinct_MI300X.json +200 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=AMD_Instinct_MI300X.json +200 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  454. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  455. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  456. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
  457. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  458. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  459. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  460. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  461. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  462. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
  463. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  464. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  465. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  466. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  467. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  468. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  469. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  470. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  471. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
  472. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  473. vllm/model_executor/layers/fused_moe/configs/E=72,N=192,device_name=AMD_Instinct_MI300X.json +200 -0
  474. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=AMD_Instinct_MI300X.json +200 -0
  475. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  476. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=AMD_Instinct_MI300X.json +200 -0
  477. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  478. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  479. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  480. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  481. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  482. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  483. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  484. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  485. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  486. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  487. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  488. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  489. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  490. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  491. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  492. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  493. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  494. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  495. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  496. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  497. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  498. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  499. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  500. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  501. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  502. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  503. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  504. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  505. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
  506. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  507. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  508. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  509. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  510. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  511. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  512. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  513. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  514. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  515. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  516. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  517. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  518. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  519. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  520. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  521. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  522. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  523. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  524. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  525. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  526. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  527. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  528. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  529. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  530. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  531. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  532. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  533. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  534. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  535. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  536. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  537. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  538. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  539. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  540. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  541. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  542. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  543. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  544. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  545. vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +300 -0
  546. vllm/model_executor/layers/fused_moe/cutlass_moe.py +957 -0
  547. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +362 -0
  548. vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +413 -0
  549. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +361 -0
  550. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +274 -0
  551. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +268 -0
  552. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +300 -0
  553. vllm/model_executor/layers/fused_moe/flashinfer_trtllm_moe.py +184 -0
  554. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +993 -0
  555. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +239 -0
  556. vllm/model_executor/layers/fused_moe/fused_moe.py +1890 -0
  557. vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +307 -0
  558. vllm/model_executor/layers/fused_moe/layer.py +2195 -0
  559. vllm/model_executor/layers/fused_moe/modular_kernel.py +1038 -0
  560. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +87 -0
  561. vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
  562. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +205 -0
  563. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  564. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +341 -0
  565. vllm/model_executor/layers/fused_moe/prepare_finalize.py +70 -0
  566. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +424 -0
  567. vllm/model_executor/layers/fused_moe/routing_simulator.py +291 -0
  568. vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +146 -0
  569. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +143 -0
  570. vllm/model_executor/layers/fused_moe/trtllm_moe.py +191 -0
  571. vllm/model_executor/layers/fused_moe/utils.py +274 -0
  572. vllm/model_executor/layers/layernorm.py +395 -0
  573. vllm/model_executor/layers/lightning_attn.py +661 -0
  574. vllm/model_executor/layers/linear.py +1603 -0
  575. vllm/model_executor/layers/logits_processor.py +106 -0
  576. vllm/model_executor/layers/mamba/__init__.py +0 -0
  577. vllm/model_executor/layers/mamba/abstract.py +42 -0
  578. vllm/model_executor/layers/mamba/linear_attn.py +403 -0
  579. vllm/model_executor/layers/mamba/mamba_mixer.py +466 -0
  580. vllm/model_executor/layers/mamba/mamba_mixer2.py +764 -0
  581. vllm/model_executor/layers/mamba/mamba_utils.py +186 -0
  582. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  583. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +1092 -0
  584. vllm/model_executor/layers/mamba/ops/layernorm_gated.py +168 -0
  585. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
  586. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +242 -0
  587. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +527 -0
  588. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +724 -0
  589. vllm/model_executor/layers/mamba/ops/ssd_combined.py +238 -0
  590. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +200 -0
  591. vllm/model_executor/layers/mamba/short_conv.py +253 -0
  592. vllm/model_executor/layers/mla.py +173 -0
  593. vllm/model_executor/layers/pooler.py +719 -0
  594. vllm/model_executor/layers/quantization/__init__.py +157 -0
  595. vllm/model_executor/layers/quantization/auto_round.py +388 -0
  596. vllm/model_executor/layers/quantization/awq.py +228 -0
  597. vllm/model_executor/layers/quantization/awq_marlin.py +554 -0
  598. vllm/model_executor/layers/quantization/awq_triton.py +320 -0
  599. vllm/model_executor/layers/quantization/base_config.py +170 -0
  600. vllm/model_executor/layers/quantization/bitblas.py +464 -0
  601. vllm/model_executor/layers/quantization/bitsandbytes.py +627 -0
  602. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  603. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +797 -0
  604. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +2074 -0
  605. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +27 -0
  606. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +366 -0
  607. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  608. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
  609. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +105 -0
  610. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +185 -0
  611. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +169 -0
  612. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +135 -0
  613. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
  614. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +157 -0
  615. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
  616. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
  617. vllm/model_executor/layers/quantization/compressed_tensors/transform/__init__.py +0 -0
  618. vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +238 -0
  619. vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +153 -0
  620. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/__init__.py +0 -0
  621. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +46 -0
  622. vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
  623. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
  624. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  625. vllm/model_executor/layers/quantization/deepspeedfp.py +196 -0
  626. vllm/model_executor/layers/quantization/experts_int8.py +223 -0
  627. vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
  628. vllm/model_executor/layers/quantization/fp8.py +1098 -0
  629. vllm/model_executor/layers/quantization/gguf.py +599 -0
  630. vllm/model_executor/layers/quantization/gptq.py +340 -0
  631. vllm/model_executor/layers/quantization/gptq_bitblas.py +448 -0
  632. vllm/model_executor/layers/quantization/gptq_marlin.py +751 -0
  633. vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
  634. vllm/model_executor/layers/quantization/hqq_marlin.py +333 -0
  635. vllm/model_executor/layers/quantization/inc.py +61 -0
  636. vllm/model_executor/layers/quantization/input_quant_fp8.py +156 -0
  637. vllm/model_executor/layers/quantization/ipex_quant.py +415 -0
  638. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  639. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +91 -0
  640. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +93 -0
  641. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
  642. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +302 -0
  643. vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +92 -0
  644. vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +117 -0
  645. vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +92 -0
  646. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
  647. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +144 -0
  648. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +139 -0
  649. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
  650. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +89 -0
  651. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +161 -0
  652. vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +206 -0
  653. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
  654. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
  655. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +104 -0
  656. vllm/model_executor/layers/quantization/kv_cache.py +143 -0
  657. vllm/model_executor/layers/quantization/modelopt.py +1596 -0
  658. vllm/model_executor/layers/quantization/moe_wna16.py +484 -0
  659. vllm/model_executor/layers/quantization/mxfp4.py +988 -0
  660. vllm/model_executor/layers/quantization/petit.py +306 -0
  661. vllm/model_executor/layers/quantization/ptpc_fp8.py +129 -0
  662. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  663. vllm/model_executor/layers/quantization/quark/quark.py +432 -0
  664. vllm/model_executor/layers/quantization/quark/quark_moe.py +561 -0
  665. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  666. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  667. vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +239 -0
  668. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +163 -0
  669. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
  670. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  671. vllm/model_executor/layers/quantization/rtn.py +466 -0
  672. vllm/model_executor/layers/quantization/schema.py +86 -0
  673. vllm/model_executor/layers/quantization/torchao.py +214 -0
  674. vllm/model_executor/layers/quantization/tpu_int8.py +125 -0
  675. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  676. vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
  677. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +210 -0
  678. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  679. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  680. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  681. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  682. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  683. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  684. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  685. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  686. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  687. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  688. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  689. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  690. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  691. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  722. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  723. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  724. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  725. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  726. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  727. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  728. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  729. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  730. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  731. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  732. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  733. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  734. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  735. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  736. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  737. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  738. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  739. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  740. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  741. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  742. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  743. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  744. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  745. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  746. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  747. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  748. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  749. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  750. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  751. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  752. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  753. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  754. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  755. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  756. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  757. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  758. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  759. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  760. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  761. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  762. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  763. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  764. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  765. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  766. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  767. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  768. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  769. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  770. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  771. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  772. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  773. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  774. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  775. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  776. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  777. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  778. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  779. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  780. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  781. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  782. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  783. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  784. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  785. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  786. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  787. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  788. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  789. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  790. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  791. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  792. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  793. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  794. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  795. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  796. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  797. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  798. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  799. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  800. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  801. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  802. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  803. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  804. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  805. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  806. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  807. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  808. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  809. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  810. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  811. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  812. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  813. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  814. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  815. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  816. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  817. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  818. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  819. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  820. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  821. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  822. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  823. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  824. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  825. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  826. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  827. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  828. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  829. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  830. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  831. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  832. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  833. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  834. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  835. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  836. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  837. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  838. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  839. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  840. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  841. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  842. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  843. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  844. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  845. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  846. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  847. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  848. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  849. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  850. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  851. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  852. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  853. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  854. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  855. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  856. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  857. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  858. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  859. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  860. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  861. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  862. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  863. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  864. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  865. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  866. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  867. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  868. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  869. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  870. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  871. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  872. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  873. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  874. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  875. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  876. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  877. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  878. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  879. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  880. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  881. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  882. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  883. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  884. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  885. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  886. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  887. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  888. vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
  889. vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +79 -0
  890. vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +248 -0
  891. vllm/model_executor/layers/quantization/utils/fp8_utils.py +949 -0
  892. vllm/model_executor/layers/quantization/utils/gptq_utils.py +146 -0
  893. vllm/model_executor/layers/quantization/utils/int8_utils.py +492 -0
  894. vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
  895. vllm/model_executor/layers/quantization/utils/machete_utils.py +50 -0
  896. vllm/model_executor/layers/quantization/utils/marlin_utils.py +479 -0
  897. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +396 -0
  898. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +345 -0
  899. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
  900. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
  901. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +141 -0
  902. vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +20 -0
  903. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +137 -0
  904. vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +59 -0
  905. vllm/model_executor/layers/quantization/utils/petit_utils.py +122 -0
  906. vllm/model_executor/layers/quantization/utils/quant_utils.py +641 -0
  907. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +458 -0
  908. vllm/model_executor/layers/resampler.py +270 -0
  909. vllm/model_executor/layers/rotary_embedding/__init__.py +204 -0
  910. vllm/model_executor/layers/rotary_embedding/base.py +177 -0
  911. vllm/model_executor/layers/rotary_embedding/common.py +150 -0
  912. vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +138 -0
  913. vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +197 -0
  914. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +41 -0
  915. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +67 -0
  916. vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +80 -0
  917. vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
  918. vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
  919. vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +81 -0
  920. vllm/model_executor/layers/rotary_embedding/mrope.py +1321 -0
  921. vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +42 -0
  922. vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +129 -0
  923. vllm/model_executor/layers/rotary_embedding/rocm_aiter_rope_ops.py +86 -0
  924. vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +68 -0
  925. vllm/model_executor/layers/shared_fused_moe/__init__.py +6 -0
  926. vllm/model_executor/layers/shared_fused_moe/shared_fused_moe.py +56 -0
  927. vllm/model_executor/layers/utils.py +195 -0
  928. vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
  929. vllm/model_executor/model_loader/__init__.py +138 -0
  930. vllm/model_executor/model_loader/base_loader.py +52 -0
  931. vllm/model_executor/model_loader/bitsandbytes_loader.py +788 -0
  932. vllm/model_executor/model_loader/default_loader.py +277 -0
  933. vllm/model_executor/model_loader/dummy_loader.py +28 -0
  934. vllm/model_executor/model_loader/gguf_loader.py +155 -0
  935. vllm/model_executor/model_loader/runai_streamer_loader.py +104 -0
  936. vllm/model_executor/model_loader/sharded_state_loader.py +199 -0
  937. vllm/model_executor/model_loader/tensorizer.py +738 -0
  938. vllm/model_executor/model_loader/tensorizer_loader.py +143 -0
  939. vllm/model_executor/model_loader/tpu.py +114 -0
  940. vllm/model_executor/model_loader/utils.py +292 -0
  941. vllm/model_executor/model_loader/weight_utils.py +990 -0
  942. vllm/model_executor/models/__init__.py +33 -0
  943. vllm/model_executor/models/adapters.py +542 -0
  944. vllm/model_executor/models/aimv2.py +246 -0
  945. vllm/model_executor/models/apertus.py +579 -0
  946. vllm/model_executor/models/arcee.py +422 -0
  947. vllm/model_executor/models/arctic.py +558 -0
  948. vllm/model_executor/models/aria.py +650 -0
  949. vllm/model_executor/models/aya_vision.py +468 -0
  950. vllm/model_executor/models/baichuan.py +474 -0
  951. vllm/model_executor/models/bailing_moe.py +642 -0
  952. vllm/model_executor/models/bamba.py +514 -0
  953. vllm/model_executor/models/bert.py +665 -0
  954. vllm/model_executor/models/bert_with_rope.py +687 -0
  955. vllm/model_executor/models/blip.py +339 -0
  956. vllm/model_executor/models/blip2.py +712 -0
  957. vllm/model_executor/models/bloom.py +374 -0
  958. vllm/model_executor/models/chameleon.py +1139 -0
  959. vllm/model_executor/models/chatglm.py +476 -0
  960. vllm/model_executor/models/clip.py +407 -0
  961. vllm/model_executor/models/cohere2_vision.py +481 -0
  962. vllm/model_executor/models/commandr.py +465 -0
  963. vllm/model_executor/models/config.py +445 -0
  964. vllm/model_executor/models/dbrx.py +471 -0
  965. vllm/model_executor/models/deepseek.py +497 -0
  966. vllm/model_executor/models/deepseek_eagle.py +240 -0
  967. vllm/model_executor/models/deepseek_mtp.py +289 -0
  968. vllm/model_executor/models/deepseek_v2.py +1444 -0
  969. vllm/model_executor/models/deepseek_vl2.py +658 -0
  970. vllm/model_executor/models/dots1.py +546 -0
  971. vllm/model_executor/models/dots_ocr.py +873 -0
  972. vllm/model_executor/models/ernie45.py +43 -0
  973. vllm/model_executor/models/ernie45_moe.py +607 -0
  974. vllm/model_executor/models/ernie45_vl.py +1527 -0
  975. vllm/model_executor/models/ernie45_vl_moe.py +727 -0
  976. vllm/model_executor/models/ernie_mtp.py +268 -0
  977. vllm/model_executor/models/exaone.py +550 -0
  978. vllm/model_executor/models/exaone4.py +533 -0
  979. vllm/model_executor/models/fairseq2_llama.py +154 -0
  980. vllm/model_executor/models/falcon.py +509 -0
  981. vllm/model_executor/models/falcon_h1.py +674 -0
  982. vllm/model_executor/models/fuyu.py +399 -0
  983. vllm/model_executor/models/gemma.py +425 -0
  984. vllm/model_executor/models/gemma2.py +422 -0
  985. vllm/model_executor/models/gemma3.py +555 -0
  986. vllm/model_executor/models/gemma3_mm.py +721 -0
  987. vllm/model_executor/models/gemma3n.py +1113 -0
  988. vllm/model_executor/models/gemma3n_mm.py +761 -0
  989. vllm/model_executor/models/glm.py +23 -0
  990. vllm/model_executor/models/glm4.py +304 -0
  991. vllm/model_executor/models/glm4_1v.py +1690 -0
  992. vllm/model_executor/models/glm4_moe.py +727 -0
  993. vllm/model_executor/models/glm4_moe_mtp.py +301 -0
  994. vllm/model_executor/models/glm4v.py +654 -0
  995. vllm/model_executor/models/gpt2.py +380 -0
  996. vllm/model_executor/models/gpt_bigcode.py +344 -0
  997. vllm/model_executor/models/gpt_j.py +339 -0
  998. vllm/model_executor/models/gpt_neox.py +330 -0
  999. vllm/model_executor/models/gpt_oss.py +712 -0
  1000. vllm/model_executor/models/granite.py +489 -0
  1001. vllm/model_executor/models/granite_speech.py +794 -0
  1002. vllm/model_executor/models/granitemoe.py +550 -0
  1003. vllm/model_executor/models/granitemoehybrid.py +614 -0
  1004. vllm/model_executor/models/granitemoeshared.py +332 -0
  1005. vllm/model_executor/models/gritlm.py +262 -0
  1006. vllm/model_executor/models/grok1.py +547 -0
  1007. vllm/model_executor/models/h2ovl.py +536 -0
  1008. vllm/model_executor/models/hunyuan_v1.py +1042 -0
  1009. vllm/model_executor/models/hyperclovax_vision.py +1192 -0
  1010. vllm/model_executor/models/idefics2_vision_model.py +417 -0
  1011. vllm/model_executor/models/idefics3.py +756 -0
  1012. vllm/model_executor/models/interfaces.py +959 -0
  1013. vllm/model_executor/models/interfaces_base.py +192 -0
  1014. vllm/model_executor/models/intern_vit.py +441 -0
  1015. vllm/model_executor/models/internlm2.py +450 -0
  1016. vllm/model_executor/models/internlm2_ve.py +148 -0
  1017. vllm/model_executor/models/interns1.py +838 -0
  1018. vllm/model_executor/models/interns1_vit.py +418 -0
  1019. vllm/model_executor/models/internvl.py +1423 -0
  1020. vllm/model_executor/models/jais.py +373 -0
  1021. vllm/model_executor/models/jamba.py +591 -0
  1022. vllm/model_executor/models/jina_vl.py +144 -0
  1023. vllm/model_executor/models/keye.py +1680 -0
  1024. vllm/model_executor/models/keye_vl1_5.py +602 -0
  1025. vllm/model_executor/models/kimi_vl.py +618 -0
  1026. vllm/model_executor/models/lfm2.py +548 -0
  1027. vllm/model_executor/models/llama.py +669 -0
  1028. vllm/model_executor/models/llama4.py +746 -0
  1029. vllm/model_executor/models/llama4_eagle.py +239 -0
  1030. vllm/model_executor/models/llama_eagle.py +179 -0
  1031. vllm/model_executor/models/llama_eagle3.py +296 -0
  1032. vllm/model_executor/models/llava.py +870 -0
  1033. vllm/model_executor/models/llava_next.py +571 -0
  1034. vllm/model_executor/models/llava_next_video.py +476 -0
  1035. vllm/model_executor/models/llava_onevision.py +942 -0
  1036. vllm/model_executor/models/longcat_flash.py +715 -0
  1037. vllm/model_executor/models/longcat_flash_mtp.py +352 -0
  1038. vllm/model_executor/models/mamba.py +275 -0
  1039. vllm/model_executor/models/mamba2.py +291 -0
  1040. vllm/model_executor/models/medusa.py +169 -0
  1041. vllm/model_executor/models/midashenglm.py +792 -0
  1042. vllm/model_executor/models/mimo.py +188 -0
  1043. vllm/model_executor/models/mimo_mtp.py +280 -0
  1044. vllm/model_executor/models/minicpm.py +631 -0
  1045. vllm/model_executor/models/minicpm3.py +230 -0
  1046. vllm/model_executor/models/minicpm_eagle.py +389 -0
  1047. vllm/model_executor/models/minicpmo.py +770 -0
  1048. vllm/model_executor/models/minicpmv.py +1784 -0
  1049. vllm/model_executor/models/minimax_text_01.py +986 -0
  1050. vllm/model_executor/models/minimax_vl_01.py +426 -0
  1051. vllm/model_executor/models/mistral3.py +628 -0
  1052. vllm/model_executor/models/mixtral.py +606 -0
  1053. vllm/model_executor/models/mllama4.py +1076 -0
  1054. vllm/model_executor/models/mlp_speculator.py +206 -0
  1055. vllm/model_executor/models/modernbert.py +374 -0
  1056. vllm/model_executor/models/module_mapping.py +72 -0
  1057. vllm/model_executor/models/molmo.py +1567 -0
  1058. vllm/model_executor/models/moonvit.py +673 -0
  1059. vllm/model_executor/models/motif.py +345 -0
  1060. vllm/model_executor/models/mpt.py +329 -0
  1061. vllm/model_executor/models/nano_nemotron_vl.py +1394 -0
  1062. vllm/model_executor/models/nemotron.py +507 -0
  1063. vllm/model_executor/models/nemotron_h.py +565 -0
  1064. vllm/model_executor/models/nemotron_nas.py +481 -0
  1065. vllm/model_executor/models/nemotron_vl.py +652 -0
  1066. vllm/model_executor/models/nvlm_d.py +203 -0
  1067. vllm/model_executor/models/olmo.py +404 -0
  1068. vllm/model_executor/models/olmo2.py +439 -0
  1069. vllm/model_executor/models/olmoe.py +483 -0
  1070. vllm/model_executor/models/opt.py +412 -0
  1071. vllm/model_executor/models/orion.py +348 -0
  1072. vllm/model_executor/models/ovis.py +559 -0
  1073. vllm/model_executor/models/ovis2_5.py +642 -0
  1074. vllm/model_executor/models/paligemma.py +411 -0
  1075. vllm/model_executor/models/persimmon.py +343 -0
  1076. vllm/model_executor/models/phi.py +356 -0
  1077. vllm/model_executor/models/phi3.py +19 -0
  1078. vllm/model_executor/models/phi3v.py +698 -0
  1079. vllm/model_executor/models/phi4_multimodal.py +1475 -0
  1080. vllm/model_executor/models/phi4mm.py +1279 -0
  1081. vllm/model_executor/models/phi4mm_audio.py +1254 -0
  1082. vllm/model_executor/models/phi4mm_utils.py +1875 -0
  1083. vllm/model_executor/models/phimoe.py +679 -0
  1084. vllm/model_executor/models/pixtral.py +1345 -0
  1085. vllm/model_executor/models/plamo2.py +978 -0
  1086. vllm/model_executor/models/qwen.py +361 -0
  1087. vllm/model_executor/models/qwen2.py +523 -0
  1088. vllm/model_executor/models/qwen2_5_omni_thinker.py +984 -0
  1089. vllm/model_executor/models/qwen2_5_vl.py +1481 -0
  1090. vllm/model_executor/models/qwen2_audio.py +489 -0
  1091. vllm/model_executor/models/qwen2_moe.py +558 -0
  1092. vllm/model_executor/models/qwen2_rm.py +122 -0
  1093. vllm/model_executor/models/qwen2_vl.py +1670 -0
  1094. vllm/model_executor/models/qwen3.py +341 -0
  1095. vllm/model_executor/models/qwen3_moe.py +692 -0
  1096. vllm/model_executor/models/qwen3_next.py +1266 -0
  1097. vllm/model_executor/models/qwen3_next_mtp.py +281 -0
  1098. vllm/model_executor/models/qwen3_vl.py +1613 -0
  1099. vllm/model_executor/models/qwen3_vl_moe.py +358 -0
  1100. vllm/model_executor/models/qwen_vl.py +795 -0
  1101. vllm/model_executor/models/radio.py +576 -0
  1102. vllm/model_executor/models/registry.py +990 -0
  1103. vllm/model_executor/models/roberta.py +252 -0
  1104. vllm/model_executor/models/rvl.py +103 -0
  1105. vllm/model_executor/models/seed_oss.py +485 -0
  1106. vllm/model_executor/models/siglip.py +540 -0
  1107. vllm/model_executor/models/siglip2navit.py +689 -0
  1108. vllm/model_executor/models/skyworkr1v.py +911 -0
  1109. vllm/model_executor/models/smolvlm.py +44 -0
  1110. vllm/model_executor/models/solar.py +504 -0
  1111. vllm/model_executor/models/stablelm.py +341 -0
  1112. vllm/model_executor/models/starcoder2.py +354 -0
  1113. vllm/model_executor/models/step3_text.py +510 -0
  1114. vllm/model_executor/models/step3_vl.py +1072 -0
  1115. vllm/model_executor/models/swin.py +475 -0
  1116. vllm/model_executor/models/tarsier.py +639 -0
  1117. vllm/model_executor/models/telechat2.py +151 -0
  1118. vllm/model_executor/models/teleflm.py +79 -0
  1119. vllm/model_executor/models/terratorch.py +294 -0
  1120. vllm/model_executor/models/transformers.py +948 -0
  1121. vllm/model_executor/models/ultravox.py +654 -0
  1122. vllm/model_executor/models/utils.py +808 -0
  1123. vllm/model_executor/models/vision.py +404 -0
  1124. vllm/model_executor/models/voxtral.py +786 -0
  1125. vllm/model_executor/models/whisper.py +963 -0
  1126. vllm/model_executor/models/zamba2.py +960 -0
  1127. vllm/model_executor/parameter.py +620 -0
  1128. vllm/model_executor/utils.py +86 -0
  1129. vllm/model_executor/warmup/__init__.py +0 -0
  1130. vllm/model_executor/warmup/deep_gemm_warmup.py +230 -0
  1131. vllm/model_executor/warmup/kernel_warmup.py +83 -0
  1132. vllm/multimodal/__init__.py +33 -0
  1133. vllm/multimodal/audio.py +116 -0
  1134. vllm/multimodal/base.py +27 -0
  1135. vllm/multimodal/cache.py +697 -0
  1136. vllm/multimodal/evs.py +273 -0
  1137. vllm/multimodal/hasher.py +102 -0
  1138. vllm/multimodal/image.py +130 -0
  1139. vllm/multimodal/inputs.py +987 -0
  1140. vllm/multimodal/parse.py +511 -0
  1141. vllm/multimodal/processing.py +2148 -0
  1142. vllm/multimodal/profiling.py +284 -0
  1143. vllm/multimodal/registry.py +345 -0
  1144. vllm/multimodal/utils.py +503 -0
  1145. vllm/multimodal/video.py +319 -0
  1146. vllm/outputs.py +324 -0
  1147. vllm/platforms/__init__.py +263 -0
  1148. vllm/platforms/cpu.py +340 -0
  1149. vllm/platforms/cuda.py +668 -0
  1150. vllm/platforms/interface.py +620 -0
  1151. vllm/platforms/rocm.py +497 -0
  1152. vllm/platforms/tpu.py +233 -0
  1153. vllm/platforms/xpu.py +243 -0
  1154. vllm/plugins/__init__.py +72 -0
  1155. vllm/plugins/io_processors/__init__.py +68 -0
  1156. vllm/plugins/io_processors/interface.py +67 -0
  1157. vllm/plugins/lora_resolvers/README.md +16 -0
  1158. vllm/plugins/lora_resolvers/__init__.py +0 -0
  1159. vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
  1160. vllm/pooling_params.py +191 -0
  1161. vllm/profiler/__init__.py +0 -0
  1162. vllm/profiler/layerwise_profile.py +375 -0
  1163. vllm/profiler/utils.py +148 -0
  1164. vllm/py.typed +2 -0
  1165. vllm/ray/__init__.py +0 -0
  1166. vllm/ray/lazy_utils.py +22 -0
  1167. vllm/ray/ray_env.py +72 -0
  1168. vllm/reasoning/__init__.py +29 -0
  1169. vllm/reasoning/abs_reasoning_parsers.py +202 -0
  1170. vllm/reasoning/basic_parsers.py +156 -0
  1171. vllm/reasoning/deepseek_r1_reasoning_parser.py +67 -0
  1172. vllm/reasoning/glm4_moe_reasoning_parser.py +151 -0
  1173. vllm/reasoning/gptoss_reasoning_parser.py +87 -0
  1174. vllm/reasoning/granite_reasoning_parser.py +363 -0
  1175. vllm/reasoning/hunyuan_a13b_reasoning_parser.py +245 -0
  1176. vllm/reasoning/mistral_reasoning_parser.py +56 -0
  1177. vllm/reasoning/qwen3_reasoning_parser.py +72 -0
  1178. vllm/reasoning/seedoss_reasoning_parser.py +28 -0
  1179. vllm/reasoning/step3_reasoning_parser.py +109 -0
  1180. vllm/sampling_params.py +593 -0
  1181. vllm/scalar_type.py +349 -0
  1182. vllm/scripts.py +15 -0
  1183. vllm/sequence.py +103 -0
  1184. vllm/tasks.py +11 -0
  1185. vllm/test_utils.py +129 -0
  1186. vllm/third_party/__init__.py +0 -0
  1187. vllm/third_party/pynvml.py +6140 -0
  1188. vllm/tracing.py +136 -0
  1189. vllm/transformers_utils/__init__.py +24 -0
  1190. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1191. vllm/transformers_utils/chat_templates/registry.py +70 -0
  1192. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1193. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1194. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1195. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1196. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1197. vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
  1198. vllm/transformers_utils/config.py +1102 -0
  1199. vllm/transformers_utils/config_parser_base.py +20 -0
  1200. vllm/transformers_utils/configs/__init__.py +63 -0
  1201. vllm/transformers_utils/configs/arctic.py +207 -0
  1202. vllm/transformers_utils/configs/chatglm.py +72 -0
  1203. vllm/transformers_utils/configs/deepseek_v3.py +101 -0
  1204. vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
  1205. vllm/transformers_utils/configs/dotsocr.py +69 -0
  1206. vllm/transformers_utils/configs/eagle.py +84 -0
  1207. vllm/transformers_utils/configs/falcon.py +90 -0
  1208. vllm/transformers_utils/configs/jais.py +237 -0
  1209. vllm/transformers_utils/configs/kimi_vl.py +37 -0
  1210. vllm/transformers_utils/configs/medusa.py +63 -0
  1211. vllm/transformers_utils/configs/midashenglm.py +101 -0
  1212. vllm/transformers_utils/configs/mistral.py +165 -0
  1213. vllm/transformers_utils/configs/mlp_speculator.py +68 -0
  1214. vllm/transformers_utils/configs/moonvit.py +33 -0
  1215. vllm/transformers_utils/configs/nemotron.py +205 -0
  1216. vllm/transformers_utils/configs/nemotron_h.py +259 -0
  1217. vllm/transformers_utils/configs/nemotron_vl.py +56 -0
  1218. vllm/transformers_utils/configs/olmo3.py +80 -0
  1219. vllm/transformers_utils/configs/ovis.py +176 -0
  1220. vllm/transformers_utils/configs/qwen3_next.py +275 -0
  1221. vllm/transformers_utils/configs/radio.py +91 -0
  1222. vllm/transformers_utils/configs/speculators/__init__.py +2 -0
  1223. vllm/transformers_utils/configs/speculators/algos.py +32 -0
  1224. vllm/transformers_utils/configs/speculators/base.py +111 -0
  1225. vllm/transformers_utils/configs/step3_vl.py +123 -0
  1226. vllm/transformers_utils/configs/ultravox.py +116 -0
  1227. vllm/transformers_utils/detokenizer_utils.py +199 -0
  1228. vllm/transformers_utils/dynamic_module.py +60 -0
  1229. vllm/transformers_utils/processor.py +299 -0
  1230. vllm/transformers_utils/processors/__init__.py +16 -0
  1231. vllm/transformers_utils/processors/deepseek_vl2.py +362 -0
  1232. vllm/transformers_utils/processors/ovis.py +420 -0
  1233. vllm/transformers_utils/processors/ovis2_5.py +458 -0
  1234. vllm/transformers_utils/runai_utils.py +104 -0
  1235. vllm/transformers_utils/s3_utils.py +93 -0
  1236. vllm/transformers_utils/tokenizer.py +292 -0
  1237. vllm/transformers_utils/tokenizer_base.py +154 -0
  1238. vllm/transformers_utils/tokenizers/__init__.py +10 -0
  1239. vllm/transformers_utils/tokenizers/mistral.py +521 -0
  1240. vllm/transformers_utils/utils.py +108 -0
  1241. vllm/triton_utils/__init__.py +16 -0
  1242. vllm/triton_utils/importing.py +96 -0
  1243. vllm/usage/__init__.py +0 -0
  1244. vllm/usage/usage_lib.py +259 -0
  1245. vllm/utils/__init__.py +3566 -0
  1246. vllm/utils/deep_gemm.py +319 -0
  1247. vllm/utils/flashinfer.py +443 -0
  1248. vllm/utils/jsontree.py +178 -0
  1249. vllm/utils/tensor_schema.py +235 -0
  1250. vllm/v1/__init__.py +0 -0
  1251. vllm/v1/attention/__init__.py +0 -0
  1252. vllm/v1/attention/backends/__init__.py +0 -0
  1253. vllm/v1/attention/backends/cpu_attn.py +919 -0
  1254. vllm/v1/attention/backends/flash_attn.py +795 -0
  1255. vllm/v1/attention/backends/flashinfer.py +1181 -0
  1256. vllm/v1/attention/backends/flex_attention.py +861 -0
  1257. vllm/v1/attention/backends/gdn_attn.py +332 -0
  1258. vllm/v1/attention/backends/linear_attn.py +67 -0
  1259. vllm/v1/attention/backends/mamba1_attn.py +81 -0
  1260. vllm/v1/attention/backends/mamba2_attn.py +232 -0
  1261. vllm/v1/attention/backends/mamba_attn.py +52 -0
  1262. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1263. vllm/v1/attention/backends/mla/common.py +1783 -0
  1264. vllm/v1/attention/backends/mla/cutlass_mla.py +248 -0
  1265. vllm/v1/attention/backends/mla/flashattn_mla.py +271 -0
  1266. vllm/v1/attention/backends/mla/flashinfer_mla.py +114 -0
  1267. vllm/v1/attention/backends/mla/flashmla.py +203 -0
  1268. vllm/v1/attention/backends/mla/flashmla_sparse.py +544 -0
  1269. vllm/v1/attention/backends/mla/indexer.py +342 -0
  1270. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +255 -0
  1271. vllm/v1/attention/backends/mla/triton_mla.py +177 -0
  1272. vllm/v1/attention/backends/pallas.py +409 -0
  1273. vllm/v1/attention/backends/rocm_aiter_fa.py +549 -0
  1274. vllm/v1/attention/backends/rocm_attn.py +426 -0
  1275. vllm/v1/attention/backends/short_conv_attn.py +94 -0
  1276. vllm/v1/attention/backends/tree_attn.py +451 -0
  1277. vllm/v1/attention/backends/triton_attn.py +361 -0
  1278. vllm/v1/attention/backends/utils.py +990 -0
  1279. vllm/v1/attention/backends/xformers.py +438 -0
  1280. vllm/v1/core/__init__.py +0 -0
  1281. vllm/v1/core/block_pool.py +416 -0
  1282. vllm/v1/core/encoder_cache_manager.py +333 -0
  1283. vllm/v1/core/kv_cache_coordinator.py +440 -0
  1284. vllm/v1/core/kv_cache_manager.py +399 -0
  1285. vllm/v1/core/kv_cache_utils.py +1291 -0
  1286. vllm/v1/core/sched/__init__.py +0 -0
  1287. vllm/v1/core/sched/async_scheduler.py +47 -0
  1288. vllm/v1/core/sched/interface.py +158 -0
  1289. vllm/v1/core/sched/output.py +166 -0
  1290. vllm/v1/core/sched/request_queue.py +224 -0
  1291. vllm/v1/core/sched/scheduler.py +1296 -0
  1292. vllm/v1/core/sched/utils.py +69 -0
  1293. vllm/v1/core/single_type_kv_cache_manager.py +671 -0
  1294. vllm/v1/cudagraph_dispatcher.py +125 -0
  1295. vllm/v1/engine/__init__.py +203 -0
  1296. vllm/v1/engine/async_llm.py +742 -0
  1297. vllm/v1/engine/coordinator.py +357 -0
  1298. vllm/v1/engine/core.py +1235 -0
  1299. vllm/v1/engine/core_client.py +1334 -0
  1300. vllm/v1/engine/detokenizer.py +349 -0
  1301. vllm/v1/engine/exceptions.py +17 -0
  1302. vllm/v1/engine/llm_engine.py +370 -0
  1303. vllm/v1/engine/logprobs.py +201 -0
  1304. vllm/v1/engine/output_processor.py +576 -0
  1305. vllm/v1/engine/parallel_sampling.py +133 -0
  1306. vllm/v1/engine/processor.py +545 -0
  1307. vllm/v1/engine/utils.py +860 -0
  1308. vllm/v1/executor/__init__.py +0 -0
  1309. vllm/v1/executor/abstract.py +137 -0
  1310. vllm/v1/executor/multiproc_executor.py +726 -0
  1311. vllm/v1/executor/ray_distributed_executor.py +108 -0
  1312. vllm/v1/executor/utils.py +23 -0
  1313. vllm/v1/kv_cache_interface.py +375 -0
  1314. vllm/v1/kv_offload/__init__.py +0 -0
  1315. vllm/v1/kv_offload/abstract.py +165 -0
  1316. vllm/v1/kv_offload/backend.py +96 -0
  1317. vllm/v1/kv_offload/backends/__init__.py +0 -0
  1318. vllm/v1/kv_offload/backends/cpu.py +61 -0
  1319. vllm/v1/kv_offload/cpu.py +75 -0
  1320. vllm/v1/kv_offload/factory.py +56 -0
  1321. vllm/v1/kv_offload/lru_manager.py +132 -0
  1322. vllm/v1/kv_offload/mediums.py +39 -0
  1323. vllm/v1/kv_offload/spec.py +61 -0
  1324. vllm/v1/kv_offload/worker/__init__.py +0 -0
  1325. vllm/v1/kv_offload/worker/cpu_gpu.py +171 -0
  1326. vllm/v1/kv_offload/worker/worker.py +142 -0
  1327. vllm/v1/metrics/__init__.py +0 -0
  1328. vllm/v1/metrics/loggers.py +741 -0
  1329. vllm/v1/metrics/prometheus.py +82 -0
  1330. vllm/v1/metrics/ray_wrappers.py +152 -0
  1331. vllm/v1/metrics/reader.py +246 -0
  1332. vllm/v1/metrics/stats.py +257 -0
  1333. vllm/v1/outputs.py +161 -0
  1334. vllm/v1/pool/__init__.py +0 -0
  1335. vllm/v1/pool/metadata.py +77 -0
  1336. vllm/v1/request.py +241 -0
  1337. vllm/v1/sample/__init__.py +0 -0
  1338. vllm/v1/sample/logits_processor/__init__.py +294 -0
  1339. vllm/v1/sample/logits_processor/builtin.py +275 -0
  1340. vllm/v1/sample/logits_processor/interface.py +97 -0
  1341. vllm/v1/sample/logits_processor/state.py +161 -0
  1342. vllm/v1/sample/metadata.py +43 -0
  1343. vllm/v1/sample/ops/__init__.py +0 -0
  1344. vllm/v1/sample/ops/bad_words.py +39 -0
  1345. vllm/v1/sample/ops/logprobs.py +26 -0
  1346. vllm/v1/sample/ops/penalties.py +43 -0
  1347. vllm/v1/sample/ops/topk_topp_sampler.py +292 -0
  1348. vllm/v1/sample/rejection_sampler.py +623 -0
  1349. vllm/v1/sample/sampler.py +285 -0
  1350. vllm/v1/sample/tpu/__init__.py +0 -0
  1351. vllm/v1/sample/tpu/metadata.py +124 -0
  1352. vllm/v1/sample/tpu/sampler.py +213 -0
  1353. vllm/v1/serial_utils.py +423 -0
  1354. vllm/v1/spec_decode/__init__.py +0 -0
  1355. vllm/v1/spec_decode/eagle.py +1011 -0
  1356. vllm/v1/spec_decode/medusa.py +66 -0
  1357. vllm/v1/spec_decode/metadata.py +62 -0
  1358. vllm/v1/spec_decode/metrics.py +211 -0
  1359. vllm/v1/spec_decode/ngram_proposer.py +276 -0
  1360. vllm/v1/spec_decode/utils.py +14 -0
  1361. vllm/v1/structured_output/__init__.py +295 -0
  1362. vllm/v1/structured_output/backend_guidance.py +245 -0
  1363. vllm/v1/structured_output/backend_lm_format_enforcer.py +167 -0
  1364. vllm/v1/structured_output/backend_outlines.py +320 -0
  1365. vllm/v1/structured_output/backend_types.py +134 -0
  1366. vllm/v1/structured_output/backend_xgrammar.py +327 -0
  1367. vllm/v1/structured_output/request.py +86 -0
  1368. vllm/v1/structured_output/utils.py +454 -0
  1369. vllm/v1/utils.py +396 -0
  1370. vllm/v1/worker/__init__.py +0 -0
  1371. vllm/v1/worker/block_table.py +210 -0
  1372. vllm/v1/worker/cpu_model_runner.py +175 -0
  1373. vllm/v1/worker/cpu_worker.py +156 -0
  1374. vllm/v1/worker/gpu_input_batch.py +863 -0
  1375. vllm/v1/worker/gpu_model_runner.py +4160 -0
  1376. vllm/v1/worker/gpu_ubatch_wrapper.py +399 -0
  1377. vllm/v1/worker/gpu_worker.py +710 -0
  1378. vllm/v1/worker/kv_connector_model_runner_mixin.py +132 -0
  1379. vllm/v1/worker/lora_model_runner_mixin.py +183 -0
  1380. vllm/v1/worker/tpu_input_batch.py +587 -0
  1381. vllm/v1/worker/tpu_model_runner.py +1946 -0
  1382. vllm/v1/worker/tpu_worker.py +346 -0
  1383. vllm/v1/worker/ubatch_splitting.py +192 -0
  1384. vllm/v1/worker/ubatch_utils.py +27 -0
  1385. vllm/v1/worker/ubatching.py +224 -0
  1386. vllm/v1/worker/utils.py +344 -0
  1387. vllm/v1/worker/worker_base.py +65 -0
  1388. vllm/v1/worker/xpu_model_runner.py +57 -0
  1389. vllm/v1/worker/xpu_worker.py +179 -0
  1390. vllm/version.py +41 -0
  1391. vllm/vllm_flash_attn/.gitkeep +0 -0
  1392. vllm/worker/__init__.py +0 -0
  1393. vllm/worker/worker_base.py +279 -0
  1394. vllm_cpu-0.11.0.post2.dist-info/METADATA +348 -0
  1395. vllm_cpu-0.11.0.post2.dist-info/RECORD +1398 -0
  1396. vllm_cpu-0.11.0.post2.dist-info/WHEEL +5 -0
  1397. vllm_cpu-0.11.0.post2.dist-info/entry_points.txt +5 -0
  1398. vllm_cpu-0.11.0.post2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1629 @@
+ # SPDX-License-Identifier: Apache-2.0
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+
+ import itertools
+ from collections.abc import Sequence
+ from typing import TYPE_CHECKING, Any, Callable, Optional, Union, cast
+
+ import cloudpickle
+ import torch.nn as nn
+ from pydantic import ValidationError
+ from tqdm.auto import tqdm
+ from typing_extensions import TypeVar
+
+ from vllm.beam_search import (BeamSearchInstance, BeamSearchOutput,
+                               BeamSearchSequence,
+                               create_sort_beams_key_function)
+ from vllm.config import (CompilationConfig, ModelDType,
+                          StructuredOutputsConfig, TokenizerMode, is_init_field)
+ from vllm.engine.arg_utils import (ConvertOption, EngineArgs, HfOverrides,
+                                    PoolerConfig, RunnerOption)
+ from vllm.entrypoints.chat_utils import (ChatCompletionMessageParam,
+                                          ChatTemplateContentFormatOption,
+                                          apply_hf_chat_template,
+                                          apply_mistral_chat_template,
+                                          parse_chat_messages,
+                                          resolve_chat_template_content_format)
+ # yapf conflicts with isort for this block
+ # yapf: disable
+ from vllm.entrypoints.score_utils import (ScoreContentPartParam,
+                                           ScoreMultiModalParam,
+                                           _cosine_similarity,
+                                           _validate_score_input_lens,
+                                           compress_token_type_ids,
+                                           get_score_prompt)
+ # yapf: enable
+ from vllm.entrypoints.utils import (_validate_truncation_size,
+                                     log_non_default_args)
+ from vllm.inputs import (DataPrompt, PromptType, SingletonPrompt, TextPrompt,
+                          TokensPrompt)
+ from vllm.logger import init_logger
+ from vllm.lora.request import LoRARequest
+ from vllm.model_executor.layers.quantization import QuantizationMethods
+ from vllm.outputs import (ClassificationRequestOutput, EmbeddingRequestOutput,
+                           PoolingRequestOutput, RequestOutput,
+                           ScoringRequestOutput)
+ from vllm.plugins.io_processors import get_io_processor
+ from vllm.pooling_params import PoolingParams
+ from vllm.sampling_params import (BeamSearchParams, RequestOutputKind,
+                                   SamplingParams)
+ from vllm.tasks import PoolingTask
+ from vllm.transformers_utils.tokenizer import (AnyTokenizer, MistralTokenizer,
+                                                get_cached_tokenizer)
+ from vllm.usage.usage_lib import UsageContext
+ from vllm.utils import Counter, Device, as_iter, is_list_of
+ from vllm.v1.engine.llm_engine import LLMEngine
+ from vllm.v1.sample.logits_processor import LogitsProcessor
+
+ if TYPE_CHECKING:
+     from vllm.v1.metrics.reader import Metric
+
+ logger = init_logger(__name__)
+
+ _R = TypeVar("_R", default=Any)
+
+
+ class LLM:
+     """An LLM for generating texts from given prompts and sampling parameters.
+
+     This class includes a tokenizer, a language model (possibly distributed
+     across multiple GPUs), and GPU memory space allocated for intermediate
+     states (aka KV cache). Given a batch of prompts and sampling parameters,
+     this class generates texts from the model, using an intelligent batching
+     mechanism and efficient memory management.
+
+     Args:
+         model: The name or path of a HuggingFace Transformers model.
+         tokenizer: The name or path of a HuggingFace Transformers tokenizer.
+         tokenizer_mode: The tokenizer mode. "auto" will use the fast tokenizer
+             if available, and "slow" will always use the slow tokenizer.
+         skip_tokenizer_init: If true, skip initialization of tokenizer and
+             detokenizer. Expect valid prompt_token_ids and None for prompt
+             from the input.
+         trust_remote_code: Trust remote code (e.g., from HuggingFace) when
+             downloading the model and tokenizer.
+         allowed_local_media_path: Allows API requests to read local images
+             or videos from directories specified by the server file system.
+             This is a security risk and should only be enabled in trusted
+             environments.
+         allowed_media_domains: If set, only media URLs that belong to this
+             domain can be used for multi-modal inputs.
+         tensor_parallel_size: The number of GPUs to use for distributed
+             execution with tensor parallelism.
+         dtype: The data type for the model weights and activations. Currently,
+             we support `float32`, `float16`, and `bfloat16`. If `auto`, we use
+             the `torch_dtype` attribute specified in the model config file.
+             However, if the `torch_dtype` in the config is `float32`, we will
+             use `float16` instead.
+         quantization: The method used to quantize the model weights. Currently,
+             we support "awq", "gptq", and "fp8" (experimental).
+             If None, we first check the `quantization_config` attribute in the
+             model config file. If that is None, we assume the model weights are
+             not quantized and use `dtype` to determine the data type of
+             the weights.
+         revision: The specific model version to use. It can be a branch name,
+             a tag name, or a commit id.
+         tokenizer_revision: The specific tokenizer version to use. It can be a
+             branch name, a tag name, or a commit id.
+         seed: The seed to initialize the random number generator for sampling.
+         gpu_memory_utilization: The ratio (between 0 and 1) of GPU memory to
+             reserve for the model weights, activations, and KV cache. Higher
+             values will increase the KV cache size and thus improve the model's
+             throughput. However, if the value is too high, it may cause out-of-
+             memory (OOM) errors.
+         kv_cache_memory_bytes: Size of the KV cache per GPU in bytes. By
+             default, this is set to None and vLLM automatically infers the KV
+             cache size based on gpu_memory_utilization. However, users may
+             want to manually specify the KV cache memory size.
+             kv_cache_memory_bytes allows finer-grained control over how much
+             memory gets used than gpu_memory_utilization. Note that
+             kv_cache_memory_bytes (when not None) causes
+             gpu_memory_utilization to be ignored.
+         swap_space: The size (GiB) of CPU memory per GPU to use as swap space.
+             This can be used for temporarily storing the states of the requests
+             when their `best_of` sampling parameters are larger than 1. If all
+             requests will have `best_of=1`, you can safely set this to 0.
+             Note that `best_of` is only supported in V0. Otherwise, too small
+             values may cause out-of-memory (OOM) errors.
+         cpu_offload_gb: The size (GiB) of CPU memory to use for offloading
+             the model weights. This virtually increases the GPU memory space
+             you can use to hold the model weights, at the cost of CPU-GPU data
+             transfer for every forward pass.
+         enforce_eager: Whether to enforce eager execution. If True, we will
+             disable CUDA graph and always execute the model in eager mode.
+             If False, we will use CUDA graph and eager execution in hybrid.
+         disable_custom_all_reduce: See
+             [ParallelConfig][vllm.config.ParallelConfig].
+         hf_token: The token to use as HTTP bearer authorization for remote
+             files. If `True`, will use the token generated when running
+             `huggingface-cli login` (stored in `~/.huggingface`).
+         hf_overrides: If a dictionary, contains arguments to be forwarded to
+             the HuggingFace config. If a callable, it is called to update the
+             HuggingFace config.
+         mm_processor_kwargs: Arguments to be forwarded to the model's processor
+             for multi-modal data, e.g., image processor. Overrides for the
+             multi-modal processor obtained from `AutoProcessor.from_pretrained`.
+             The available overrides depend on the model that is being run.
+             For example, for Phi-3-Vision: `{"num_crops": 4}`.
+         pooler_config: Initialize non-default pooling config for the pooling
+             model, e.g. `PoolerConfig(pooling_type="mean", normalize=False)`.
+         override_pooler_config: [DEPRECATED] Use `pooler_config` instead. This
+             argument is deprecated and will be removed in v0.12.0 or v1.0.0,
+             whichever is sooner.
+         compilation_config: Either an integer or a dictionary. If it is an
+             integer, it is used as the level of compilation optimization. If it
+             is a dictionary, it can specify the full compilation configuration.
+         **kwargs: Arguments for [`EngineArgs`][vllm.EngineArgs].
+
+     Note:
+         This class is intended to be used for offline inference. For online
+         serving, use the [AsyncLLMEngine][vllm.AsyncLLMEngine] class instead.
+     """
+
+     def __init__(
+         self,
+         model: str,
+         *,
+         runner: RunnerOption = "auto",
+         convert: ConvertOption = "auto",
+         tokenizer: Optional[str] = None,
+         tokenizer_mode: TokenizerMode = "auto",
+         skip_tokenizer_init: bool = False,
+         trust_remote_code: bool = False,
+         allowed_local_media_path: str = "",
+         allowed_media_domains: Optional[list[str]] = None,
+         tensor_parallel_size: int = 1,
+         dtype: ModelDType = "auto",
+         quantization: Optional[QuantizationMethods] = None,
+         revision: Optional[str] = None,
+         tokenizer_revision: Optional[str] = None,
+         seed: Optional[int] = None,
+         gpu_memory_utilization: float = 0.9,
+         swap_space: float = 4,
+         cpu_offload_gb: float = 0,
+         enforce_eager: bool = False,
+         disable_custom_all_reduce: bool = False,
+         hf_token: Optional[Union[bool, str]] = None,
+         hf_overrides: Optional[HfOverrides] = None,
+         mm_processor_kwargs: Optional[dict[str, Any]] = None,
+         pooler_config: Optional[PoolerConfig] = None,
+         override_pooler_config: Optional[PoolerConfig] = None,
+         structured_outputs_config: Optional[Union[dict[
+             str, Any], StructuredOutputsConfig]] = None,
+         kv_cache_memory_bytes: Optional[int] = None,
+         compilation_config: Optional[Union[int, dict[str, Any],
+                                            CompilationConfig]] = None,
+         logits_processors: Optional[list[Union[str,
+                                                 type[LogitsProcessor]]]] = None,
+         **kwargs: Any,
+     ) -> None:
+         """LLM constructor."""
+
+         if "disable_log_stats" not in kwargs:
+             kwargs["disable_log_stats"] = True
+
+         if "worker_cls" in kwargs:
+             worker_cls = kwargs["worker_cls"]
+             # If the worker_cls is not a qualified string name,
+             # we serialize it using cloudpickle to avoid pickling issues.
+             if isinstance(worker_cls, type):
+                 kwargs["worker_cls"] = cloudpickle.dumps(worker_cls)
+
+         if "kv_transfer_config" in kwargs and isinstance(
+                 kwargs["kv_transfer_config"], dict):
+             from vllm.config.kv_transfer import KVTransferConfig
+             raw_config_dict = kwargs["kv_transfer_config"]
+             try:
+                 kwargs["kv_transfer_config"] = KVTransferConfig(
+                     **raw_config_dict)
+             except ValidationError as e:
+                 logger.error(
+                     "Failed to convert 'kv_transfer_config' dict to "
+                     "KVTransferConfig object. Dict: %s. Error: %s",
+                     raw_config_dict, e)
+                 # Consider re-raising a more specific vLLM error or ValueError
+                 # to provide better context to the user.
+                 raise ValueError(
+                     f"Invalid 'kv_transfer_config' provided: {e}") from e
+
+         if hf_overrides is None:
+             hf_overrides = {}
+
+         if compilation_config is not None:
+             if isinstance(compilation_config, int):
+                 compilation_config_instance = CompilationConfig(
+                     level=compilation_config)
+             elif isinstance(compilation_config, dict):
+                 compilation_config_instance = CompilationConfig(
+                     **{
+                         k: v
+                         for k, v in compilation_config.items()
+                         if is_init_field(CompilationConfig, k)
+                     })
+             else:
+                 compilation_config_instance = compilation_config
+         else:
+             compilation_config_instance = CompilationConfig()
+
+         if structured_outputs_config is not None:
+             if isinstance(structured_outputs_config, dict):
+                 structured_outputs_instance = StructuredOutputsConfig(
+                     **{
+                         k: v
+                         for k, v in structured_outputs_config.items()
+                         if is_init_field(StructuredOutputsConfig, k)
+                     })
+             else:
+                 structured_outputs_instance = structured_outputs_config
+         else:
+             structured_outputs_instance = StructuredOutputsConfig()
+
+         engine_args = EngineArgs(
+             model=model,
+             runner=runner,
+             convert=convert,
+             tokenizer=tokenizer,
+             tokenizer_mode=tokenizer_mode,
+             skip_tokenizer_init=skip_tokenizer_init,
+             trust_remote_code=trust_remote_code,
+             allowed_local_media_path=allowed_local_media_path,
+             allowed_media_domains=allowed_media_domains,
+             tensor_parallel_size=tensor_parallel_size,
+             dtype=dtype,
+             quantization=quantization,
+             revision=revision,
+             tokenizer_revision=tokenizer_revision,
+             seed=seed,
+             gpu_memory_utilization=gpu_memory_utilization,
+             kv_cache_memory_bytes=kv_cache_memory_bytes,
+             swap_space=swap_space,
+             cpu_offload_gb=cpu_offload_gb,
+             enforce_eager=enforce_eager,
+             disable_custom_all_reduce=disable_custom_all_reduce,
+             hf_token=hf_token,
+             hf_overrides=hf_overrides,
+             mm_processor_kwargs=mm_processor_kwargs,
+             pooler_config=pooler_config,
+             override_pooler_config=override_pooler_config,
+             structured_outputs_config=structured_outputs_instance,
+             compilation_config=compilation_config_instance,
+             logits_processors=logits_processors,
+             **kwargs,
+         )
+
+         log_non_default_args(engine_args)
+
+         # Create the Engine (autoselects V0 vs V1)
+         self.llm_engine = LLMEngine.from_engine_args(
+             engine_args=engine_args, usage_context=UsageContext.LLM_CLASS)
+         self.engine_class = type(self.llm_engine)
+
+         self.request_counter = Counter()
+         self.default_sampling_params: Union[dict[str, Any], None] = None
+
+         supported_tasks = self.llm_engine.get_supported_tasks()  # type: ignore
+
+         logger.info("Supported tasks: %s", supported_tasks)
+
+         self.supported_tasks = supported_tasks
+
+         # Load the Input/Output processor plugin, if any.
+         io_processor_plugin = self.llm_engine.model_config.io_processor_plugin
+         self.io_processor = get_io_processor(self.llm_engine.vllm_config,
+                                              io_processor_plugin)
+
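# Illustrative usage sketch (not part of the packaged diff): constructing the
# class above. The model name is a placeholder assumption; note how an int
# `compilation_config` is normalized to `CompilationConfig(level=...)` by
# `__init__`.
from vllm import LLM, SamplingParams

llm = LLM(model="facebook/opt-125m",  # placeholder model path
          enforce_eager=True,
          compilation_config=3)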
+     def get_tokenizer(self) -> AnyTokenizer:
+         return self.llm_engine.get_tokenizer()
+
+     def set_tokenizer(self, tokenizer: AnyTokenizer) -> None:
+         # Because CachedTokenizer is created dynamically, we have no choice
+         # but to compare class names. This may misjudge user-defined
+         # tokenizers whose class names start with 'Cached'.
+         if tokenizer.__class__.__name__.startswith("Cached"):
+             self.llm_engine.tokenizer = tokenizer
+         else:
+             self.llm_engine.tokenizer = get_cached_tokenizer(tokenizer)
+
+     def get_default_sampling_params(self) -> SamplingParams:
+         if self.default_sampling_params is None:
+             self.default_sampling_params = (
+                 self.llm_engine.model_config.get_diff_sampling_param())
+         if self.default_sampling_params:
+             return SamplingParams.from_optional(**self.default_sampling_params)
+         return SamplingParams()
+
+     def generate(
+         self,
+         prompts: Union[PromptType, Sequence[PromptType]],
+         sampling_params: Optional[Union[SamplingParams,
+                                         Sequence[SamplingParams]]] = None,
+         *,
+         use_tqdm: Union[bool, Callable[..., tqdm]] = True,
+         lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+         priority: Optional[list[int]] = None,
+     ) -> list[RequestOutput]:
+         """Generates the completions for the input prompts.
+
+         This class automatically batches the given prompts, considering
+         the memory constraint. For the best performance, put all of your
+         prompts into a single list and pass it to this method.
+
+         Args:
+             prompts: The prompts to the LLM. You may pass a sequence of prompts
+                 for batch inference. See [PromptType][vllm.inputs.PromptType]
+                 for more details about the format of each prompt.
+             sampling_params: The sampling parameters for text generation. If
+                 None, we use the default sampling parameters.
+                 When it is a single value, it is applied to every prompt.
+                 When it is a list, the list must have the same length as the
+                 prompts and it is paired one by one with the prompt.
+             use_tqdm: If `True`, shows a tqdm progress bar.
+                 If a callable (e.g., `functools.partial(tqdm, leave=False)`),
+                 it is used to create the progress bar.
+                 If `False`, no progress bar is created.
+             lora_request: LoRA request to use for generation, if any.
+             priority: The priority of the requests, if any.
+                 Only applicable when the priority scheduling policy is enabled.
+
+         Returns:
+             A list of `RequestOutput` objects containing the
+             generated completions in the same order as the input prompts.
+
+         Note:
+             Using `prompts` and `prompt_token_ids` as keyword parameters is
+             considered legacy and may be deprecated in the future. You should
+             instead pass them via the `inputs` parameter.
+         """
+         model_config = self.llm_engine.model_config
+         runner_type = model_config.runner_type
+         if runner_type != "generate":
+             raise ValueError(
+                 "LLM.generate() is only supported for generative models. "
+                 "Try passing `--runner generate` to use the model as a "
+                 "generative model.")
+
+         if sampling_params is None:
+             # Use default sampling params.
+             sampling_params = self.get_default_sampling_params()
+
+         # Add any modality-specific LoRAs to the corresponding prompts.
+         lora_request = self._get_modality_specific_lora_reqs(
+             prompts, lora_request)
+
+         self._validate_and_add_requests(
+             prompts=prompts,
+             params=sampling_params,
+             use_tqdm=use_tqdm,
+             lora_request=lora_request,
+             priority=priority,
+         )
+
+         outputs = self._run_engine(use_tqdm=use_tqdm)
+         return self.engine_class.validate_outputs(outputs, RequestOutput)
+
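# Illustrative sketch (not part of the diff): batched generation with the
# method above, reusing `llm` from the earlier sketch. A single SamplingParams
# applies to every prompt; a list must match the prompts one-to-one.
params = SamplingParams(temperature=0.8, max_tokens=64)
outputs = llm.generate(["Hello, my name is", "The capital of France is"],
                       sampling_params=params)
for out in outputs:
    print(out.outputs[0].text)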
+     def _get_modality_specific_lora_reqs(
+             self, prompts: Union[PromptType, Sequence[PromptType]],
+             lora_request: Optional[Union[list[LoRARequest], LoRARequest]]):
+         # Grab the lora config off the vllm config on the engine,
+         # since this is the same for both v0 & v1.
+         lora_config = self.llm_engine.vllm_config.lora_config
+
+         # If there's no lora config / default_mm_loras, or the model
+         # isn't multimodal, leave the lora as is.
+         if (lora_config is None
+                 or not self.llm_engine.model_config.is_multimodal_model
+                 or (lora_config and lora_config.default_mm_loras is None)):
+             return lora_request
+
+         if not isinstance(prompts, Sequence):
+             prompts = [prompts]
+
+         optional_loras = ([lora_request] * len(prompts)
+                           if not isinstance(lora_request, Sequence) else
+                           lora_request)
+
+         return [
+             self._resolve_single_prompt_mm_lora(
+                 prompt,
+                 opt_lora_req,
+                 lora_config.default_mm_loras,
+             ) for prompt, opt_lora_req in zip(prompts, optional_loras)
+         ]
+
+     def _resolve_single_prompt_mm_lora(self, prompt: PromptType,
+                                        lora_request: Optional[LoRARequest],
+                                        default_mm_loras: Optional[dict[str,
+                                                                        str]]):
+         if (not default_mm_loras or not isinstance(prompt, dict)
+                 or "multi_modal_data" not in prompt):
+             return lora_request
+
+         prompt = cast(Union[TextPrompt, TokensPrompt], prompt)
+
+         intersection = set(prompt["multi_modal_data"].keys()) \
+             .intersection(default_mm_loras.keys())
+         if not intersection:
+             return lora_request
+         if len(intersection) > 1:
+             # TODO: Would be nice to be able to have multiple loras per prompt
+             logger.warning(
+                 "Multiple modality specific loras were registered and would be"
+                 " used by a single prompt consuming several modalities; "
+                 " currently we only support one lora per request; as such,"
+                 " lora(s) registered with modalities: %s"
+                 " will be skipped", intersection)
+             return lora_request
+
+         # Build the LoRA request; the ID of the default mm lora is the
+         # index of the modality name sorted alphabetically + 1.
+         modality_name = intersection.pop()
+         modality_lora_path = default_mm_loras[modality_name]
+         modality_lora_id = sorted(default_mm_loras).index(modality_name) + 1
+
+         # If there is an ID collision, warn, but always send the explicitly
+         # provided request.
+         if lora_request:
+             if lora_request.lora_int_id != modality_lora_id:
+                 logger.warning(
+                     "A modality with a registered lora and a lora_request "
+                     "with a different ID were provided; falling back to the "
+                     "lora_request as we only apply one LoRARequest per prompt")
+             return lora_request
+
+         return LoRARequest(
+             modality_name,
+             modality_lora_id,
+             modality_lora_path,
+         )
+
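# Worked example (comment only, not part of the diff) of the ID rule above,
# with placeholder paths: given default_mm_loras = {"image": "/loras/img",
# "audio": "/loras/aud"}, the sorted modality names are ["audio", "image"],
# so an audio prompt resolves to LoRARequest("audio", 1, "/loras/aud") and an
# image prompt to LoRARequest("image", 2, "/loras/img").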
+     def collective_rpc(self,
+                        method: Union[str, Callable[..., _R]],
+                        timeout: Optional[float] = None,
+                        args: tuple = (),
+                        kwargs: Optional[dict[str, Any]] = None) -> list[_R]:
+         """
+         Execute an RPC call on all workers.
+
+         Args:
+             method: Name of the worker method to execute, or a callable that
+                 is serialized and sent to all workers to execute.
+
+                 If the method is a callable, it should accept an additional
+                 `self` argument, in addition to the arguments passed in `args`
+                 and `kwargs`. The `self` argument will be the worker object.
+             timeout: Maximum time in seconds to wait for execution. Raises a
+                 [`TimeoutError`][] on timeout. `None` means wait indefinitely.
+             args: Positional arguments to pass to the worker method.
+             kwargs: Keyword arguments to pass to the worker method.
+
+         Returns:
+             A list containing the results from each worker.
+
+         Note:
+             It is recommended to use this API only to pass control messages,
+             and to set up data-plane communication to pass data.
+         """
+
+         return self.llm_engine.collective_rpc(method, timeout, args, kwargs)
+
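# Illustrative sketch (not part of the diff): a callable RPC per the docstring
# above. The function runs on every worker and receives the worker object as
# `self`; `local_rank` is assumed here to be a worker attribute.
def report_local_rank(self) -> int:
    return self.local_rank

ranks = llm.collective_rpc(report_local_rank)  # one result per worker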
509
+ def apply_model(self, func: Callable[[nn.Module], _R]) -> list[_R]:
510
+ """
511
+ Run a function directly on the model inside each worker,
512
+ returning the result for each of them.
513
+
514
+ !!! warning
515
+ To reduce the overhead of data transfer, avoid returning large
516
+ arrays or tensors from this method. If you must return them,
517
+ make sure you move them to CPU first to avoid taking up additional
518
+ VRAM!
519
+ """
520
+ return self.llm_engine.apply_model(func)
521
+
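# Illustrative sketch (not part of the diff): apply_model with a small,
# CPU-friendly return value, in line with the warning above.
num_params = llm.apply_model(
    lambda model: sum(p.numel() for p in model.parameters()))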
+     def _get_beam_search_lora_requests(
+         self,
+         lora_request: Optional[Union[list[LoRARequest], LoRARequest]],
+         prompts: list[Union[TokensPrompt, TextPrompt]],
+     ) -> list[Optional[LoRARequest]]:
+         """Get the optional lora request corresponding to each prompt."""
+         if isinstance(lora_request,
+                       Sequence) and len(lora_request) != len(prompts):
+             raise ValueError(
+                 "Lora request list should be the same length as the prompts")
+
+         if lora_request is None or isinstance(lora_request, LoRARequest):
+             return [lora_request] * len(prompts)
+
+         raise TypeError(f"Invalid lora_request type {type(lora_request)}")
+
+     def beam_search(
+         self,
+         prompts: list[Union[TokensPrompt, TextPrompt]],
+         params: BeamSearchParams,
+         lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+         use_tqdm: bool = False,
+         concurrency_limit: Optional[int] = None,
+     ) -> list[BeamSearchOutput]:
+         """
+         Generate sequences using beam search.
+
+         Args:
+             prompts: A list of prompts. Each prompt can be a string or a list
+                 of token IDs.
+             params: The beam search parameters.
+             lora_request: LoRA request to use for generation, if any.
+             use_tqdm: Whether to use tqdm to display the progress bar.
+             concurrency_limit: The maximum number of concurrent requests.
+                 If None, the number of concurrent requests is unlimited.
+         """
+         # TODO: how does beam search work together with length penalty,
+         # frequency penalty, and stopping criteria, etc.?
+         beam_width = params.beam_width
+         max_tokens = params.max_tokens
+         temperature = params.temperature
+         ignore_eos = params.ignore_eos
+         length_penalty = params.length_penalty
+
+         lora_requests = self._get_beam_search_lora_requests(
+             lora_request, prompts)
+
+         tokenizer = self.get_tokenizer()
+         sort_beams_key = create_sort_beams_key_function(
+             tokenizer.eos_token_id,
+             length_penalty,
+         )
+
+         if use_tqdm and concurrency_limit is not None:
+             logger.warning(
+                 "Progress bar is not supported when using concurrency_limit. "
+                 "Disabling progress bar.")
+             use_tqdm = False
+
+         if concurrency_limit is None:
+             concurrency_limit = len(prompts)
+
+         def create_tokens_prompt_from_beam(
+                 beam: BeamSearchSequence) -> TokensPrompt:
+             token_prompt_kwargs: TokensPrompt = {
+                 "prompt_token_ids": beam.tokens
+             }
+             if beam.multi_modal_data is not None:
+                 token_prompt_kwargs["multi_modal_data"] = beam.multi_modal_data
+
+             if beam.mm_processor_kwargs is not None:
+                 token_prompt_kwargs[
+                     "mm_processor_kwargs"] = beam.mm_processor_kwargs
+             return TokensPrompt(**token_prompt_kwargs)
+
+         # Generate 2 * beam_width candidates at each step,
+         # following the huggingface transformers implementation
+         # at https://github.com/huggingface/transformers/blob/e15687fffe5c9d20598a19aeab721ae0a7580f8a/src/transformers/generation/beam_search.py#L534 # noqa
+         beam_search_params = SamplingParams(logprobs=2 * beam_width,
+                                             max_tokens=1,
+                                             temperature=temperature)
+         instances: list[BeamSearchInstance] = []
+
+         for lora_req, prompt in zip(lora_requests, prompts):
+             # Add multimodal processor kwargs & data.
+             mm_kwargs = {}
+             if "multi_modal_data" in prompt:
+                 mm_kwargs["multi_modal_data"] = prompt["multi_modal_data"]
+             if "mm_processor_kwargs" in prompt:
+                 mm_kwargs["mm_processor_kwargs"] = prompt[
+                     "mm_processor_kwargs"]
+
+             if "prompt_token_ids" in prompt:
+                 prompt = cast(TokensPrompt, prompt)  # Needed for mypy
+                 prompt_tokens = prompt["prompt_token_ids"]
+             else:
+                 prompt_tokens = tokenizer.encode(prompt["prompt"])
+
+             instances.append(
+                 BeamSearchInstance(
+                     prompt_tokens,
+                     lora_request=lora_req,
+                     logprobs=None,
+                     **mm_kwargs,
+                 ), )
+
+         for prompt_start in range(0, len(prompts), concurrency_limit):
+             instances_batch = instances[prompt_start:prompt_start +
+                                         concurrency_limit]
+
+             token_iter = range(max_tokens)
+             if use_tqdm:
+                 token_iter = tqdm(token_iter,
+                                   desc="Beam search",
+                                   unit="token",
+                                   unit_scale=False)
+                 logger.warning(
+                     "The progress bar shows the upper bound on token steps and "
+                     "may finish early due to stopping conditions. It does not "
+                     "reflect instance-level progress.")
+             for _ in token_iter:
+                 all_beams: list[BeamSearchSequence] = list(
+                     sum((instance.beams for instance in instances_batch), []))
+                 pos = [0] + list(
+                     itertools.accumulate(
+                         len(instance.beams) for instance in instances_batch))
+                 instance_start_and_end: list[tuple[int, int]] = list(
+                     zip(pos[:-1], pos[1:]))
+
+                 if len(all_beams) == 0:
+                     break
+
+                 # Create corresponding batch entries for prompt & optional lora.
+                 prompts_batch, lora_req_batch = zip(
+                     *[(create_tokens_prompt_from_beam(beam), beam.lora_request)
+                       for beam in all_beams])
+
+                 # Only runs for one step, so we don't need to use tqdm here.
+                 output = self.generate(prompts_batch,
+                                        sampling_params=beam_search_params,
+                                        use_tqdm=False,
+                                        lora_request=lora_req_batch)
+
+                 for (start, end), instance in zip(instance_start_and_end,
+                                                   instances_batch):
+                     instance_new_beams = []
+                     for i in range(start, end):
+                         current_beam = all_beams[i]
+                         result = output[i]
+
+                         if result.outputs[0].logprobs is not None:
+                             # If `result.outputs[0].logprobs` is None, it means
+                             # the sequence is completed because of the
+                             # max-model-len or abortion. We don't need to add
+                             # it to the new beams.
+                             logprobs = result.outputs[0].logprobs[0]
+                             for token_id, logprob_obj in logprobs.items():
+                                 new_beam = BeamSearchSequence(
+                                     tokens=current_beam.tokens + [token_id],
+                                     logprobs=current_beam.logprobs +
+                                     [logprobs],
+                                     lora_request=current_beam.lora_request,
+                                     cum_logprob=current_beam.cum_logprob +
+                                     logprob_obj.logprob,
+                                     multi_modal_data=current_beam.
+                                     multi_modal_data,
+                                     mm_processor_kwargs=current_beam.
+                                     mm_processor_kwargs)
+
+                                 if token_id == tokenizer.eos_token_id and \
+                                         not ignore_eos:
+                                     instance.completed.append(new_beam)
+                                 else:
+                                     instance_new_beams.append(new_beam)
+                     sorted_beams = sorted(instance_new_beams,
+                                           key=sort_beams_key,
+                                           reverse=True)
+                     instance.beams = sorted_beams[:beam_width]
+
+         outputs = []
+         for instance in instances:
+             instance.completed.extend(instance.beams)
+             sorted_completed = sorted(instance.completed,
+                                       key=sort_beams_key,
+                                       reverse=True)
+             best_beams = sorted_completed[:beam_width]
+
+             for beam in best_beams:
+                 beam.text = tokenizer.decode(beam.tokens)
+             outputs.append(BeamSearchOutput(sequences=best_beams))
+
+         return outputs
+
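# Illustrative sketch (not part of the diff): beam search over one prompt.
# Only fields the method above reads (beam_width, max_tokens, etc.) are set;
# beams in each BeamSearchOutput are sorted best-first.
from vllm.inputs import TextPrompt
from vllm.sampling_params import BeamSearchParams

bs_params = BeamSearchParams(beam_width=4, max_tokens=32)
results = llm.beam_search([TextPrompt(prompt="The meaning of life is")],
                          bs_params)
best = results[0].sequences[0]
print(best.text, best.cum_logprob)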
+     def preprocess_chat(
+         self,
+         messages: Union[list[ChatCompletionMessageParam],
+                         list[list[ChatCompletionMessageParam]]],
+         chat_template: Optional[str] = None,
+         chat_template_content_format: ChatTemplateContentFormatOption = "auto",
+         add_generation_prompt: bool = True,
+         continue_final_message: bool = False,
+         tools: Optional[list[dict[str, Any]]] = None,
+         chat_template_kwargs: Optional[dict[str, Any]] = None,
+         mm_processor_kwargs: Optional[dict[str, Any]] = None,
+     ) -> list[TokensPrompt]:
+         """
+         Generate the prompt for a chat conversation. The pre-processed
+         prompt can then be used as input for the other LLM methods.
+
+         Refer to `chat` for a complete description of the arguments.
+
+         Returns:
+             A list of `TokensPrompt` objects containing the tokenized
+             prompt after chat template interpolation, and the
+             pre-processed multi-modal inputs.
+         """
+         list_of_messages: list[list[ChatCompletionMessageParam]]
+
+         # Handle multi and single conversations.
+         if is_list_of(messages, list):
+             # messages is list[list[...]]
+             list_of_messages = cast(list[list[ChatCompletionMessageParam]],
+                                     messages)
+         else:
+             # messages is list[...]
+             list_of_messages = [
+                 cast(list[ChatCompletionMessageParam], messages)
+             ]
+
+         tokenizer = self.get_tokenizer()
+         model_config = self.llm_engine.get_model_config()
+         resolved_content_format = resolve_chat_template_content_format(
+             chat_template,
+             tools,
+             chat_template_content_format,
+             tokenizer,
+             model_config=model_config,
+         )
+
+         _chat_template_kwargs: dict[str, Any] = dict(
+             chat_template=chat_template,
+             add_generation_prompt=add_generation_prompt,
+             continue_final_message=continue_final_message,
+             tools=tools,
+         )
+         _chat_template_kwargs.update(chat_template_kwargs or {})
+
+         prompts: list[TokensPrompt] = []
+
+         for msgs in list_of_messages:
+             # NOTE: _parse_chat_message_content_parts() currently doesn't
+             # handle mm_processor_kwargs, since there is no implementation in
+             # the chat message parsing for it.
+             conversation, mm_data, mm_uuids = parse_chat_messages(
+                 msgs,
+                 model_config,
+                 tokenizer,
+                 content_format=resolved_content_format,
+             )
+
+             if isinstance(tokenizer, MistralTokenizer):
+                 prompt_token_ids = apply_mistral_chat_template(
+                     tokenizer,
+                     messages=msgs,
+                     **_chat_template_kwargs,
+                 )
+             else:
+                 prompt_str = apply_hf_chat_template(
+                     tokenizer=tokenizer,
+                     conversation=conversation,
+                     model_config=model_config,
+                     **_chat_template_kwargs,
+                 )
+                 # Special tokens are already included in chat templates, so
+                 # they should not be added by the tokenizer in this case.
+                 prompt_token_ids = tokenizer.encode(prompt_str,
+                                                     add_special_tokens=False)
+
+             prompt = TokensPrompt(prompt_token_ids=prompt_token_ids)
+
+             if mm_data is not None:
+                 prompt["multi_modal_data"] = mm_data
+
+             if mm_uuids is not None:
+                 prompt["multi_modal_uuids"] = mm_uuids
+
+             if mm_processor_kwargs is not None:
+                 prompt["mm_processor_kwargs"] = mm_processor_kwargs
+
+             prompts.append(prompt)
+
+         return prompts
+
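# Illustrative sketch (not part of the diff): tokenize a conversation once,
# then reuse the pre-processed prompts with generate(), as the docstring
# above describes.
chat_prompts = llm.preprocess_chat(
    [{"role": "user", "content": "Write a haiku about CPUs."}])
haiku_outputs = llm.generate(chat_prompts, SamplingParams(max_tokens=48))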
+     def chat(
+         self,
+         messages: Union[list[ChatCompletionMessageParam],
+                         list[list[ChatCompletionMessageParam]]],
+         sampling_params: Optional[Union[SamplingParams,
+                                         list[SamplingParams]]] = None,
+         use_tqdm: Union[bool, Callable[..., tqdm]] = True,
+         lora_request: Optional[LoRARequest] = None,
+         chat_template: Optional[str] = None,
+         chat_template_content_format: ChatTemplateContentFormatOption = "auto",
+         add_generation_prompt: bool = True,
+         continue_final_message: bool = False,
+         tools: Optional[list[dict[str, Any]]] = None,
+         chat_template_kwargs: Optional[dict[str, Any]] = None,
+         mm_processor_kwargs: Optional[dict[str, Any]] = None,
+     ) -> list[RequestOutput]:
+         """
+         Generate responses for a chat conversation.
+
+         The chat conversation is converted into a text prompt using the
+         tokenizer, and the [generate][vllm.LLM.generate] method is called to
+         generate the responses.
+
+         Multi-modal inputs can be passed in the same way you would pass them
+         to the OpenAI API.
+
+         Args:
+             messages: A list of conversations or a single conversation.
+
+                 - Each conversation is represented as a list of messages.
+                 - Each message is a dictionary with 'role' and 'content' keys.
+
+             sampling_params: The sampling parameters for text generation.
+                 If None, we use the default sampling parameters. When it
+                 is a single value, it is applied to every prompt. When it
+                 is a list, the list must have the same length as the
+                 prompts and it is paired one by one with the prompt.
+             use_tqdm: If `True`, shows a tqdm progress bar.
+                 If a callable (e.g., `functools.partial(tqdm, leave=False)`),
+                 it is used to create the progress bar.
+                 If `False`, no progress bar is created.
+             lora_request: LoRA request to use for generation, if any.
+             chat_template: The template to use for structuring the chat.
+                 If not provided, the model's default chat template will be used.
+             chat_template_content_format: The format to render message content.
+
+                 - "string" will render the content as a string.
+                   Example: `"Who are you?"`
+                 - "openai" will render the content as a list of dictionaries,
+                   similar to the OpenAI schema.
+                   Example: `[{"type": "text", "text": "Who are you?"}]`
+
+             add_generation_prompt: If True, adds a generation template
+                 to each message.
+             continue_final_message: If True, continues the final message in
+                 the conversation instead of starting a new one. Cannot be
+                 `True` if `add_generation_prompt` is also `True`.
+             chat_template_kwargs: Additional kwargs to pass to the chat
+                 template.
+             mm_processor_kwargs: Multimodal processor kwarg overrides for this
+                 chat request. Only used for offline requests.
+
+         Returns:
+             A list of `RequestOutput` objects containing the generated
+             responses in the same order as the input messages.
+         """
+
+         prompts = self.preprocess_chat(
+             messages=messages,
+             chat_template=chat_template,
+             chat_template_content_format=chat_template_content_format,
+             add_generation_prompt=add_generation_prompt,
+             continue_final_message=continue_final_message,
+             tools=tools,
+             chat_template_kwargs=chat_template_kwargs,
+             mm_processor_kwargs=mm_processor_kwargs,
+         )
+
+         return self.generate(
+             prompts,
+             sampling_params=sampling_params,
+             use_tqdm=use_tqdm,
+             lora_request=lora_request,
+         )
+
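# Illustrative sketch (not part of the diff): a single conversation in the
# OpenAI-style message format the docstring above describes.
chat_outputs = llm.chat([
    {"role": "system", "content": "You are a terse assistant."},
    {"role": "user", "content": "Who are you?"},
])
print(chat_outputs[0].outputs[0].text)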
+     def encode(
+         self,
+         prompts: Union[PromptType, Sequence[PromptType], DataPrompt],
+         pooling_params: Optional[Union[PoolingParams,
+                                        Sequence[PoolingParams]]] = None,
+         *,
+         truncate_prompt_tokens: Optional[int] = None,
+         use_tqdm: Union[bool, Callable[..., tqdm]] = True,
+         lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+         pooling_task: PoolingTask = "encode",
+         tokenization_kwargs: Optional[dict[str, Any]] = None,
+     ) -> list[PoolingRequestOutput]:
+         """Apply pooling to the hidden states corresponding to the input
+         prompts.
+
+         This class automatically batches the given prompts, considering
+         the memory constraint. For the best performance, put all of your
+         prompts into a single list and pass it to this method.
+
+         Args:
+             prompts: The prompts to the LLM. You may pass a sequence of prompts
+                 for batch inference. See [PromptType][vllm.inputs.PromptType]
+                 for more details about the format of each prompt.
+             pooling_params: The pooling parameters for pooling. If None, we
+                 use the default pooling parameters.
+             use_tqdm: If `True`, shows a tqdm progress bar.
+                 If a callable (e.g., `functools.partial(tqdm, leave=False)`),
+                 it is used to create the progress bar.
+                 If `False`, no progress bar is created.
+             lora_request: LoRA request to use for generation, if any.
+             pooling_task: Override the pooling task to use.
+             tokenization_kwargs: Overrides the tokenization_kwargs set in
+                 pooling_params.
+
+         Returns:
+             A list of `PoolingRequestOutput` objects containing the
+             pooled hidden states in the same order as the input prompts.
+
+         Note:
+             Using `prompts` and `prompt_token_ids` as keyword parameters is
+             considered legacy and may be deprecated in the future. You should
+             instead pass them via the `inputs` parameter.
+         """
+
+         if self.supported_tasks == ["encode"] and pooling_task is None:
+             pooling_task = "encode"
+
+         if pooling_task is None:
+             if "embed" in self.supported_tasks:
+                 pooling_task = "embed"
+             else:
+                 pooling_task = "encode"
+
+             logger.warning_once(
+                 "`LLM.encode` is currently using `pooling_task = %s`.\n"
+                 "Please use one of the more specific methods or set the "
+                 "task directly when using `LLM.encode`:\n"
+                 " - For embeddings, use `LLM.embed(...)` "
+                 "or `pooling_task=\"embed\"`.\n"
+                 " - For classification logits, use `LLM.classify(...)` "
+                 "or `pooling_task=\"classify\"`.\n"
+                 " - For rewards, use `LLM.reward(...)` "
+                 "or `pooling_task=\"reward\"`\n"
+                 " - For similarity scores, use `LLM.score(...)`.",
+                 pooling_task)
+
+         model_config = self.llm_engine.model_config
+         runner_type = model_config.runner_type
+         if runner_type != "pooling":
+             raise ValueError(
+                 "LLM.encode() is only supported for pooling models. "
+                 "Try passing `--runner pooling` to use the model as a "
+                 "pooling model.")
+
+         if pooling_task not in self.supported_tasks:
+             raise ValueError(
+                 f"pooling_task must be one of {self.supported_tasks}.")
+
+         if pooling_params is None:
+             # Use default pooling params.
+             pooling_params = PoolingParams()
+
+         for param in as_iter(pooling_params):
+             param.verify(pooling_task, model_config)
+             # For backwards compatibility.
+             if truncate_prompt_tokens is not None:
+                 param.truncate_prompt_tokens = truncate_prompt_tokens
+
+         io_processor_prompt = False
+         if isinstance(prompts, dict) and "data" in prompts:
+             io_processor_prompt = True
+             if self.io_processor is None:
+                 raise ValueError(
+                     "No IOProcessor plugin installed. Please refer "
+                     "to the documentation and to the "
+                     "'prithvi_geospatial_mae_io_processor' "
+                     "offline inference example for more details.")
+
+             # Validate that the request data is valid for the loaded plugin.
+             validated_prompt = self.io_processor.parse_request(prompts)
+
+             # Obtain the actual model prompts from the pre-processor.
+             prompts = self.io_processor.pre_process(prompt=validated_prompt)
+
+         self._validate_and_add_requests(
+             prompts=prompts,
+             params=pooling_params,
+             use_tqdm=use_tqdm,
+             lora_request=lora_request,
+         )
+
+         outputs = self._run_engine(use_tqdm=use_tqdm)
+
+         model_outputs = self.engine_class.validate_outputs(
+             outputs, PoolingRequestOutput)
+
+         if io_processor_prompt:
+             # Get the post-processed model outputs.
+             assert self.io_processor is not None
+             processed_outputs = self.io_processor.post_process(
+                 model_output=model_outputs)
+
+             return [
+                 PoolingRequestOutput[Any](request_id="",
+                                           outputs=processed_outputs,
+                                           prompt_token_ids=[],
+                                           finished=True)
+             ]
+         else:
+             return model_outputs
+
+     def embed(
+         self,
+         prompts: Union[PromptType, Sequence[PromptType]],
+         *,
+         truncate_prompt_tokens: Optional[int] = None,
+         use_tqdm: Union[bool, Callable[..., tqdm]] = True,
+         pooling_params: Optional[Union[PoolingParams,
+                                        Sequence[PoolingParams]]] = None,
+         lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+     ) -> list[EmbeddingRequestOutput]:
+         """
+         Generate an embedding vector for each prompt.
+
+         This class automatically batches the given prompts, considering
+         the memory constraint. For the best performance, put all of your
+         prompts into a single list and pass it to this method.
+
+         Args:
+             prompts: The prompts to the LLM. You may pass a sequence of prompts
+                 for batch inference. See [PromptType][vllm.inputs.PromptType]
+                 for more details about the format of each prompt.
+             pooling_params: The pooling parameters for pooling. If None, we
+                 use the default pooling parameters.
+             use_tqdm: If `True`, shows a tqdm progress bar.
+                 If a callable (e.g., `functools.partial(tqdm, leave=False)`),
+                 it is used to create the progress bar.
+                 If `False`, no progress bar is created.
+             lora_request: LoRA request to use for generation, if any.
+
+         Returns:
+             A list of `EmbeddingRequestOutput` objects containing the
+             embedding vectors in the same order as the input prompts.
+         """
+         if "embed" not in self.supported_tasks:
+             raise ValueError(
+                 "Embedding API is not supported by this model. "
+                 "Try converting the model using `--convert embed`.")
+
+         items = self.encode(
+             prompts,
+             truncate_prompt_tokens=truncate_prompt_tokens,
+             use_tqdm=use_tqdm,
+             pooling_params=pooling_params,
+             lora_request=lora_request,
+             pooling_task="embed",
+         )
+
+         return [EmbeddingRequestOutput.from_base(item) for item in items]
+
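# Illustrative sketch (not part of the diff): embedding two prompts. This
# assumes `llm` was constructed with an embedding model; `.outputs.embedding`
# is a list of floats on each result.
emb_outputs = llm.embed(["the quick brown fox", "a lazy dog"])
vectors = [o.outputs.embedding for o in emb_outputs]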
+     def classify(
+         self,
+         prompts: Union[PromptType, Sequence[PromptType]],
+         *,
+         use_tqdm: Union[bool, Callable[..., tqdm]] = True,
+         pooling_params: Optional[Union[PoolingParams,
+                                        Sequence[PoolingParams]]] = None,
+         lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+     ) -> list[ClassificationRequestOutput]:
+         """
+         Generate class logits for each prompt.
+
+         This class automatically batches the given prompts, considering
+         the memory constraint. For the best performance, put all of your
+         prompts into a single list and pass it to this method.
+
+         Args:
+             prompts: The prompts to the LLM. You may pass a sequence of prompts
+                 for batch inference. See [PromptType][vllm.inputs.PromptType]
+                 for more details about the format of each prompt.
+             use_tqdm: If `True`, shows a tqdm progress bar.
+                 If a callable (e.g., `functools.partial(tqdm, leave=False)`),
+                 it is used to create the progress bar.
+                 If `False`, no progress bar is created.
+             lora_request: LoRA request to use for generation, if any.
+             pooling_params: The pooling parameters for pooling. If None, we
+                 use the default pooling parameters.
+
+         Returns:
+             A list of `ClassificationRequestOutput` objects containing the
+             class logits in the same order as the input prompts.
+         """
+         if "classify" not in self.supported_tasks:
+             raise ValueError(
+                 "Classification API is not supported by this model. "
+                 "Try converting the model using `--convert classify`.")
+
+         items = self.encode(
+             prompts,
+             use_tqdm=use_tqdm,
+             pooling_params=pooling_params,
+             lora_request=lora_request,
+             pooling_task="classify",
+         )
+
+         return [ClassificationRequestOutput.from_base(item) for item in items]
+
+     def reward(
+         self,
+         prompts: Union[PromptType, Sequence[PromptType]],
+         /,
+         *,
+         truncate_prompt_tokens: Optional[int] = None,
+         use_tqdm: Union[bool, Callable[..., tqdm]] = True,
+         pooling_params: Optional[Union[PoolingParams,
+                                        Sequence[PoolingParams]]] = None,
+         lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
+     ) -> list[PoolingRequestOutput]:
+         """
+         Generate rewards for each prompt.
+
+         Args:
+             prompts: The prompts to the LLM. You may pass a sequence of prompts
+                 for batch inference. See [PromptType][vllm.inputs.PromptType]
+                 for more details about the format of each prompt.
+             use_tqdm: If `True`, shows a tqdm progress bar.
+                 If a callable (e.g., `functools.partial(tqdm, leave=False)`),
+                 it is used to create the progress bar.
+                 If `False`, no progress bar is created.
+             lora_request: LoRA request to use for generation, if any.
+             pooling_params: The pooling parameters for pooling. If None, we
+                 use the default pooling parameters.
+
+         Returns:
+             A list of `PoolingRequestOutput` objects containing the
+             pooled hidden states in the same order as the input prompts.
+         """
+
+         return self.encode(
+             prompts,
+             use_tqdm=use_tqdm,
+             lora_request=lora_request,
+             pooling_params=pooling_params,
+             truncate_prompt_tokens=truncate_prompt_tokens,
+             pooling_task="encode",
+         )
+
1165
+ def _embedding_score(
1166
+ self,
1167
+ tokenizer: AnyTokenizer,
1168
+ text_1: list[Union[str, TextPrompt, TokensPrompt]],
1169
+ text_2: list[Union[str, TextPrompt, TokensPrompt]],
1170
+ truncate_prompt_tokens: Optional[int] = None,
1171
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
1172
+ pooling_params: Optional[PoolingParams] = None,
1173
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
1174
+ ) -> list[ScoringRequestOutput]:
1175
+
1176
+ encoded_output: list[PoolingRequestOutput] = self.encode(
1177
+ text_1 + text_2,
1178
+ truncate_prompt_tokens=truncate_prompt_tokens,
1179
+ use_tqdm=use_tqdm,
1180
+ lora_request=lora_request,
1181
+ pooling_params=pooling_params,
1182
+ pooling_task="embed",
1183
+ )
1184
+
1185
+ encoded_output_1: list[PoolingRequestOutput] = encoded_output[
1186
+ 0:len(text_1)]
1187
+ encoded_output_2: list[PoolingRequestOutput] = encoded_output[
1188
+ len(text_1):]
1189
+
1190
+ if len(encoded_output_1) == 1:
1191
+ encoded_output_1 = encoded_output_1 * len(encoded_output_2)
1192
+
1193
+ scores = _cosine_similarity(tokenizer=tokenizer,
1194
+ embed_1=encoded_output_1,
1195
+ embed_2=encoded_output_2)
1196
+
1197
+ items = self.engine_class.validate_outputs(scores,
1198
+ PoolingRequestOutput)
1199
+ return [ScoringRequestOutput.from_base(item) for item in items]
1200
+
1201
+ def _cross_encoding_score(
1202
+ self,
1203
+ tokenizer: AnyTokenizer,
1204
+ data_1: Union[list[str], list[ScoreContentPartParam]],
1205
+ data_2: Union[list[str], list[ScoreContentPartParam]],
1206
+ truncate_prompt_tokens: Optional[int] = None,
1207
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
1208
+ pooling_params: Optional[PoolingParams] = None,
1209
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
1210
+ ) -> list[ScoringRequestOutput]:
1211
+ model_config = self.llm_engine.model_config
1212
+
1213
+ if isinstance(tokenizer, MistralTokenizer):
1214
+ raise ValueError(
1215
+ "Score API is not supported for Mistral tokenizer")
1216
+
1217
+ if len(data_1) == 1:
1218
+ data_1 = data_1 * len(data_2)
1219
+
1220
+ if pooling_params is None:
1221
+ pooling_params = PoolingParams(task="score")
1222
+
1223
+ model_config = self.llm_engine.model_config
1224
+ pooling_params.verify("score", model_config)
1225
+ pooling_params_list = list[PoolingParams]()
1226
+
1227
+ tokenization_kwargs: dict[str, Any] = {}
1228
+
1229
+ _validate_truncation_size(model_config.max_model_len,
1230
+ truncate_prompt_tokens, tokenization_kwargs)
1231
+
1232
+ prompts = list[PromptType]()
1233
+
1234
+ input_pairs = [(t1, t2) for t1, t2 in zip(data_1, data_2)]
1235
+
1236
+ model_config = self.llm_engine.model_config
1237
+
1238
+ for q, d in input_pairs:
1239
+ _, engine_prompt = get_score_prompt(
1240
+ model_config=model_config,
1241
+ data_1=q,
1242
+ data_2=d,
1243
+ tokenizer=tokenizer,
1244
+ tokenization_kwargs=tokenization_kwargs,
1245
+ )
1246
+
1247
+ if (token_type_ids := engine_prompt.pop("token_type_ids", None)):
1248
+ params = pooling_params.clone()
1249
+ compressed = compress_token_type_ids(token_type_ids)
1250
+ params.extra_kwargs = {"compressed_token_type_ids": compressed}
1251
+ pooling_params_list.append(params)
1252
+ else:
1253
+ pooling_params_list.append(pooling_params)
1254
+
1255
+ prompts.append(engine_prompt)
1256
+
1257
+ self._validate_and_add_requests(
1258
+ prompts=prompts,
1259
+ params=pooling_params_list,
1260
+ use_tqdm=use_tqdm,
1261
+ lora_request=lora_request,
1262
+ )
1263
+
1264
+ outputs = self._run_engine(use_tqdm=use_tqdm)
1265
+ items = self.engine_class.validate_outputs(outputs,
1266
+ PoolingRequestOutput)
1267
+
1268
+ return [ScoringRequestOutput.from_base(item) for item in items]
1269
+
1270
+ def score(
1271
+ self,
1272
+ data_1: Union[SingletonPrompt, Sequence[SingletonPrompt],
1273
+ ScoreMultiModalParam],
1274
+ data_2: Union[SingletonPrompt, Sequence[SingletonPrompt],
1275
+ ScoreMultiModalParam],
1276
+ /,
1277
+ *,
1278
+ truncate_prompt_tokens: Optional[int] = None,
1279
+ use_tqdm: Union[bool, Callable[..., tqdm]] = True,
1280
+ pooling_params: Optional[PoolingParams] = None,
1281
+ lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None,
1282
+ ) -> list[ScoringRequestOutput]:
1283
+ """Generate similarity scores for all pairs `<text,text_pair>` or
1284
+ `<multi-modal data, multi-modal data pair>`.
1285
+
1286
+ The inputs can be `1 -> 1`, `1 -> N` or `N -> N`.
1287
+ In the `1 - N` case the `data_1` input will be replicated `N`
1288
+ times to pair with the `data_2` inputs.
1289
+ The input pairs are used to build a list of prompts for the
1290
+ cross encoder model. This class automatically batches the prompts,
1291
+ considering the memory constraint. For the best performance, put all
1292
+ of your inputs into a single list and pass it to this method.
1293
+
1294
+ Supports both text and multi-modal data (images, etc.) when used with
1295
+ appropriate multi-modal models. For multi-modal inputs, ensure the
1296
+ prompt structure matches the model's expected input format.
1297
+
1298
+ Args:
1299
+ data_1: Can be a single prompt, a list of prompts or
1300
+ `ScoreMultiModalParam`, which can contain either text or
1301
+ multi-modal data. When a list, it must have the same length as
1302
+ the `data_2` list.
1303
+ data_2: The data to pair with the query to form the input to
1304
+ the LLM. Can be text or multi-modal data. See [PromptType]
1305
+ [vllm.inputs.PromptType] for more details about the format of
1306
+ each prompt.
1307
+ use_tqdm: If `True`, shows a tqdm progress bar.
1308
+ If a callable (e.g., `functools.partial(tqdm, leave=False)`),
1309
+ it is used to create the progress bar.
1310
+ If `False`, no progress bar is created.
1311
+ lora_request: LoRA request to use for generation, if any.
1312
+ pooling_params: The pooling parameters for pooling. If None, we
1313
+ use the default pooling parameters.
1314
+ Returns:
1315
+ A list of `ScoringRequestOutput` objects containing the
1316
+ generated scores in the same order as the input prompts.
1317
+ """
1318
+        model_config = self.llm_engine.model_config
+        runner_type = model_config.runner_type
+        if runner_type != "pooling":
+            raise ValueError(
+                "LLM.score() is only supported for pooling models. "
+                "Try passing `--runner pooling` to use the model as a "
+                "pooling model.")
+
+        supported_tasks = self.supported_tasks
+        if all(t not in supported_tasks for t in ("embed", "classify")):
+            raise ValueError("Score API is not supported by this model. "
+                             "Try converting the model using "
+                             "`--convert embed` or `--convert classify`.")
+
+        if (model_config.is_cross_encoder
+                and getattr(model_config.hf_config, "num_labels", 0) != 1):
+            raise ValueError("Score API is only enabled for num_labels == 1.")
+
+        # The tokenizer for models such as
+        # "cross-encoder/ms-marco-MiniLM-L-6-v2" doesn't support passing
+        # lists of tokens to the `text` and `text_pair` kwargs.
+        tokenizer = self.get_tokenizer()
+
+        if not model_config.is_multimodal_model:
+
+            def check_data_type(data: Union[SingletonPrompt,
+                                            Sequence[SingletonPrompt],
+                                            ScoreMultiModalParam]):
+                if isinstance(data, dict) and "content" in data:
+                    raise ValueError("ScoreMultiModalParam is not supported "
+                                     f"for {model_config.architecture}")
+
+            check_data_type(data_1)
+            check_data_type(data_2)
+
+            def ensure_str(prompt: SingletonPrompt):
+                if isinstance(prompt, dict):
+                    if "multi_modal_data" in prompt:
+                        raise ValueError("Multi-modal prompt is not "
+                                         "supported for scoring")
+                    elif "prompt_token_ids" in prompt:
+                        prompt = tokenizer.decode(
+                            cast(TokensPrompt, prompt)["prompt_token_ids"])
+                    elif "prompt" in prompt:
+                        prompt = cast(TextPrompt, prompt)["prompt"]
+                assert type(prompt) is str
+                return prompt
+
+            if isinstance(data_1, (str, dict)):
+                # Convert a single prompt to a list.
+                data_1 = [data_1]  # type: ignore[list-item]
+
+            data_1 = [ensure_str(t) for t in data_1]
+
+            if isinstance(data_2, (str, dict)):
+                # Convert a single prompt to a list.
+                data_2 = [data_2]  # type: ignore[list-item]
+
+            data_2 = [ensure_str(t) for t in data_2]
+
+        if isinstance(data_1, dict) and "content" in data_1:
+            data_1 = data_1.get("content")  # type: ignore[assignment]
+        elif isinstance(data_1, str):
+            data_1 = [data_1]
+
+        if isinstance(data_2, dict) and "content" in data_2:
+            data_2 = data_2.get("content")  # type: ignore[assignment]
+        elif isinstance(data_2, str):
+            data_2 = [data_2]
+
+        _validate_score_input_lens(data_1, data_2)  # type: ignore[arg-type]
+
+        if model_config.is_cross_encoder:
+            return self._cross_encoding_score(
+                tokenizer,
+                data_1,  # type: ignore[arg-type]
+                data_2,  # type: ignore[arg-type]
+                truncate_prompt_tokens,
+                use_tqdm,
+                pooling_params,
+                lora_request)
+        else:
+            return self._embedding_score(
+                tokenizer,
+                data_1,  # type: ignore[arg-type]
+                data_2,  # type: ignore[arg-type]
+                truncate_prompt_tokens,
+                use_tqdm,
+                pooling_params,
+                lora_request)
+
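A minimal usage sketch for the score() path above, assuming a text-only
cross-encoder such as "cross-encoder/ms-marco-MiniLM-L-6-v2" (the checkpoint
name is illustrative; any pooling-runner model with num_labels == 1 passes the
checks above):

    from vllm import LLM

    llm = LLM(model="cross-encoder/ms-marco-MiniLM-L-6-v2")
    outputs = llm.score(
        "What is the capital of France?",
        ["Paris is the capital of France.", "The Eiffel Tower is in Paris."],
    )
    for output in outputs:
        print(output.outputs.score)  # one relevance score per text pair
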
+    def start_profile(self) -> None:
+        self.llm_engine.start_profile()
+
+    def stop_profile(self) -> None:
+        self.llm_engine.stop_profile()
+
+    def reset_prefix_cache(self, device: Optional[Device] = None) -> bool:
+        return self.llm_engine.reset_prefix_cache(device)
+
+    def sleep(self, level: int = 1):
+        """
+        Put the engine to sleep. The engine should not process any requests
+        while sleeping; the caller must guarantee that no requests are in
+        flight between `sleep` and the matching `wake_up` call.
+
+        Args:
+            level: The sleep level. Level 1 offloads the model weights to
+                CPU memory and discards the KV cache, so make sure there is
+                enough CPU memory to hold the weights; it suits waking up to
+                run the same model again. Level 2 discards both the model
+                weights and the KV cache; it suits waking up to run a
+                different model or updated weights, and reduces CPU memory
+                pressure since nothing is backed up.
+        """
+        self.reset_prefix_cache()
+        self.llm_engine.sleep(level=level)
+
+    def wake_up(self, tags: Optional[list[str]] = None):
+        """
+        Wake up the engine from sleep mode. See the [sleep][vllm.LLM.sleep]
+        method for more details.
+
+        Args:
+            tags: An optional list of tags specifying which memory
+                allocations to restore. Values must be in
+                `("weights", "kv_cache")`. If None, all memory is
+                reallocated. `wake_up` must be called with all tags (or
+                None) before the engine is used again.
+        """
+        self.llm_engine.wake_up(tags)
+
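A minimal sleep/wake-up sketch, assuming an accelerator build with sleep mode
enabled and enough free CPU memory for the level-1 weight backup (the model
name is illustrative):

    from vllm import LLM

    llm = LLM(model="facebook/opt-125m", enable_sleep_mode=True)
    llm.generate(["Hello"])      # normal use
    llm.sleep(level=1)           # weights to CPU, KV cache discarded
    # ... accelerator memory is free for other work here ...
    llm.wake_up()                # or wake_up(tags=["weights"]) first, then
                                 # wake_up(tags=["kv_cache"]) before reuse
    llm.generate(["Hello again"])
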
+    def get_metrics(self) -> list["Metric"]:
+        """Return a snapshot of aggregated metrics from Prometheus.
+
+        Returns:
+            A list of `Metric` instances capturing the current state
+            of all aggregated metrics from Prometheus.
+
+        Note:
+            This method is only available with the V1 LLM engine.
+        """
+        return self.llm_engine.get_metrics()
+
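A sketch of consuming the snapshot, assuming the V1 metrics reader types
(Counter, Histogram) exposed under vllm.v1.metrics.reader and their
value/count/sum fields:

    from vllm.v1.metrics.reader import Counter, Histogram

    for metric in llm.get_metrics():
        if isinstance(metric, Counter):
            print(metric.name, metric.value)
        elif isinstance(metric, Histogram):
            print(metric.name, metric.count, metric.sum)
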
+    def _validate_and_add_requests(
+        self,
+        prompts: Union[PromptType, Sequence[PromptType], DataPrompt],
+        params: Union[SamplingParams, Sequence[SamplingParams], PoolingParams,
+                      Sequence[PoolingParams]],
+        *,
+        use_tqdm: Union[bool, Callable[..., tqdm]] = True,
+        lora_request: Optional[Union[Sequence[LoRARequest], LoRARequest]],
+        priority: Optional[list[int]] = None,
+    ) -> None:
+        if isinstance(prompts, (str, dict)):
+            # Convert a single prompt to a list.
+            prompts = [prompts]  # type: ignore[list-item]
+
+        num_requests = len(prompts)
+        if isinstance(params, Sequence) and len(params) != num_requests:
+            raise ValueError("The lengths of prompts and params "
+                             "must be the same.")
+        if isinstance(lora_request,
+                      Sequence) and len(lora_request) != num_requests:
+            raise ValueError("The lengths of prompts and lora_request "
+                             "must be the same.")
+
+        for sp in params if isinstance(params, Sequence) else (params, ):
+            if isinstance(sp, SamplingParams):
+                # We only care about the final output.
+                sp.output_kind = RequestOutputKind.FINAL_ONLY
+
+        # Add requests to the engine.
+        it = prompts
+        if use_tqdm:
+            tqdm_func = use_tqdm if callable(use_tqdm) else tqdm
+            it = tqdm_func(it, desc="Adding requests")
+
+        model_config = self.llm_engine.model_config
+
+        for i, prompt in enumerate(it):
+            if isinstance(prompt, dict):
+                self._validate_mm_data_and_uuids(
+                    prompt.get("multi_modal_data"),
+                    prompt.get("multi_modal_uuids"))
+
+            param = params[i] if isinstance(params, Sequence) else params
+
+            tokenization_kwargs: dict[str, Any] = {}
+            _validate_truncation_size(model_config.max_model_len,
+                                      param.truncate_prompt_tokens,
+                                      tokenization_kwargs)
+
+            self._add_request(
+                prompt,
+                param,
+                tokenization_kwargs=tokenization_kwargs,
+                lora_request=lora_request[i] if isinstance(
+                    lora_request, Sequence) else lora_request,
+                priority=priority[i] if priority else 0,
+            )
+
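The length checks above enable per-prompt parameters; a minimal sketch
(assuming an existing `llm` instance):

    from vllm import SamplingParams

    prompts = ["Hello", "Bonjour"]
    params = [
        SamplingParams(temperature=0.0, max_tokens=8),   # greedy
        SamplingParams(temperature=0.8, max_tokens=32),  # sampled
    ]
    # len(params) must match len(prompts), or the ValueError above is raised.
    outputs = llm.generate(prompts, params)
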
+    def _validate_mm_data_and_uuids(
+        self,
+        multi_modal_data: Optional[Any],  # MultiModalDataDict
+        multi_modal_uuids: Optional[Any],  # MultiModalUUIDDict
+    ):
+        """
+        Validate that whenever a multi-modal data item is skipped
+        (i.e. None), a corresponding UUID is provided for it.
+        """
+        if multi_modal_data is None:
+            return
+
+        for modality, data in multi_modal_data.items():
+            if isinstance(data, list):
+                for i, d in enumerate(data):
+                    if d is None:
+                        if (multi_modal_uuids is None
+                                or modality not in multi_modal_uuids
+                                or multi_modal_uuids[modality] is None):
+                            raise ValueError(
+                                f"Multi-modal data for {modality} is None "
+                                "but UUID is not provided")
+                        if (len(multi_modal_uuids[modality]) <= i
+                                or multi_modal_uuids[modality][i] is None):
+                            raise ValueError(
+                                f"Multi-modal data for {modality} is None "
+                                "but UUID is not provided")
+            elif data is None and (multi_modal_uuids is None
+                                   or modality not in multi_modal_uuids
+                                   or multi_modal_uuids[modality] is None):
+                raise ValueError(f"Multi-modal data for {modality} is None "
+                                 "but UUID is not provided")
+
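A sketch of what this validator accepts and rejects, assuming the prompt-dict
layout consumed by _validate_and_add_requests above (UUIDs presumably let the
engine match skipped items against previously cached multi-modal inputs):

    # Accepted: the skipped (None) image is covered by a UUID.
    prompt_ok = {
        "prompt": "<image> Describe the image.",
        "multi_modal_data": {"image": [None]},
        "multi_modal_uuids": {"image": ["img-123"]},
    }

    # Rejected: the image data is None but no UUID covers it.
    prompt_bad = {
        "prompt": "<image> Describe the image.",
        "multi_modal_data": {"image": [None]},
    }
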
+    def _add_request(
+        self,
+        prompt: PromptType,
+        params: Union[SamplingParams, PoolingParams],
+        tokenization_kwargs: Optional[dict[str, Any]] = None,
+        lora_request: Optional[LoRARequest] = None,
+        priority: int = 0,
+    ) -> None:
+        request_id = str(next(self.request_counter))
+        self.llm_engine.add_request(
+            request_id,
+            prompt,
+            params,
+            lora_request=lora_request,
+            tokenization_kwargs=tokenization_kwargs,
+            priority=priority,
+        )
+
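request_id is produced by a monotonically increasing counter, so IDs are
numeric strings in submission order; _run_engine below relies on this when it
sorts finished outputs by int(x.request_id). A minimal sketch, assuming the
Counter helper from vllm.utils that backs self.request_counter:

    from vllm.utils import Counter

    request_counter = Counter()
    ids = [str(next(request_counter)) for _ in range(3)]
    assert ids == ["0", "1", "2"]
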
+    def _run_engine(
+        self,
+        *,
+        use_tqdm: Union[bool, Callable[..., tqdm]] = True
+    ) -> list[Union[RequestOutput, PoolingRequestOutput]]:
+        # Initialize tqdm.
+        if use_tqdm:
+            num_requests = self.llm_engine.get_num_unfinished_requests()
+            tqdm_func = use_tqdm if callable(use_tqdm) else tqdm
+            pbar = tqdm_func(
+                total=num_requests,
+                desc="Processed prompts",
+                dynamic_ncols=True,
+                postfix=(f"est. speed input: {0:.2f} toks/s, "
+                         f"output: {0:.2f} toks/s"),
+            )
+
+        # Run the engine.
+        outputs: list[Union[RequestOutput, PoolingRequestOutput]] = []
+        total_in_toks = 0
+        total_out_toks = 0
+        while self.llm_engine.has_unfinished_requests():
+            step_outputs = self.llm_engine.step()
+            for output in step_outputs:
+                if output.finished:
+                    outputs.append(output)
+                    if use_tqdm:
+                        if isinstance(output, RequestOutput):
+                            # Update the token throughput estimates, which
+                            # only apply to RequestOutput.
+                            n = len(output.outputs)
+                            assert output.prompt_token_ids is not None
+                            total_in_toks += len(output.prompt_token_ids) * n
+                            in_spd = (total_in_toks /
+                                      pbar.format_dict["elapsed"])
+                            total_out_toks += sum(
+                                len(stp.token_ids) for stp in output.outputs)
+                            out_spd = (total_out_toks /
+                                       pbar.format_dict["elapsed"])
+                            pbar.postfix = (
+                                f"est. speed input: {in_spd:.2f} toks/s, "
+                                f"output: {out_spd:.2f} toks/s")
+                            pbar.update(n)
+                        else:
+                            pbar.update(1)
+                        if pbar.n == num_requests:
+                            pbar.refresh()
+
+        if use_tqdm:
+            pbar.close()
+        # Sort the outputs by request ID. This is necessary because some
+        # requests may finish earlier than requests submitted before them.
+        return sorted(outputs, key=lambda x: int(x.request_id))
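Because use_tqdm may be a callable, the bar created here is customizable from
the public APIs; a minimal sketch mirroring the functools.partial form named
in the score() docstring above (assuming existing `llm`, `prompts`, and
`sampling_params`):

    from functools import partial
    from tqdm import tqdm

    outputs = llm.generate(
        prompts,
        sampling_params,
        use_tqdm=partial(tqdm, leave=False, mininterval=1.0),
    )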