vllm-cpu 0.11.0.post2 (cp312-cp312-manylinux_2_17_x86_64.whl)

This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
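Every entry in the listing below shows only additions (+N -0), meaning this release is compared against an empty baseline, so the per-file counts are simply each file's line count. For reference, here is a minimal sketch of reproducing such a file listing locally with Python's standard zipfile module; the wheel path is an assumption (e.g. a file fetched beforehand with `pip download vllm-cpu`), and file sizes stand in for the diff tool's line counts.

    # Minimal sketch: enumerate the contents of a downloaded wheel.
    # The wheel path below is a hypothetical local file, not part of this diff.
    import zipfile

    WHEEL_PATH = "vllm_cpu-0.11.0.post2-cp312-cp312-manylinux_2_17_x86_64.whl"

    with zipfile.ZipFile(WHEEL_PATH) as whl:
        for i, info in enumerate(whl.infolist(), start=1):
            # Mirror the numbered "path +N" style of the registry listing;
            # sizes are bytes here, whereas the diff viewer reports added lines.
            print(f"{i}. {info.filename} ({info.file_size} bytes)")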
Files changed (1398)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +220 -0
  3. vllm/_bc_linter.py +59 -0
  4. vllm/_custom_ops.py +2044 -0
  5. vllm/_ipex_ops.py +393 -0
  6. vllm/_version.py +34 -0
  7. vllm/assets/__init__.py +0 -0
  8. vllm/assets/audio.py +45 -0
  9. vllm/assets/base.py +41 -0
  10. vllm/assets/image.py +50 -0
  11. vllm/assets/video.py +145 -0
  12. vllm/attention/__init__.py +15 -0
  13. vllm/attention/backends/__init__.py +0 -0
  14. vllm/attention/backends/abstract.py +204 -0
  15. vllm/attention/backends/utils.py +33 -0
  16. vllm/attention/layer.py +645 -0
  17. vllm/attention/layers/__init__.py +0 -0
  18. vllm/attention/layers/chunked_local_attention.py +93 -0
  19. vllm/attention/layers/cross_attention.py +162 -0
  20. vllm/attention/layers/encoder_only_attention.py +86 -0
  21. vllm/attention/ops/__init__.py +0 -0
  22. vllm/attention/ops/chunked_prefill_paged_decode.py +405 -0
  23. vllm/attention/ops/common.py +345 -0
  24. vllm/attention/ops/flashmla.py +192 -0
  25. vllm/attention/ops/merge_attn_states.py +43 -0
  26. vllm/attention/ops/paged_attn.py +262 -0
  27. vllm/attention/ops/pallas_kv_cache_update.py +124 -0
  28. vllm/attention/ops/prefix_prefill.py +928 -0
  29. vllm/attention/ops/rocm_aiter_mla.py +104 -0
  30. vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
  31. vllm/attention/ops/triton_decode_attention.py +691 -0
  32. vllm/attention/ops/triton_flash_attention.py +984 -0
  33. vllm/attention/ops/triton_merge_attn_states.py +97 -0
  34. vllm/attention/ops/triton_reshape_and_cache_flash.py +175 -0
  35. vllm/attention/ops/triton_unified_attention.py +894 -0
  36. vllm/attention/selector.py +245 -0
  37. vllm/attention/utils/__init__.py +0 -0
  38. vllm/attention/utils/fa_utils.py +85 -0
  39. vllm/attention/utils/kv_sharing_utils.py +33 -0
  40. vllm/beam_search.py +87 -0
  41. vllm/benchmarks/__init__.py +0 -0
  42. vllm/benchmarks/datasets.py +2723 -0
  43. vllm/benchmarks/latency.py +170 -0
  44. vllm/benchmarks/lib/__init__.py +3 -0
  45. vllm/benchmarks/lib/endpoint_request_func.py +533 -0
  46. vllm/benchmarks/lib/ready_checker.py +73 -0
  47. vllm/benchmarks/lib/utils.py +80 -0
  48. vllm/benchmarks/serve.py +1358 -0
  49. vllm/benchmarks/throughput.py +696 -0
  50. vllm/collect_env.py +823 -0
  51. vllm/compilation/__init__.py +0 -0
  52. vllm/compilation/activation_quant_fusion.py +189 -0
  53. vllm/compilation/backends.py +650 -0
  54. vllm/compilation/base_static_graph.py +56 -0
  55. vllm/compilation/collective_fusion.py +1188 -0
  56. vllm/compilation/compiler_interface.py +573 -0
  57. vllm/compilation/counter.py +47 -0
  58. vllm/compilation/cuda_graph.py +199 -0
  59. vllm/compilation/cuda_piecewise_backend.py +117 -0
  60. vllm/compilation/decorators.py +400 -0
  61. vllm/compilation/fix_functionalization.py +205 -0
  62. vllm/compilation/fusion.py +383 -0
  63. vllm/compilation/fusion_attn.py +295 -0
  64. vllm/compilation/fx_utils.py +84 -0
  65. vllm/compilation/inductor_pass.py +136 -0
  66. vllm/compilation/monitor.py +57 -0
  67. vllm/compilation/noop_elimination.py +158 -0
  68. vllm/compilation/pass_manager.py +125 -0
  69. vllm/compilation/post_cleanup.py +20 -0
  70. vllm/compilation/sequence_parallelism.py +478 -0
  71. vllm/compilation/torch25_custom_graph_pass.py +42 -0
  72. vllm/compilation/vllm_inductor_pass.py +156 -0
  73. vllm/compilation/wrapper.py +136 -0
  74. vllm/config/__init__.py +814 -0
  75. vllm/config/cache.py +220 -0
  76. vllm/config/compilation.py +673 -0
  77. vllm/config/device.py +74 -0
  78. vllm/config/kv_events.py +50 -0
  79. vllm/config/kv_transfer.py +111 -0
  80. vllm/config/load.py +113 -0
  81. vllm/config/lora.py +132 -0
  82. vllm/config/model.py +1912 -0
  83. vllm/config/multimodal.py +129 -0
  84. vllm/config/observability.py +99 -0
  85. vllm/config/parallel.py +524 -0
  86. vllm/config/pooler.py +97 -0
  87. vllm/config/scheduler.py +287 -0
  88. vllm/config/speculative.py +568 -0
  89. vllm/config/speech_to_text.py +39 -0
  90. vllm/config/structured_outputs.py +64 -0
  91. vllm/config/utils.py +145 -0
  92. vllm/connections.py +186 -0
  93. vllm/device_allocator/__init__.py +0 -0
  94. vllm/device_allocator/cumem.py +311 -0
  95. vllm/distributed/__init__.py +6 -0
  96. vllm/distributed/communication_op.py +41 -0
  97. vllm/distributed/device_communicators/__init__.py +0 -0
  98. vllm/distributed/device_communicators/all2all.py +440 -0
  99. vllm/distributed/device_communicators/all_reduce_utils.py +317 -0
  100. vllm/distributed/device_communicators/base_device_communicator.py +295 -0
  101. vllm/distributed/device_communicators/cpu_communicator.py +201 -0
  102. vllm/distributed/device_communicators/cuda_communicator.py +323 -0
  103. vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
  104. vllm/distributed/device_communicators/custom_all_reduce.py +311 -0
  105. vllm/distributed/device_communicators/mnnvl_compat.py +28 -0
  106. vllm/distributed/device_communicators/pynccl.py +340 -0
  107. vllm/distributed/device_communicators/pynccl_allocator.py +186 -0
  108. vllm/distributed/device_communicators/pynccl_wrapper.py +416 -0
  109. vllm/distributed/device_communicators/quick_all_reduce.py +278 -0
  110. vllm/distributed/device_communicators/ray_communicator.py +258 -0
  111. vllm/distributed/device_communicators/shm_broadcast.py +589 -0
  112. vllm/distributed/device_communicators/shm_object_storage.py +635 -0
  113. vllm/distributed/device_communicators/symm_mem.py +136 -0
  114. vllm/distributed/device_communicators/tpu_communicator.py +102 -0
  115. vllm/distributed/device_communicators/xpu_communicator.py +94 -0
  116. vllm/distributed/eplb/__init__.py +8 -0
  117. vllm/distributed/eplb/eplb_state.py +620 -0
  118. vllm/distributed/eplb/rebalance_algo.py +239 -0
  119. vllm/distributed/eplb/rebalance_execute.py +424 -0
  120. vllm/distributed/kv_events.py +362 -0
  121. vllm/distributed/kv_transfer/README.md +29 -0
  122. vllm/distributed/kv_transfer/__init__.py +13 -0
  123. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  124. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  125. vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
  126. vllm/distributed/kv_transfer/kv_connector/factory.py +113 -0
  127. vllm/distributed/kv_transfer/kv_connector/utils.py +261 -0
  128. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
  129. vllm/distributed/kv_transfer/kv_connector/v1/base.py +388 -0
  130. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +168 -0
  131. vllm/distributed/kv_transfer/kv_connector/v1/metrics.py +100 -0
  132. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +328 -0
  133. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1473 -0
  134. vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py +485 -0
  135. vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
  136. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +488 -0
  137. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +550 -0
  138. vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +267 -0
  139. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +418 -0
  140. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  141. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
  142. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
  143. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
  144. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  145. vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
  146. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +290 -0
  147. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
  148. vllm/distributed/kv_transfer/kv_transfer_state.py +73 -0
  149. vllm/distributed/parallel_state.py +1532 -0
  150. vllm/distributed/tpu_distributed_utils.py +178 -0
  151. vllm/distributed/utils.py +536 -0
  152. vllm/engine/__init__.py +0 -0
  153. vllm/engine/arg_utils.py +1778 -0
  154. vllm/engine/async_llm_engine.py +6 -0
  155. vllm/engine/llm_engine.py +6 -0
  156. vllm/engine/metrics.py +577 -0
  157. vllm/engine/metrics_types.py +84 -0
  158. vllm/engine/protocol.py +333 -0
  159. vllm/entrypoints/__init__.py +0 -0
  160. vllm/entrypoints/api_server.py +178 -0
  161. vllm/entrypoints/chat_utils.py +1705 -0
  162. vllm/entrypoints/cli/__init__.py +12 -0
  163. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  164. vllm/entrypoints/cli/benchmark/base.py +25 -0
  165. vllm/entrypoints/cli/benchmark/latency.py +21 -0
  166. vllm/entrypoints/cli/benchmark/main.py +55 -0
  167. vllm/entrypoints/cli/benchmark/serve.py +21 -0
  168. vllm/entrypoints/cli/benchmark/throughput.py +21 -0
  169. vllm/entrypoints/cli/collect_env.py +36 -0
  170. vllm/entrypoints/cli/main.py +60 -0
  171. vllm/entrypoints/cli/openai.py +233 -0
  172. vllm/entrypoints/cli/run_batch.py +67 -0
  173. vllm/entrypoints/cli/serve.py +232 -0
  174. vllm/entrypoints/cli/types.py +29 -0
  175. vllm/entrypoints/constants.py +10 -0
  176. vllm/entrypoints/context.py +481 -0
  177. vllm/entrypoints/harmony_utils.py +436 -0
  178. vllm/entrypoints/launcher.py +164 -0
  179. vllm/entrypoints/llm.py +1629 -0
  180. vllm/entrypoints/logger.py +79 -0
  181. vllm/entrypoints/openai/__init__.py +0 -0
  182. vllm/entrypoints/openai/api_server.py +1953 -0
  183. vllm/entrypoints/openai/cli_args.py +288 -0
  184. vllm/entrypoints/openai/logits_processors.py +90 -0
  185. vllm/entrypoints/openai/protocol.py +2757 -0
  186. vllm/entrypoints/openai/run_batch.py +491 -0
  187. vllm/entrypoints/openai/serving_chat.py +1597 -0
  188. vllm/entrypoints/openai/serving_classification.py +173 -0
  189. vllm/entrypoints/openai/serving_completion.py +692 -0
  190. vllm/entrypoints/openai/serving_embedding.py +631 -0
  191. vllm/entrypoints/openai/serving_engine.py +992 -0
  192. vllm/entrypoints/openai/serving_models.py +288 -0
  193. vllm/entrypoints/openai/serving_pooling.py +276 -0
  194. vllm/entrypoints/openai/serving_responses.py +1709 -0
  195. vllm/entrypoints/openai/serving_score.py +479 -0
  196. vllm/entrypoints/openai/serving_tokenization.py +196 -0
  197. vllm/entrypoints/openai/serving_transcription.py +136 -0
  198. vllm/entrypoints/openai/speech_to_text.py +388 -0
  199. vllm/entrypoints/openai/tool_parsers/__init__.py +55 -0
  200. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
  201. vllm/entrypoints/openai/tool_parsers/deepseekv31_tool_parser.py +367 -0
  202. vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
  203. vllm/entrypoints/openai/tool_parsers/glm4_moe_tool_parser.py +185 -0
  204. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
  205. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
  206. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +455 -0
  207. vllm/entrypoints/openai/tool_parsers/hunyuan_a13b_tool_parser.py +372 -0
  208. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
  209. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
  210. vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py +377 -0
  211. vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
  212. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +269 -0
  213. vllm/entrypoints/openai/tool_parsers/longcat_tool_parser.py +39 -0
  214. vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +816 -0
  215. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
  216. vllm/entrypoints/openai/tool_parsers/openai_tool_parser.py +93 -0
  217. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
  218. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
  219. vllm/entrypoints/openai/tool_parsers/qwen3coder_tool_parser.py +707 -0
  220. vllm/entrypoints/openai/tool_parsers/qwen3xml_tool_parser.py +1137 -0
  221. vllm/entrypoints/openai/tool_parsers/seed_oss_tool_parser.py +679 -0
  222. vllm/entrypoints/openai/tool_parsers/step3_tool_parser.py +296 -0
  223. vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
  224. vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +524 -0
  225. vllm/entrypoints/renderer.py +395 -0
  226. vllm/entrypoints/score_utils.py +232 -0
  227. vllm/entrypoints/ssl.py +75 -0
  228. vllm/entrypoints/tool.py +139 -0
  229. vllm/entrypoints/tool_server.py +206 -0
  230. vllm/entrypoints/utils.py +233 -0
  231. vllm/env_override.py +23 -0
  232. vllm/envs.py +1590 -0
  233. vllm/executor/__init__.py +0 -0
  234. vllm/executor/executor_base.py +381 -0
  235. vllm/executor/msgspec_utils.py +35 -0
  236. vllm/executor/ray_distributed_executor.py +699 -0
  237. vllm/executor/ray_utils.py +410 -0
  238. vllm/executor/uniproc_executor.py +176 -0
  239. vllm/forward_context.py +402 -0
  240. vllm/inputs/__init__.py +30 -0
  241. vllm/inputs/data.py +356 -0
  242. vllm/inputs/parse.py +151 -0
  243. vllm/inputs/preprocess.py +664 -0
  244. vllm/logger.py +229 -0
  245. vllm/logging_utils/__init__.py +10 -0
  246. vllm/logging_utils/dump_input.py +81 -0
  247. vllm/logging_utils/formatter.py +79 -0
  248. vllm/logging_utils/log_time.py +32 -0
  249. vllm/logits_process.py +119 -0
  250. vllm/logprobs.py +28 -0
  251. vllm/lora/__init__.py +0 -0
  252. vllm/lora/layers/__init__.py +34 -0
  253. vllm/lora/layers/base.py +69 -0
  254. vllm/lora/layers/base_linear.py +185 -0
  255. vllm/lora/layers/column_parallel_linear.py +609 -0
  256. vllm/lora/layers/logits_processor.py +247 -0
  257. vllm/lora/layers/qkv_x_parallel_linear.py +8 -0
  258. vllm/lora/layers/replicated_linear.py +60 -0
  259. vllm/lora/layers/row_parallel_linear.py +196 -0
  260. vllm/lora/layers/utils.py +65 -0
  261. vllm/lora/layers/vocal_parallel_embedding.py +174 -0
  262. vllm/lora/lora_weights.py +199 -0
  263. vllm/lora/models.py +816 -0
  264. vllm/lora/ops/__init__.py +0 -0
  265. vllm/lora/ops/ipex_ops/__init__.py +7 -0
  266. vllm/lora/ops/ipex_ops/lora_ops.py +44 -0
  267. vllm/lora/ops/torch_ops/__init__.py +16 -0
  268. vllm/lora/ops/torch_ops/lora_ops.py +119 -0
  269. vllm/lora/ops/triton_ops/__init__.py +12 -0
  270. vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
  271. vllm/lora/ops/triton_ops/lora_expand_op.py +289 -0
  272. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
  273. vllm/lora/ops/triton_ops/lora_shrink_op.py +243 -0
  274. vllm/lora/ops/triton_ops/utils.py +126 -0
  275. vllm/lora/ops/xla_ops/__init__.py +7 -0
  276. vllm/lora/ops/xla_ops/lora_ops.py +144 -0
  277. vllm/lora/peft_helper.py +127 -0
  278. vllm/lora/punica_wrapper/__init__.py +10 -0
  279. vllm/lora/punica_wrapper/punica_base.py +458 -0
  280. vllm/lora/punica_wrapper/punica_cpu.py +349 -0
  281. vllm/lora/punica_wrapper/punica_gpu.py +272 -0
  282. vllm/lora/punica_wrapper/punica_selector.py +20 -0
  283. vllm/lora/punica_wrapper/punica_tpu.py +391 -0
  284. vllm/lora/punica_wrapper/punica_xpu.py +276 -0
  285. vllm/lora/punica_wrapper/utils.py +136 -0
  286. vllm/lora/request.py +97 -0
  287. vllm/lora/resolver.py +85 -0
  288. vllm/lora/utils.py +246 -0
  289. vllm/lora/worker_manager.py +267 -0
  290. vllm/model_executor/__init__.py +12 -0
  291. vllm/model_executor/custom_op.py +194 -0
  292. vllm/model_executor/layers/__init__.py +0 -0
  293. vllm/model_executor/layers/activation.py +575 -0
  294. vllm/model_executor/layers/attention_layer_base.py +23 -0
  295. vllm/model_executor/layers/fla/__init__.py +8 -0
  296. vllm/model_executor/layers/fla/ops/__init__.py +17 -0
  297. vllm/model_executor/layers/fla/ops/chunk.py +225 -0
  298. vllm/model_executor/layers/fla/ops/chunk_delta_h.py +290 -0
  299. vllm/model_executor/layers/fla/ops/chunk_o.py +177 -0
  300. vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +140 -0
  301. vllm/model_executor/layers/fla/ops/cumsum.py +226 -0
  302. vllm/model_executor/layers/fla/ops/fused_recurrent.py +366 -0
  303. vllm/model_executor/layers/fla/ops/index.py +39 -0
  304. vllm/model_executor/layers/fla/ops/l2norm.py +143 -0
  305. vllm/model_executor/layers/fla/ops/layernorm_guard.py +337 -0
  306. vllm/model_executor/layers/fla/ops/op.py +39 -0
  307. vllm/model_executor/layers/fla/ops/solve_tril.py +365 -0
  308. vllm/model_executor/layers/fla/ops/utils.py +180 -0
  309. vllm/model_executor/layers/fla/ops/wy_fast.py +114 -0
  310. vllm/model_executor/layers/fused_moe/__init__.py +89 -0
  311. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +322 -0
  312. vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +141 -0
  313. vllm/model_executor/layers/fused_moe/config.py +804 -0
  314. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  315. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  316. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  317. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  318. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  319. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  320. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  321. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  322. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  323. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  324. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  325. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  326. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  327. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H100,dtype=fp8_w8a8.json +123 -0
  328. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  329. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  330. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  331. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  332. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  333. vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
  334. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  335. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  336. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  337. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  338. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  339. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +147 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=62,N=128,device_name=AMD_Instinct_MI300X.json +200 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=AMD_Instinct_MI300X.json +200 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=AMD_Instinct_MI300X.json +200 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  454. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  455. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  456. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
  457. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  458. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  459. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  460. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  461. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  462. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
  463. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  464. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  465. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  466. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  467. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  468. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  469. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  470. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  471. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
  472. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  473. vllm/model_executor/layers/fused_moe/configs/E=72,N=192,device_name=AMD_Instinct_MI300X.json +200 -0
  474. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=AMD_Instinct_MI300X.json +200 -0
  475. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  476. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=AMD_Instinct_MI300X.json +200 -0
  477. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  478. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  479. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  480. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  481. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  482. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  483. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  484. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  485. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  486. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  487. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  488. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  489. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  490. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  491. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  492. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  493. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  494. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  495. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  496. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  497. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  498. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  499. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  500. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  501. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  502. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  503. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  504. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  505. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
  506. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  507. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  508. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  509. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  510. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  511. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  512. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  513. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  514. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  515. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  516. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  517. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  518. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  519. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  520. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  521. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  522. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  523. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  524. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  525. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  526. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  527. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  528. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  529. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  530. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  531. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  532. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  533. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  534. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  535. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  536. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  537. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  538. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  539. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  540. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  541. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  542. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  543. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  544. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  545. vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +300 -0
  546. vllm/model_executor/layers/fused_moe/cutlass_moe.py +957 -0
  547. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +362 -0
  548. vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +413 -0
  549. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +361 -0
  550. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +274 -0
  551. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +268 -0
  552. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +300 -0
  553. vllm/model_executor/layers/fused_moe/flashinfer_trtllm_moe.py +184 -0
  554. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +993 -0
  555. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +239 -0
  556. vllm/model_executor/layers/fused_moe/fused_moe.py +1890 -0
  557. vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +307 -0
  558. vllm/model_executor/layers/fused_moe/layer.py +2195 -0
  559. vllm/model_executor/layers/fused_moe/modular_kernel.py +1038 -0
  560. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +87 -0
  561. vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
  562. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +205 -0
  563. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  564. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +341 -0
  565. vllm/model_executor/layers/fused_moe/prepare_finalize.py +70 -0
  566. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +424 -0
  567. vllm/model_executor/layers/fused_moe/routing_simulator.py +291 -0
  568. vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +146 -0
  569. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +143 -0
  570. vllm/model_executor/layers/fused_moe/trtllm_moe.py +191 -0
  571. vllm/model_executor/layers/fused_moe/utils.py +274 -0
  572. vllm/model_executor/layers/layernorm.py +395 -0
  573. vllm/model_executor/layers/lightning_attn.py +661 -0
  574. vllm/model_executor/layers/linear.py +1603 -0
  575. vllm/model_executor/layers/logits_processor.py +106 -0
  576. vllm/model_executor/layers/mamba/__init__.py +0 -0
  577. vllm/model_executor/layers/mamba/abstract.py +42 -0
  578. vllm/model_executor/layers/mamba/linear_attn.py +403 -0
  579. vllm/model_executor/layers/mamba/mamba_mixer.py +466 -0
  580. vllm/model_executor/layers/mamba/mamba_mixer2.py +764 -0
  581. vllm/model_executor/layers/mamba/mamba_utils.py +186 -0
  582. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  583. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +1092 -0
  584. vllm/model_executor/layers/mamba/ops/layernorm_gated.py +168 -0
  585. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
  586. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +242 -0
  587. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +527 -0
  588. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +724 -0
  589. vllm/model_executor/layers/mamba/ops/ssd_combined.py +238 -0
  590. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +200 -0
  591. vllm/model_executor/layers/mamba/short_conv.py +253 -0
  592. vllm/model_executor/layers/mla.py +173 -0
  593. vllm/model_executor/layers/pooler.py +719 -0
  594. vllm/model_executor/layers/quantization/__init__.py +157 -0
  595. vllm/model_executor/layers/quantization/auto_round.py +388 -0
  596. vllm/model_executor/layers/quantization/awq.py +228 -0
  597. vllm/model_executor/layers/quantization/awq_marlin.py +554 -0
  598. vllm/model_executor/layers/quantization/awq_triton.py +320 -0
  599. vllm/model_executor/layers/quantization/base_config.py +170 -0
  600. vllm/model_executor/layers/quantization/bitblas.py +464 -0
  601. vllm/model_executor/layers/quantization/bitsandbytes.py +627 -0
  602. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  603. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +797 -0
  604. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +2074 -0
  605. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +27 -0
  606. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +366 -0
  607. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  608. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
  609. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +105 -0
  610. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +185 -0
  611. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +169 -0
  612. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +135 -0
  613. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
  614. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +157 -0
  615. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
  616. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
  617. vllm/model_executor/layers/quantization/compressed_tensors/transform/__init__.py +0 -0
  618. vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +238 -0
  619. vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +153 -0
  620. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/__init__.py +0 -0
  621. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +46 -0
  622. vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
  623. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
  624. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  625. vllm/model_executor/layers/quantization/deepspeedfp.py +196 -0
  626. vllm/model_executor/layers/quantization/experts_int8.py +223 -0
  627. vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
  628. vllm/model_executor/layers/quantization/fp8.py +1098 -0
  629. vllm/model_executor/layers/quantization/gguf.py +599 -0
  630. vllm/model_executor/layers/quantization/gptq.py +340 -0
  631. vllm/model_executor/layers/quantization/gptq_bitblas.py +448 -0
  632. vllm/model_executor/layers/quantization/gptq_marlin.py +751 -0
  633. vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
  634. vllm/model_executor/layers/quantization/hqq_marlin.py +333 -0
  635. vllm/model_executor/layers/quantization/inc.py +61 -0
  636. vllm/model_executor/layers/quantization/input_quant_fp8.py +156 -0
  637. vllm/model_executor/layers/quantization/ipex_quant.py +415 -0
  638. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  639. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +91 -0
  640. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +93 -0
  641. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
  642. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +302 -0
  643. vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +92 -0
  644. vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +117 -0
  645. vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +92 -0
  646. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
  647. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +144 -0
  648. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +139 -0
  649. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
  650. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +89 -0
  651. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +161 -0
  652. vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +206 -0
  653. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
  654. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
  655. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +104 -0
  656. vllm/model_executor/layers/quantization/kv_cache.py +143 -0
  657. vllm/model_executor/layers/quantization/modelopt.py +1596 -0
  658. vllm/model_executor/layers/quantization/moe_wna16.py +484 -0
  659. vllm/model_executor/layers/quantization/mxfp4.py +988 -0
  660. vllm/model_executor/layers/quantization/petit.py +306 -0
  661. vllm/model_executor/layers/quantization/ptpc_fp8.py +129 -0
  662. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  663. vllm/model_executor/layers/quantization/quark/quark.py +432 -0
  664. vllm/model_executor/layers/quantization/quark/quark_moe.py +561 -0
  665. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  666. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  667. vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +239 -0
  668. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +163 -0
  669. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
  670. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  671. vllm/model_executor/layers/quantization/rtn.py +466 -0
  672. vllm/model_executor/layers/quantization/schema.py +86 -0
  673. vllm/model_executor/layers/quantization/torchao.py +214 -0
  674. vllm/model_executor/layers/quantization/tpu_int8.py +125 -0
  675. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  676. vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
  677. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +210 -0
  678. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  679. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  680. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  681. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  682. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  683. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  684. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  685. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  686. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  687. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  688. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  689. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  690. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  691. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  722. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  723. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  724. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  725. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  726. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  727. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  728. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  729. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  730. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  731. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  732. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  733. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  734. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  735. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  736. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  737. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  738. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  739. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  740. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  741. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  742. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  743. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  744. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  745. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  746. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  747. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  748. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  749. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  750. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  751. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  752. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  753. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  754. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  755. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  756. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  757. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  758. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  759. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  760. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  761. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  762. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  763. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  764. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  765. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  766. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  767. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  768. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  769. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  770. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  771. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  772. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  773. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  774. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  775. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  776. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  777. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  778. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  779. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  780. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  781. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  782. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  783. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  784. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  785. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  786. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  787. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  788. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  789. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  790. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  791. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  792. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  793. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  794. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  795. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  796. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  797. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  798. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  799. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  800. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  801. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  802. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  803. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  804. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  805. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  806. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  807. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  808. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  809. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  810. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  811. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  812. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  813. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  814. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  815. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  816. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  817. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  818. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  819. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  820. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  821. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  822. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  823. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  824. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  825. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  826. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  827. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  828. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  829. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  830. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  831. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  832. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  833. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  834. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  835. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  836. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  837. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  838. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  839. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  840. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  841. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  842. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  843. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  844. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  845. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  846. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  847. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  848. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  849. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  850. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  851. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  852. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  853. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  854. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  855. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  856. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  857. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  858. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  859. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  860. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  861. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  862. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  863. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  864. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  865. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  866. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  867. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  868. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  869. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  870. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  871. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  872. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  873. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  874. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  875. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  876. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  877. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  878. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  879. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  880. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  881. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  882. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  883. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  884. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  885. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  886. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  887. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  888. vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
  889. vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +79 -0
  890. vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +248 -0
  891. vllm/model_executor/layers/quantization/utils/fp8_utils.py +949 -0
  892. vllm/model_executor/layers/quantization/utils/gptq_utils.py +146 -0
  893. vllm/model_executor/layers/quantization/utils/int8_utils.py +492 -0
  894. vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
  895. vllm/model_executor/layers/quantization/utils/machete_utils.py +50 -0
  896. vllm/model_executor/layers/quantization/utils/marlin_utils.py +479 -0
  897. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +396 -0
  898. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +345 -0
  899. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
  900. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
  901. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +141 -0
  902. vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +20 -0
  903. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +137 -0
  904. vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +59 -0
  905. vllm/model_executor/layers/quantization/utils/petit_utils.py +122 -0
  906. vllm/model_executor/layers/quantization/utils/quant_utils.py +641 -0
  907. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +458 -0
  908. vllm/model_executor/layers/resampler.py +270 -0
  909. vllm/model_executor/layers/rotary_embedding/__init__.py +204 -0
  910. vllm/model_executor/layers/rotary_embedding/base.py +177 -0
  911. vllm/model_executor/layers/rotary_embedding/common.py +150 -0
  912. vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +138 -0
  913. vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +197 -0
  914. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +41 -0
  915. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +67 -0
  916. vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +80 -0
  917. vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
  918. vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
  919. vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +81 -0
  920. vllm/model_executor/layers/rotary_embedding/mrope.py +1321 -0
  921. vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +42 -0
  922. vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +129 -0
  923. vllm/model_executor/layers/rotary_embedding/rocm_aiter_rope_ops.py +86 -0
  924. vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +68 -0
  925. vllm/model_executor/layers/shared_fused_moe/__init__.py +6 -0
  926. vllm/model_executor/layers/shared_fused_moe/shared_fused_moe.py +56 -0
  927. vllm/model_executor/layers/utils.py +195 -0
  928. vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
  929. vllm/model_executor/model_loader/__init__.py +138 -0
  930. vllm/model_executor/model_loader/base_loader.py +52 -0
  931. vllm/model_executor/model_loader/bitsandbytes_loader.py +788 -0
  932. vllm/model_executor/model_loader/default_loader.py +277 -0
  933. vllm/model_executor/model_loader/dummy_loader.py +28 -0
  934. vllm/model_executor/model_loader/gguf_loader.py +155 -0
  935. vllm/model_executor/model_loader/runai_streamer_loader.py +104 -0
  936. vllm/model_executor/model_loader/sharded_state_loader.py +199 -0
  937. vllm/model_executor/model_loader/tensorizer.py +738 -0
  938. vllm/model_executor/model_loader/tensorizer_loader.py +143 -0
  939. vllm/model_executor/model_loader/tpu.py +114 -0
  940. vllm/model_executor/model_loader/utils.py +292 -0
  941. vllm/model_executor/model_loader/weight_utils.py +990 -0
  942. vllm/model_executor/models/__init__.py +33 -0
  943. vllm/model_executor/models/adapters.py +542 -0
  944. vllm/model_executor/models/aimv2.py +246 -0
  945. vllm/model_executor/models/apertus.py +579 -0
  946. vllm/model_executor/models/arcee.py +422 -0
  947. vllm/model_executor/models/arctic.py +558 -0
  948. vllm/model_executor/models/aria.py +650 -0
  949. vllm/model_executor/models/aya_vision.py +468 -0
  950. vllm/model_executor/models/baichuan.py +474 -0
  951. vllm/model_executor/models/bailing_moe.py +642 -0
  952. vllm/model_executor/models/bamba.py +514 -0
  953. vllm/model_executor/models/bert.py +665 -0
  954. vllm/model_executor/models/bert_with_rope.py +687 -0
  955. vllm/model_executor/models/blip.py +339 -0
  956. vllm/model_executor/models/blip2.py +712 -0
  957. vllm/model_executor/models/bloom.py +374 -0
  958. vllm/model_executor/models/chameleon.py +1139 -0
  959. vllm/model_executor/models/chatglm.py +476 -0
  960. vllm/model_executor/models/clip.py +407 -0
  961. vllm/model_executor/models/cohere2_vision.py +481 -0
  962. vllm/model_executor/models/commandr.py +465 -0
  963. vllm/model_executor/models/config.py +445 -0
  964. vllm/model_executor/models/dbrx.py +471 -0
  965. vllm/model_executor/models/deepseek.py +497 -0
  966. vllm/model_executor/models/deepseek_eagle.py +240 -0
  967. vllm/model_executor/models/deepseek_mtp.py +289 -0
  968. vllm/model_executor/models/deepseek_v2.py +1444 -0
  969. vllm/model_executor/models/deepseek_vl2.py +658 -0
  970. vllm/model_executor/models/dots1.py +546 -0
  971. vllm/model_executor/models/dots_ocr.py +873 -0
  972. vllm/model_executor/models/ernie45.py +43 -0
  973. vllm/model_executor/models/ernie45_moe.py +607 -0
  974. vllm/model_executor/models/ernie45_vl.py +1527 -0
  975. vllm/model_executor/models/ernie45_vl_moe.py +727 -0
  976. vllm/model_executor/models/ernie_mtp.py +268 -0
  977. vllm/model_executor/models/exaone.py +550 -0
  978. vllm/model_executor/models/exaone4.py +533 -0
  979. vllm/model_executor/models/fairseq2_llama.py +154 -0
  980. vllm/model_executor/models/falcon.py +509 -0
  981. vllm/model_executor/models/falcon_h1.py +674 -0
  982. vllm/model_executor/models/fuyu.py +399 -0
  983. vllm/model_executor/models/gemma.py +425 -0
  984. vllm/model_executor/models/gemma2.py +422 -0
  985. vllm/model_executor/models/gemma3.py +555 -0
  986. vllm/model_executor/models/gemma3_mm.py +721 -0
  987. vllm/model_executor/models/gemma3n.py +1113 -0
  988. vllm/model_executor/models/gemma3n_mm.py +761 -0
  989. vllm/model_executor/models/glm.py +23 -0
  990. vllm/model_executor/models/glm4.py +304 -0
  991. vllm/model_executor/models/glm4_1v.py +1690 -0
  992. vllm/model_executor/models/glm4_moe.py +727 -0
  993. vllm/model_executor/models/glm4_moe_mtp.py +301 -0
  994. vllm/model_executor/models/glm4v.py +654 -0
  995. vllm/model_executor/models/gpt2.py +380 -0
  996. vllm/model_executor/models/gpt_bigcode.py +344 -0
  997. vllm/model_executor/models/gpt_j.py +339 -0
  998. vllm/model_executor/models/gpt_neox.py +330 -0
  999. vllm/model_executor/models/gpt_oss.py +712 -0
  1000. vllm/model_executor/models/granite.py +489 -0
  1001. vllm/model_executor/models/granite_speech.py +794 -0
  1002. vllm/model_executor/models/granitemoe.py +550 -0
  1003. vllm/model_executor/models/granitemoehybrid.py +614 -0
  1004. vllm/model_executor/models/granitemoeshared.py +332 -0
  1005. vllm/model_executor/models/gritlm.py +262 -0
  1006. vllm/model_executor/models/grok1.py +547 -0
  1007. vllm/model_executor/models/h2ovl.py +536 -0
  1008. vllm/model_executor/models/hunyuan_v1.py +1042 -0
  1009. vllm/model_executor/models/hyperclovax_vision.py +1192 -0
  1010. vllm/model_executor/models/idefics2_vision_model.py +417 -0
  1011. vllm/model_executor/models/idefics3.py +756 -0
  1012. vllm/model_executor/models/interfaces.py +959 -0
  1013. vllm/model_executor/models/interfaces_base.py +192 -0
  1014. vllm/model_executor/models/intern_vit.py +441 -0
  1015. vllm/model_executor/models/internlm2.py +450 -0
  1016. vllm/model_executor/models/internlm2_ve.py +148 -0
  1017. vllm/model_executor/models/interns1.py +838 -0
  1018. vllm/model_executor/models/interns1_vit.py +418 -0
  1019. vllm/model_executor/models/internvl.py +1423 -0
  1020. vllm/model_executor/models/jais.py +373 -0
  1021. vllm/model_executor/models/jamba.py +591 -0
  1022. vllm/model_executor/models/jina_vl.py +144 -0
  1023. vllm/model_executor/models/keye.py +1680 -0
  1024. vllm/model_executor/models/keye_vl1_5.py +602 -0
  1025. vllm/model_executor/models/kimi_vl.py +618 -0
  1026. vllm/model_executor/models/lfm2.py +548 -0
  1027. vllm/model_executor/models/llama.py +669 -0
  1028. vllm/model_executor/models/llama4.py +746 -0
  1029. vllm/model_executor/models/llama4_eagle.py +239 -0
  1030. vllm/model_executor/models/llama_eagle.py +179 -0
  1031. vllm/model_executor/models/llama_eagle3.py +296 -0
  1032. vllm/model_executor/models/llava.py +870 -0
  1033. vllm/model_executor/models/llava_next.py +571 -0
  1034. vllm/model_executor/models/llava_next_video.py +476 -0
  1035. vllm/model_executor/models/llava_onevision.py +942 -0
  1036. vllm/model_executor/models/longcat_flash.py +715 -0
  1037. vllm/model_executor/models/longcat_flash_mtp.py +352 -0
  1038. vllm/model_executor/models/mamba.py +275 -0
  1039. vllm/model_executor/models/mamba2.py +291 -0
  1040. vllm/model_executor/models/medusa.py +169 -0
  1041. vllm/model_executor/models/midashenglm.py +792 -0
  1042. vllm/model_executor/models/mimo.py +188 -0
  1043. vllm/model_executor/models/mimo_mtp.py +280 -0
  1044. vllm/model_executor/models/minicpm.py +631 -0
  1045. vllm/model_executor/models/minicpm3.py +230 -0
  1046. vllm/model_executor/models/minicpm_eagle.py +389 -0
  1047. vllm/model_executor/models/minicpmo.py +770 -0
  1048. vllm/model_executor/models/minicpmv.py +1784 -0
  1049. vllm/model_executor/models/minimax_text_01.py +986 -0
  1050. vllm/model_executor/models/minimax_vl_01.py +426 -0
  1051. vllm/model_executor/models/mistral3.py +628 -0
  1052. vllm/model_executor/models/mixtral.py +606 -0
  1053. vllm/model_executor/models/mllama4.py +1076 -0
  1054. vllm/model_executor/models/mlp_speculator.py +206 -0
  1055. vllm/model_executor/models/modernbert.py +374 -0
  1056. vllm/model_executor/models/module_mapping.py +72 -0
  1057. vllm/model_executor/models/molmo.py +1567 -0
  1058. vllm/model_executor/models/moonvit.py +673 -0
  1059. vllm/model_executor/models/motif.py +345 -0
  1060. vllm/model_executor/models/mpt.py +329 -0
  1061. vllm/model_executor/models/nano_nemotron_vl.py +1394 -0
  1062. vllm/model_executor/models/nemotron.py +507 -0
  1063. vllm/model_executor/models/nemotron_h.py +565 -0
  1064. vllm/model_executor/models/nemotron_nas.py +481 -0
  1065. vllm/model_executor/models/nemotron_vl.py +652 -0
  1066. vllm/model_executor/models/nvlm_d.py +203 -0
  1067. vllm/model_executor/models/olmo.py +404 -0
  1068. vllm/model_executor/models/olmo2.py +439 -0
  1069. vllm/model_executor/models/olmoe.py +483 -0
  1070. vllm/model_executor/models/opt.py +412 -0
  1071. vllm/model_executor/models/orion.py +348 -0
  1072. vllm/model_executor/models/ovis.py +559 -0
  1073. vllm/model_executor/models/ovis2_5.py +642 -0
  1074. vllm/model_executor/models/paligemma.py +411 -0
  1075. vllm/model_executor/models/persimmon.py +343 -0
  1076. vllm/model_executor/models/phi.py +356 -0
  1077. vllm/model_executor/models/phi3.py +19 -0
  1078. vllm/model_executor/models/phi3v.py +698 -0
  1079. vllm/model_executor/models/phi4_multimodal.py +1475 -0
  1080. vllm/model_executor/models/phi4mm.py +1279 -0
  1081. vllm/model_executor/models/phi4mm_audio.py +1254 -0
  1082. vllm/model_executor/models/phi4mm_utils.py +1875 -0
  1083. vllm/model_executor/models/phimoe.py +679 -0
  1084. vllm/model_executor/models/pixtral.py +1345 -0
  1085. vllm/model_executor/models/plamo2.py +978 -0
  1086. vllm/model_executor/models/qwen.py +361 -0
  1087. vllm/model_executor/models/qwen2.py +523 -0
  1088. vllm/model_executor/models/qwen2_5_omni_thinker.py +984 -0
  1089. vllm/model_executor/models/qwen2_5_vl.py +1481 -0
  1090. vllm/model_executor/models/qwen2_audio.py +489 -0
  1091. vllm/model_executor/models/qwen2_moe.py +558 -0
  1092. vllm/model_executor/models/qwen2_rm.py +122 -0
  1093. vllm/model_executor/models/qwen2_vl.py +1670 -0
  1094. vllm/model_executor/models/qwen3.py +341 -0
  1095. vllm/model_executor/models/qwen3_moe.py +692 -0
  1096. vllm/model_executor/models/qwen3_next.py +1266 -0
  1097. vllm/model_executor/models/qwen3_next_mtp.py +281 -0
  1098. vllm/model_executor/models/qwen3_vl.py +1613 -0
  1099. vllm/model_executor/models/qwen3_vl_moe.py +358 -0
  1100. vllm/model_executor/models/qwen_vl.py +795 -0
  1101. vllm/model_executor/models/radio.py +576 -0
  1102. vllm/model_executor/models/registry.py +990 -0
  1103. vllm/model_executor/models/roberta.py +252 -0
  1104. vllm/model_executor/models/rvl.py +103 -0
  1105. vllm/model_executor/models/seed_oss.py +485 -0
  1106. vllm/model_executor/models/siglip.py +540 -0
  1107. vllm/model_executor/models/siglip2navit.py +689 -0
  1108. vllm/model_executor/models/skyworkr1v.py +911 -0
  1109. vllm/model_executor/models/smolvlm.py +44 -0
  1110. vllm/model_executor/models/solar.py +504 -0
  1111. vllm/model_executor/models/stablelm.py +341 -0
  1112. vllm/model_executor/models/starcoder2.py +354 -0
  1113. vllm/model_executor/models/step3_text.py +510 -0
  1114. vllm/model_executor/models/step3_vl.py +1072 -0
  1115. vllm/model_executor/models/swin.py +475 -0
  1116. vllm/model_executor/models/tarsier.py +639 -0
  1117. vllm/model_executor/models/telechat2.py +151 -0
  1118. vllm/model_executor/models/teleflm.py +79 -0
  1119. vllm/model_executor/models/terratorch.py +294 -0
  1120. vllm/model_executor/models/transformers.py +948 -0
  1121. vllm/model_executor/models/ultravox.py +654 -0
  1122. vllm/model_executor/models/utils.py +808 -0
  1123. vllm/model_executor/models/vision.py +404 -0
  1124. vllm/model_executor/models/voxtral.py +786 -0
  1125. vllm/model_executor/models/whisper.py +963 -0
  1126. vllm/model_executor/models/zamba2.py +960 -0
  1127. vllm/model_executor/parameter.py +620 -0
  1128. vllm/model_executor/utils.py +86 -0
  1129. vllm/model_executor/warmup/__init__.py +0 -0
  1130. vllm/model_executor/warmup/deep_gemm_warmup.py +230 -0
  1131. vllm/model_executor/warmup/kernel_warmup.py +83 -0
  1132. vllm/multimodal/__init__.py +33 -0
  1133. vllm/multimodal/audio.py +116 -0
  1134. vllm/multimodal/base.py +27 -0
  1135. vllm/multimodal/cache.py +697 -0
  1136. vllm/multimodal/evs.py +273 -0
  1137. vllm/multimodal/hasher.py +102 -0
  1138. vllm/multimodal/image.py +130 -0
  1139. vllm/multimodal/inputs.py +987 -0
  1140. vllm/multimodal/parse.py +511 -0
  1141. vllm/multimodal/processing.py +2148 -0
  1142. vllm/multimodal/profiling.py +284 -0
  1143. vllm/multimodal/registry.py +345 -0
  1144. vllm/multimodal/utils.py +503 -0
  1145. vllm/multimodal/video.py +319 -0
  1146. vllm/outputs.py +324 -0
  1147. vllm/platforms/__init__.py +263 -0
  1148. vllm/platforms/cpu.py +340 -0
  1149. vllm/platforms/cuda.py +668 -0
  1150. vllm/platforms/interface.py +620 -0
  1151. vllm/platforms/rocm.py +497 -0
  1152. vllm/platforms/tpu.py +233 -0
  1153. vllm/platforms/xpu.py +243 -0
  1154. vllm/plugins/__init__.py +72 -0
  1155. vllm/plugins/io_processors/__init__.py +68 -0
  1156. vllm/plugins/io_processors/interface.py +67 -0
  1157. vllm/plugins/lora_resolvers/README.md +16 -0
  1158. vllm/plugins/lora_resolvers/__init__.py +0 -0
  1159. vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
  1160. vllm/pooling_params.py +191 -0
  1161. vllm/profiler/__init__.py +0 -0
  1162. vllm/profiler/layerwise_profile.py +375 -0
  1163. vllm/profiler/utils.py +148 -0
  1164. vllm/py.typed +2 -0
  1165. vllm/ray/__init__.py +0 -0
  1166. vllm/ray/lazy_utils.py +22 -0
  1167. vllm/ray/ray_env.py +72 -0
  1168. vllm/reasoning/__init__.py +29 -0
  1169. vllm/reasoning/abs_reasoning_parsers.py +202 -0
  1170. vllm/reasoning/basic_parsers.py +156 -0
  1171. vllm/reasoning/deepseek_r1_reasoning_parser.py +67 -0
  1172. vllm/reasoning/glm4_moe_reasoning_parser.py +151 -0
  1173. vllm/reasoning/gptoss_reasoning_parser.py +87 -0
  1174. vllm/reasoning/granite_reasoning_parser.py +363 -0
  1175. vllm/reasoning/hunyuan_a13b_reasoning_parser.py +245 -0
  1176. vllm/reasoning/mistral_reasoning_parser.py +56 -0
  1177. vllm/reasoning/qwen3_reasoning_parser.py +72 -0
  1178. vllm/reasoning/seedoss_reasoning_parser.py +28 -0
  1179. vllm/reasoning/step3_reasoning_parser.py +109 -0
  1180. vllm/sampling_params.py +593 -0
  1181. vllm/scalar_type.py +349 -0
  1182. vllm/scripts.py +15 -0
  1183. vllm/sequence.py +103 -0
  1184. vllm/tasks.py +11 -0
  1185. vllm/test_utils.py +129 -0
  1186. vllm/third_party/__init__.py +0 -0
  1187. vllm/third_party/pynvml.py +6140 -0
  1188. vllm/tracing.py +136 -0
  1189. vllm/transformers_utils/__init__.py +24 -0
  1190. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1191. vllm/transformers_utils/chat_templates/registry.py +70 -0
  1192. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1193. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1194. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1195. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1196. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1197. vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
  1198. vllm/transformers_utils/config.py +1102 -0
  1199. vllm/transformers_utils/config_parser_base.py +20 -0
  1200. vllm/transformers_utils/configs/__init__.py +63 -0
  1201. vllm/transformers_utils/configs/arctic.py +207 -0
  1202. vllm/transformers_utils/configs/chatglm.py +72 -0
  1203. vllm/transformers_utils/configs/deepseek_v3.py +101 -0
  1204. vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
  1205. vllm/transformers_utils/configs/dotsocr.py +69 -0
  1206. vllm/transformers_utils/configs/eagle.py +84 -0
  1207. vllm/transformers_utils/configs/falcon.py +90 -0
  1208. vllm/transformers_utils/configs/jais.py +237 -0
  1209. vllm/transformers_utils/configs/kimi_vl.py +37 -0
  1210. vllm/transformers_utils/configs/medusa.py +63 -0
  1211. vllm/transformers_utils/configs/midashenglm.py +101 -0
  1212. vllm/transformers_utils/configs/mistral.py +165 -0
  1213. vllm/transformers_utils/configs/mlp_speculator.py +68 -0
  1214. vllm/transformers_utils/configs/moonvit.py +33 -0
  1215. vllm/transformers_utils/configs/nemotron.py +205 -0
  1216. vllm/transformers_utils/configs/nemotron_h.py +259 -0
  1217. vllm/transformers_utils/configs/nemotron_vl.py +56 -0
  1218. vllm/transformers_utils/configs/olmo3.py +80 -0
  1219. vllm/transformers_utils/configs/ovis.py +176 -0
  1220. vllm/transformers_utils/configs/qwen3_next.py +275 -0
  1221. vllm/transformers_utils/configs/radio.py +91 -0
  1222. vllm/transformers_utils/configs/speculators/__init__.py +2 -0
  1223. vllm/transformers_utils/configs/speculators/algos.py +32 -0
  1224. vllm/transformers_utils/configs/speculators/base.py +111 -0
  1225. vllm/transformers_utils/configs/step3_vl.py +123 -0
  1226. vllm/transformers_utils/configs/ultravox.py +116 -0
  1227. vllm/transformers_utils/detokenizer_utils.py +199 -0
  1228. vllm/transformers_utils/dynamic_module.py +60 -0
  1229. vllm/transformers_utils/processor.py +299 -0
  1230. vllm/transformers_utils/processors/__init__.py +16 -0
  1231. vllm/transformers_utils/processors/deepseek_vl2.py +362 -0
  1232. vllm/transformers_utils/processors/ovis.py +420 -0
  1233. vllm/transformers_utils/processors/ovis2_5.py +458 -0
  1234. vllm/transformers_utils/runai_utils.py +104 -0
  1235. vllm/transformers_utils/s3_utils.py +93 -0
  1236. vllm/transformers_utils/tokenizer.py +292 -0
  1237. vllm/transformers_utils/tokenizer_base.py +154 -0
  1238. vllm/transformers_utils/tokenizers/__init__.py +10 -0
  1239. vllm/transformers_utils/tokenizers/mistral.py +521 -0
  1240. vllm/transformers_utils/utils.py +108 -0
  1241. vllm/triton_utils/__init__.py +16 -0
  1242. vllm/triton_utils/importing.py +96 -0
  1243. vllm/usage/__init__.py +0 -0
  1244. vllm/usage/usage_lib.py +259 -0
  1245. vllm/utils/__init__.py +3566 -0
  1246. vllm/utils/deep_gemm.py +319 -0
  1247. vllm/utils/flashinfer.py +443 -0
  1248. vllm/utils/jsontree.py +178 -0
  1249. vllm/utils/tensor_schema.py +235 -0
  1250. vllm/v1/__init__.py +0 -0
  1251. vllm/v1/attention/__init__.py +0 -0
  1252. vllm/v1/attention/backends/__init__.py +0 -0
  1253. vllm/v1/attention/backends/cpu_attn.py +919 -0
  1254. vllm/v1/attention/backends/flash_attn.py +795 -0
  1255. vllm/v1/attention/backends/flashinfer.py +1181 -0
  1256. vllm/v1/attention/backends/flex_attention.py +861 -0
  1257. vllm/v1/attention/backends/gdn_attn.py +332 -0
  1258. vllm/v1/attention/backends/linear_attn.py +67 -0
  1259. vllm/v1/attention/backends/mamba1_attn.py +81 -0
  1260. vllm/v1/attention/backends/mamba2_attn.py +232 -0
  1261. vllm/v1/attention/backends/mamba_attn.py +52 -0
  1262. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1263. vllm/v1/attention/backends/mla/common.py +1783 -0
  1264. vllm/v1/attention/backends/mla/cutlass_mla.py +248 -0
  1265. vllm/v1/attention/backends/mla/flashattn_mla.py +271 -0
  1266. vllm/v1/attention/backends/mla/flashinfer_mla.py +114 -0
  1267. vllm/v1/attention/backends/mla/flashmla.py +203 -0
  1268. vllm/v1/attention/backends/mla/flashmla_sparse.py +544 -0
  1269. vllm/v1/attention/backends/mla/indexer.py +342 -0
  1270. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +255 -0
  1271. vllm/v1/attention/backends/mla/triton_mla.py +177 -0
  1272. vllm/v1/attention/backends/pallas.py +409 -0
  1273. vllm/v1/attention/backends/rocm_aiter_fa.py +549 -0
  1274. vllm/v1/attention/backends/rocm_attn.py +426 -0
  1275. vllm/v1/attention/backends/short_conv_attn.py +94 -0
  1276. vllm/v1/attention/backends/tree_attn.py +451 -0
  1277. vllm/v1/attention/backends/triton_attn.py +361 -0
  1278. vllm/v1/attention/backends/utils.py +990 -0
  1279. vllm/v1/attention/backends/xformers.py +438 -0
  1280. vllm/v1/core/__init__.py +0 -0
  1281. vllm/v1/core/block_pool.py +416 -0
  1282. vllm/v1/core/encoder_cache_manager.py +333 -0
  1283. vllm/v1/core/kv_cache_coordinator.py +440 -0
  1284. vllm/v1/core/kv_cache_manager.py +399 -0
  1285. vllm/v1/core/kv_cache_utils.py +1291 -0
  1286. vllm/v1/core/sched/__init__.py +0 -0
  1287. vllm/v1/core/sched/async_scheduler.py +47 -0
  1288. vllm/v1/core/sched/interface.py +158 -0
  1289. vllm/v1/core/sched/output.py +166 -0
  1290. vllm/v1/core/sched/request_queue.py +224 -0
  1291. vllm/v1/core/sched/scheduler.py +1296 -0
  1292. vllm/v1/core/sched/utils.py +69 -0
  1293. vllm/v1/core/single_type_kv_cache_manager.py +671 -0
  1294. vllm/v1/cudagraph_dispatcher.py +125 -0
  1295. vllm/v1/engine/__init__.py +203 -0
  1296. vllm/v1/engine/async_llm.py +742 -0
  1297. vllm/v1/engine/coordinator.py +357 -0
  1298. vllm/v1/engine/core.py +1235 -0
  1299. vllm/v1/engine/core_client.py +1334 -0
  1300. vllm/v1/engine/detokenizer.py +349 -0
  1301. vllm/v1/engine/exceptions.py +17 -0
  1302. vllm/v1/engine/llm_engine.py +370 -0
  1303. vllm/v1/engine/logprobs.py +201 -0
  1304. vllm/v1/engine/output_processor.py +576 -0
  1305. vllm/v1/engine/parallel_sampling.py +133 -0
  1306. vllm/v1/engine/processor.py +545 -0
  1307. vllm/v1/engine/utils.py +860 -0
  1308. vllm/v1/executor/__init__.py +0 -0
  1309. vllm/v1/executor/abstract.py +137 -0
  1310. vllm/v1/executor/multiproc_executor.py +726 -0
  1311. vllm/v1/executor/ray_distributed_executor.py +108 -0
  1312. vllm/v1/executor/utils.py +23 -0
  1313. vllm/v1/kv_cache_interface.py +375 -0
  1314. vllm/v1/kv_offload/__init__.py +0 -0
  1315. vllm/v1/kv_offload/abstract.py +165 -0
  1316. vllm/v1/kv_offload/backend.py +96 -0
  1317. vllm/v1/kv_offload/backends/__init__.py +0 -0
  1318. vllm/v1/kv_offload/backends/cpu.py +61 -0
  1319. vllm/v1/kv_offload/cpu.py +75 -0
  1320. vllm/v1/kv_offload/factory.py +56 -0
  1321. vllm/v1/kv_offload/lru_manager.py +132 -0
  1322. vllm/v1/kv_offload/mediums.py +39 -0
  1323. vllm/v1/kv_offload/spec.py +61 -0
  1324. vllm/v1/kv_offload/worker/__init__.py +0 -0
  1325. vllm/v1/kv_offload/worker/cpu_gpu.py +171 -0
  1326. vllm/v1/kv_offload/worker/worker.py +142 -0
  1327. vllm/v1/metrics/__init__.py +0 -0
  1328. vllm/v1/metrics/loggers.py +741 -0
  1329. vllm/v1/metrics/prometheus.py +82 -0
  1330. vllm/v1/metrics/ray_wrappers.py +152 -0
  1331. vllm/v1/metrics/reader.py +246 -0
  1332. vllm/v1/metrics/stats.py +257 -0
  1333. vllm/v1/outputs.py +161 -0
  1334. vllm/v1/pool/__init__.py +0 -0
  1335. vllm/v1/pool/metadata.py +77 -0
  1336. vllm/v1/request.py +241 -0
  1337. vllm/v1/sample/__init__.py +0 -0
  1338. vllm/v1/sample/logits_processor/__init__.py +294 -0
  1339. vllm/v1/sample/logits_processor/builtin.py +275 -0
  1340. vllm/v1/sample/logits_processor/interface.py +97 -0
  1341. vllm/v1/sample/logits_processor/state.py +161 -0
  1342. vllm/v1/sample/metadata.py +43 -0
  1343. vllm/v1/sample/ops/__init__.py +0 -0
  1344. vllm/v1/sample/ops/bad_words.py +39 -0
  1345. vllm/v1/sample/ops/logprobs.py +26 -0
  1346. vllm/v1/sample/ops/penalties.py +43 -0
  1347. vllm/v1/sample/ops/topk_topp_sampler.py +292 -0
  1348. vllm/v1/sample/rejection_sampler.py +623 -0
  1349. vllm/v1/sample/sampler.py +285 -0
  1350. vllm/v1/sample/tpu/__init__.py +0 -0
  1351. vllm/v1/sample/tpu/metadata.py +124 -0
  1352. vllm/v1/sample/tpu/sampler.py +213 -0
  1353. vllm/v1/serial_utils.py +423 -0
  1354. vllm/v1/spec_decode/__init__.py +0 -0
  1355. vllm/v1/spec_decode/eagle.py +1011 -0
  1356. vllm/v1/spec_decode/medusa.py +66 -0
  1357. vllm/v1/spec_decode/metadata.py +62 -0
  1358. vllm/v1/spec_decode/metrics.py +211 -0
  1359. vllm/v1/spec_decode/ngram_proposer.py +276 -0
  1360. vllm/v1/spec_decode/utils.py +14 -0
  1361. vllm/v1/structured_output/__init__.py +295 -0
  1362. vllm/v1/structured_output/backend_guidance.py +245 -0
  1363. vllm/v1/structured_output/backend_lm_format_enforcer.py +167 -0
  1364. vllm/v1/structured_output/backend_outlines.py +320 -0
  1365. vllm/v1/structured_output/backend_types.py +134 -0
  1366. vllm/v1/structured_output/backend_xgrammar.py +327 -0
  1367. vllm/v1/structured_output/request.py +86 -0
  1368. vllm/v1/structured_output/utils.py +454 -0
  1369. vllm/v1/utils.py +396 -0
  1370. vllm/v1/worker/__init__.py +0 -0
  1371. vllm/v1/worker/block_table.py +210 -0
  1372. vllm/v1/worker/cpu_model_runner.py +175 -0
  1373. vllm/v1/worker/cpu_worker.py +156 -0
  1374. vllm/v1/worker/gpu_input_batch.py +863 -0
  1375. vllm/v1/worker/gpu_model_runner.py +4160 -0
  1376. vllm/v1/worker/gpu_ubatch_wrapper.py +399 -0
  1377. vllm/v1/worker/gpu_worker.py +710 -0
  1378. vllm/v1/worker/kv_connector_model_runner_mixin.py +132 -0
  1379. vllm/v1/worker/lora_model_runner_mixin.py +183 -0
  1380. vllm/v1/worker/tpu_input_batch.py +587 -0
  1381. vllm/v1/worker/tpu_model_runner.py +1946 -0
  1382. vllm/v1/worker/tpu_worker.py +346 -0
  1383. vllm/v1/worker/ubatch_splitting.py +192 -0
  1384. vllm/v1/worker/ubatch_utils.py +27 -0
  1385. vllm/v1/worker/ubatching.py +224 -0
  1386. vllm/v1/worker/utils.py +344 -0
  1387. vllm/v1/worker/worker_base.py +65 -0
  1388. vllm/v1/worker/xpu_model_runner.py +57 -0
  1389. vllm/v1/worker/xpu_worker.py +179 -0
  1390. vllm/version.py +41 -0
  1391. vllm/vllm_flash_attn/.gitkeep +0 -0
  1392. vllm/worker/__init__.py +0 -0
  1393. vllm/worker/worker_base.py +279 -0
  1394. vllm_cpu-0.11.0.post2.dist-info/METADATA +348 -0
  1395. vllm_cpu-0.11.0.post2.dist-info/RECORD +1398 -0
  1396. vllm_cpu-0.11.0.post2.dist-info/WHEEL +5 -0
  1397. vllm_cpu-0.11.0.post2.dist-info/entry_points.txt +5 -0
  1398. vllm_cpu-0.11.0.post2.dist-info/top_level.txt +1 -0
vllm/utils/__init__.py ADDED
@@ -0,0 +1,3566 @@
+ # SPDX-License-Identifier: Apache-2.0
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+
+ from __future__ import annotations
+
+ import asyncio
+ import concurrent
+ import contextlib
+ import datetime
+ import enum
+ import gc
+ import getpass
+ import hashlib
+ import importlib
+ import importlib.metadata
+ import importlib.util
+ import inspect
+ import ipaddress
+ import json
+ import multiprocessing
+ import os
+ import pickle
+ import signal
+ import socket
+ import subprocess
+ import sys
+ import tempfile
+ import textwrap
+ import threading
+ import time
+ import traceback
+ import types
+ import uuid
+ import warnings
+ import weakref
+ from argparse import (Action, ArgumentDefaultsHelpFormatter, ArgumentParser,
+                       ArgumentTypeError, RawDescriptionHelpFormatter,
+                       _ArgumentGroup)
+ from asyncio import FIRST_COMPLETED, AbstractEventLoop, Task
+ from collections import UserDict, defaultdict
+ from collections.abc import (AsyncGenerator, Awaitable, Collection, Generator,
+                              Hashable, Iterable, Iterator, KeysView, Mapping,
+                              Sequence)
+ from concurrent.futures import ThreadPoolExecutor
+ from concurrent.futures.process import ProcessPoolExecutor
+ from dataclasses import dataclass, field
+ from functools import cache, lru_cache, partial, wraps
+ from pathlib import Path
+ from types import MappingProxyType
+ from typing import (TYPE_CHECKING, Any, Callable, Generic, Literal, NamedTuple,
+                     Optional, TextIO, TypeVar, Union, cast, overload)
+ from urllib.parse import urlparse
+ from uuid import uuid4
+
+ import cachetools
+ import cbor2
+ import cloudpickle
+ import numpy as np
+ import numpy.typing as npt
+ import psutil
+ import regex as re
+ import setproctitle
+ import torch
+ import torch.types
+ import yaml
+ import zmq
+ import zmq.asyncio
+ from packaging import version
+ from packaging.version import Version
+ from torch.library import Library
+ from transformers.tokenization_utils_base import BatchEncoding
+ from typing_extensions import Never, ParamSpec, TypeIs, assert_never
+
+ import vllm.envs as envs
+ from vllm.logger import enable_trace_function_call, init_logger
+ from vllm.ray.lazy_utils import is_in_ray_actor
+
+ if TYPE_CHECKING:
+     from argparse import Namespace
+
+     from vllm.config import ModelConfig, VllmConfig
+     from vllm.sequence import IntermediateTensors
+
+ logger = init_logger(__name__)
+
+ # This value is chosen to have a balance between ITL and TTFT. Note it is
+ # not optimized for throughput.
+ DEFAULT_MAX_NUM_BATCHED_TOKENS = 2048
+ POOLING_MODEL_MAX_NUM_BATCHED_TOKENS = 32768
+ MULTIMODAL_MODEL_MAX_NUM_BATCHED_TOKENS = 5120
+
+ # Constants related to forcing the attention backend selection
+
+ # String name of the environment variable which may be set in order to
+ # force selection of the attention backend by the Attention wrapper
+ STR_BACKEND_ENV_VAR: str = "VLLM_ATTENTION_BACKEND"
+
+ # Possible string values of the STR_BACKEND_ENV_VAR
+ # environment variable, corresponding to possible backends
+ STR_FLASHINFER_ATTN_VAL: str = "FLASHINFER"
+ STR_TORCH_SDPA_ATTN_VAL: str = "TORCH_SDPA"
+ STR_XFORMERS_ATTN_VAL: str = "XFORMERS"
+ STR_FLASH_ATTN_VAL: str = "FLASH_ATTN"
+ STR_INVALID_VAL: str = "INVALID"
+
+ MB_bytes = 1_000_000
+ """The number of bytes in one megabyte (MB)."""
+
+ MiB_bytes = 1 << 20
+ """The number of bytes in one mebibyte (MiB)."""
+
+ GB_bytes = 1_000_000_000
+ """The number of bytes in one gigabyte (GB)."""
+
+ GiB_bytes = 1 << 30
+ """The number of bytes in one gibibyte (GiB)."""
+
+ # ANSI color codes
+ CYAN = '\033[1;36m'
+ RESET = '\033[0;0m'
+
+ STR_DTYPE_TO_TORCH_DTYPE = {
+     "float32": torch.float32,
+     "half": torch.half,
+     "bfloat16": torch.bfloat16,
+     "float": torch.float,
+     "fp8": torch.uint8,
+     "fp8_e4m3": torch.uint8,
+     "fp8_e5m2": torch.uint8,
+     "int8": torch.int8,
+     "fp8_inc": torch.float8_e4m3fn,
+     "fp8_ds_mla": torch.uint8,
+ }
+
+ TORCH_DTYPE_TO_NUMPY_DTYPE = {
+     torch.float16: np.float16,
+     torch.float32: np.float32,
+     torch.float64: np.float64,
+     torch.uint8: np.uint8,
+     torch.int32: np.int32,
+     torch.int64: np.int64,
+ }
+
+
+ @contextlib.contextmanager
+ def set_default_torch_num_threads(num_threads: int):
+     """Sets the default number of threads for PyTorch to the given value."""
+     old_num_threads = torch.get_num_threads()
+     torch.set_num_threads(num_threads)
+     yield
+     torch.set_num_threads(old_num_threads)
+
+
+ P = ParamSpec('P')
+ T = TypeVar("T")
+ U = TypeVar("U")
+
+ _K = TypeVar("_K", bound=Hashable)
+ _V = TypeVar("_V")
+ _T = TypeVar("_T")
+
+
+ class _Sentinel:
+     ...
+
+
+ ALL_PINNED_SENTINEL = _Sentinel()
+
+
+ class Device(enum.Enum):
+     GPU = enum.auto()
+     CPU = enum.auto()
+
+
+ class LayerBlockType(enum.Enum):
+     attention = "attention"
+     mamba = "mamba"
+
+
+ class Counter:
+
+     def __init__(self, start: int = 0) -> None:
+         self.counter = start
+
+     def __next__(self) -> int:
+         i = self.counter
+         self.counter += 1
+         return i
+
+     def reset(self) -> None:
+         self.counter = 0
+
+
+ class _MappingOrderCacheView(UserDict[_K, _V]):
+
+     def __init__(self, data: Mapping[_K, _V], ordered_keys: Mapping[_K, None]):
+         super().__init__(data)
+         self.ordered_keys = ordered_keys
+
+     def __iter__(self) -> Iterator[_K]:
+         return iter(self.ordered_keys)
+
+     def keys(self) -> KeysView[_K]:
+         return KeysView(self.ordered_keys)
+
+
+ class CacheInfo(NamedTuple):
+     hits: int
+     total: int
+
+     @property
+     def hit_ratio(self) -> float:
+         if self.total == 0:
+             return 0
+
+         return self.hits / self.total
+
+     def __sub__(self, other: CacheInfo):
+         return CacheInfo(
+             hits=self.hits - other.hits,
+             total=self.total - other.total,
+         )
+
+
+ class LRUCache(cachetools.LRUCache[_K, _V], Generic[_K, _V]):
+
+     def __init__(self,
+                  capacity: float,
+                  getsizeof: Optional[Callable[[_V], float]] = None):
+         super().__init__(capacity, getsizeof)
+
+         self.pinned_items = set[_K]()
+
+         self._hits = 0
+         self._total = 0
+         self._last_info = CacheInfo(hits=0, total=0)
+
+     def __getitem__(self, key: _K, *, update_info: bool = True) -> _V:
+         value = super().__getitem__(key)
+
+         if update_info:
+             self._hits += 1
+             self._total += 1
+
+         return value
+
+     def __delitem__(self, key: _K) -> None:
+         run_on_remove = key in self
+         value = self.__getitem__(key,
+                                  update_info=False)  # type: ignore[call-arg]
+         super().__delitem__(key)
+         if key in self.pinned_items:
+             # TODO: add a warning when a pinned item is deleted
+             self._unpin(key)
+         if run_on_remove:
+             self._on_remove(key, value)
+
+     @property
+     def cache(self) -> Mapping[_K, _V]:
+         """Return the internal cache dictionary in order (read-only)."""
+         return _MappingOrderCacheView(
+             self._Cache__data,  # type: ignore
+             self.order)
+
+     @property
+     def order(self) -> Mapping[_K, None]:
+         """Return the internal order dictionary (read-only)."""
+         return MappingProxyType(self._LRUCache__order)  # type: ignore
+
+     @property
+     def capacity(self) -> float:
+         return self.maxsize
+
+     @property
+     def usage(self) -> float:
+         if self.maxsize == 0:
+             return 0
+
+         return self.currsize / self.maxsize
+
+     def stat(self, *, delta: bool = False) -> CacheInfo:
+         """
+         Gets the cumulative number of hits and queries against this cache.
+
+         If `delta=True`, instead gets these statistics
+         since the last call that also passed `delta=True`.
+         """
+         info = CacheInfo(hits=self._hits, total=self._total)
+
+         if delta:
+             info_delta = info - self._last_info
+             self._last_info = info
+             info = info_delta
+
+         return info
+
+     def touch(self, key: _K) -> None:
+         try:
+             self._LRUCache__order.move_to_end(key)  # type: ignore
+         except KeyError:
+             self._LRUCache__order[key] = None  # type: ignore
+
+     @overload
+     def get(self, key: _K, /) -> Optional[_V]:
+         ...
+
+     @overload
+     def get(self, key: _K, /, default: Union[_V, _T]) -> Union[_V, _T]:
+         ...
+
+     def get(self,
+             key: _K,
+             /,
+             default: Optional[Union[_V,
+                                     _T]] = None) -> Optional[Union[_V, _T]]:
+         value: Optional[Union[_V, _T]]
+         if key in self:
+             value = self.__getitem__(
+                 key, update_info=False)  # type: ignore[call-arg]
+
+             self._hits += 1
+         else:
+             value = default
+
+         self._total += 1
+         return value
+
+     @overload
+     def pop(self, key: _K) -> _V:
+         ...
+
+     @overload
+     def pop(self, key: _K, default: Union[_V, _T]) -> Union[_V, _T]:
+         ...
+
+     def pop(self,
+             key: _K,
+             default: Optional[Union[_V,
+                                     _T]] = None) -> Optional[Union[_V, _T]]:
+         value: Optional[Union[_V, _T]]
+         if key not in self:
+             return default
+
+         value = self.__getitem__(key,
+                                  update_info=False)  # type: ignore[call-arg]
+         self.__delitem__(key)
+         return value
+
+     def put(self, key: _K, value: _V) -> None:
+         self.__setitem__(key, value)
+
+     def pin(self, key: _K) -> None:
+         """
+         Pins a key in the cache, preventing it from being
+         evicted in the LRU order.
+         """
+         if key not in self:
+             raise ValueError(f"Cannot pin key: {key} not in cache.")
+         self.pinned_items.add(key)
+
+     def _unpin(self, key: _K) -> None:
+         """
+         Unpins a key in the cache, allowing it to be
+         evicted in the LRU order.
+         """
+         self.pinned_items.remove(key)
+
+     def _on_remove(self, key: _K, value: Optional[_V]) -> None:
+         pass
+
+     def remove_oldest(self, *, remove_pinned: bool = False) -> None:
+         if len(self) == 0:
+             return
+
+         self.popitem(remove_pinned=remove_pinned)
+
+     def _remove_old_if_needed(self) -> None:
+         while self.currsize > self.capacity:
+             self.remove_oldest()
+
+     def popitem(self, remove_pinned: bool = False):
+         """Remove and return the `(key, value)` pair least recently used."""
+         if not remove_pinned:
+             # pop the oldest item in the cache that is not pinned
+             lru_key = next(
+                 (key for key in self.order if key not in self.pinned_items),
+                 ALL_PINNED_SENTINEL)
+             if lru_key is ALL_PINNED_SENTINEL:
+                 raise RuntimeError("All items are pinned, "
+                                    "cannot remove oldest from the cache.")
+         else:
+             lru_key = next(iter(self.order))
+         value = self.pop(cast(_K, lru_key))
+         return (lru_key, value)
+
+     def clear(self) -> None:
+         while len(self) > 0:
+             self.remove_oldest(remove_pinned=True)
+
+         self._hits = 0
+         self._total = 0
+         self._last_info = CacheInfo(hits=0, total=0)
+
+
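The pinning and statistics APIs above interact as follows; a minimal illustrative sketch (not part of the packaged file), assuming the module is imported as `vllm.utils`:

from vllm.utils import LRUCache

cache = LRUCache[str, int](2)   # capacity of two entries
cache.put("a", 1)
cache.put("b", 2)
cache.pin("a")                  # "a" is now exempt from LRU eviction
cache.put("c", 3)               # evicts "b", the oldest *unpinned* key
assert "a" in cache and "b" not in cache

cache.get("a")                  # hit
cache.get("missing")            # miss
assert cache.stat(delta=True) == (1, 2)   # 1 hit out of 2 queries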
+ class PyObjectCache:
+     """Used to cache python objects to avoid object allocations
+     across scheduler iterations.
+     """
+
+     def __init__(self, obj_builder):
+         self._obj_builder = obj_builder
+         self._index = 0
+
+         self._obj_cache = []
+         for _ in range(128):
+             self._obj_cache.append(self._obj_builder())
+
+     def _grow_cache(self):
+         # Double the size of the cache
+         num_objs = len(self._obj_cache)
+         for _ in range(num_objs):
+             self._obj_cache.append(self._obj_builder())
+
+     def get_object(self):
+         """Returns a pre-allocated cached object. If there are not enough
+         objects, the cache size is doubled.
+         """
+         if self._index >= len(self._obj_cache):
+             self._grow_cache()
+             assert self._index < len(self._obj_cache)
+
+         obj = self._obj_cache[self._index]
+         self._index += 1
+
+         return obj
+
+     def reset(self):
+         """Makes all cached objects available for the next scheduler
+         iteration.
+         """
+         self._index = 0
+
+
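For illustration, the intended per-iteration usage pattern might look like the sketch below (not part of the packaged file). Note that `reset()` only rewinds the index, so reused objects keep their previous state until the caller overwrites it:

from vllm.utils import PyObjectCache

cache = PyObjectCache(dict)      # pre-builds 128 empty dicts up front

for _ in range(3):               # e.g. three scheduler iterations
    scratch = cache.get_object() # reuses a pre-allocated dict
    scratch.clear()              # caller is responsible for clearing state
    scratch["batch"] = [1, 2, 3]
    cache.reset()                # recycle all objects for the next iteration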
+ @cache
+ def get_max_shared_memory_bytes(gpu: int = 0) -> int:
+     """Returns the maximum shared memory per thread block in bytes."""
+     from vllm import _custom_ops as ops
+     max_shared_mem = (
+         ops.get_max_shared_memory_per_block_device_attribute(gpu))
+     # a value of 0 will cause MAX_SEQ_LEN to become negative and
+     # test_attention.py will fail
+     assert max_shared_mem > 0, "max_shared_mem can not be zero"
+     return int(max_shared_mem)
+
+
+ def get_cpu_memory() -> int:
+     """Returns the total CPU memory of the node in bytes."""
+     return psutil.virtual_memory().total
+
+
+ def random_uuid() -> str:
+     return str(uuid.uuid4().hex)
+
+
+ class AsyncMicrobatchTokenizer:
+     """Asynchronous tokenizer with micro-batching.
+
+     Pulls pending encode/decode requests from a queue and batches them
+     up to reduce overhead. A single-thread ThreadPoolExecutor is used
+     so the event loop stays responsive.
+     """
+
+     def __init__(
+         self,
+         tokenizer,
+         max_batch_size: int = 32,
+         batch_wait_timeout_s: float = 0.002,
+     ) -> None:
+         self.tokenizer = tokenizer
+         self.max_batch_size = max_batch_size
+         self.batch_wait_timeout_s = batch_wait_timeout_s
+
+         self._loop = asyncio.get_running_loop()
+         self._queues: dict[tuple,
+                            asyncio.Queue[Union[tuple[str, dict,
+                                                      asyncio.Future],
+                                                tuple[list[int],
+                                                      asyncio.Future]]]] = {}
+         self._batcher_tasks: list[asyncio.Task] = []
+
+         # Single-thread executor for blocking tokenizer calls.
+         self._executor = ThreadPoolExecutor(max_workers=1)
+
+     # === Public async API ===
+     async def __call__(self, prompt, **kwargs):
+         result_future: asyncio.Future = self._loop.create_future()
+         key = self._queue_key("encode", kwargs)
+         queue = self._get_queue(self._loop, key)
+         await queue.put((prompt, kwargs, result_future))
+         return await result_future
+
+     async def decode(self, token_ids, **kwargs):
+         result_future: asyncio.Future = self._loop.create_future()
+         key = self._queue_key("decode", kwargs)
+         queue = self._get_queue(self._loop, key)
+         await queue.put((token_ids, result_future))
+         return await result_future
+
+     # === Internal helpers ===
+     def _get_queue(
+         self, loop: asyncio.AbstractEventLoop, key: tuple
+     ) -> asyncio.Queue[Union[tuple[str, dict, asyncio.Future], tuple[
+             list[int], asyncio.Future]]]:
+         """Get the request queue for the given operation key, creating a new
+         queue and batcher task if needed."""
+         queue = self._queues.get(key)
+         if queue is None:
+             self._queues[key] = queue = asyncio.Queue()
+             if key[0] == "encode":
+                 can_batch = key[1] != "other"
+                 coro = self._batch_encode_loop(queue, can_batch)
+             else:
+                 assert key[0] == "decode", \
+                     f"Unknown operation type: {key[0]}."
+                 coro = self._batch_decode_loop(queue)
+             self._batcher_tasks.append(loop.create_task(coro))
+         return queue
+
+     async def _batch_encode_loop(self, queue: asyncio.Queue, can_batch: bool):
+         """Batch incoming encode requests for efficiency."""
+         while True:
+             prompt, kwargs, result_future = await queue.get()
+             prompts = [prompt]
+             kwargs_list = [kwargs]
+             result_futures = [result_future]
+             deadline = self._loop.time() + self.batch_wait_timeout_s
+
+             while len(prompts) < self.max_batch_size:
+                 timeout = deadline - self._loop.time()
+                 if timeout <= 0:
+                     break
+                 try:
+                     prompt, kwargs, result_future = await asyncio.wait_for(
+                         queue.get(), timeout)
+                     prompts.append(prompt)
+                     result_futures.append(result_future)
+                     if not can_batch:
+                         kwargs_list.append(kwargs)
+                 except asyncio.TimeoutError:
+                     break
+
+             try:
+                 # If every request uses identical kwargs we can run a single
+                 # batched tokenizer call for a big speed-up.
+                 if can_batch and len(prompts) > 1:
+                     batch_encode_fn = partial(self.tokenizer, prompts,
+                                               **kwargs)
+                     results = await self._loop.run_in_executor(
+                         self._executor, batch_encode_fn)
+
+                     for i, fut in enumerate(result_futures):
+                         if not fut.done():
+                             data = {k: v[i] for k, v in results.items()}
+                             fut.set_result(BatchEncoding(data))
+                 else:
+                     encode_fn = lambda prompts=prompts, kwargs=kwargs_list: [
+                         self.tokenizer(p, **kw)
+                         for p, kw in zip(prompts, kwargs)
+                     ]
+                     results = await self._loop.run_in_executor(
+                         self._executor, encode_fn)
+
+                     for fut, res in zip(result_futures, results):
+                         if not fut.done():
+                             fut.set_result(res)
+             except Exception as e:
+                 for fut in result_futures:
+                     if not fut.done():
+                         fut.set_exception(e)
+
+     async def _batch_decode_loop(self, queue: asyncio.Queue):
+         """Batch incoming decode requests for efficiency."""
+         while True:
+             token_ids, result_future = await queue.get()
+             token_ids_list = [token_ids]
+             result_futures = [result_future]
+             deadline = self._loop.time() + self.batch_wait_timeout_s
+
+             while len(token_ids_list) < self.max_batch_size:
+                 timeout = deadline - self._loop.time()
+                 if timeout <= 0:
+                     break
+                 try:
+                     token_ids, result_future = await asyncio.wait_for(
+                         queue.get(), timeout)
+                     token_ids_list.append(token_ids)
+                     result_futures.append(result_future)
+                 except asyncio.TimeoutError:
+                     break
+
+             try:
+                 # Perform a single batched decode call for all requests
+                 results = await self._loop.run_in_executor(
+                     self._executor, self.tokenizer.batch_decode,
+                     token_ids_list)
+                 for fut, res in zip(result_futures, results):
+                     if not fut.done():
+                         fut.set_result(res)
+             except Exception as e:
+                 for fut in result_futures:
+                     if not fut.done():
+                         fut.set_exception(e)
+
+     def _queue_key(self, op: str, kwargs: dict) -> tuple:
+         """
+         Return a normalized key describing operation + kwargs.
+
+         - `add_special_tokens`: {True/False}
+         - `truncation`: {True/False}
+         - If `truncation` is False (`max_length` is None),
+           returns a key for a can_batch queue.
+         - If `truncation` is True and `max_length` is None or equals
+           `tokenizer.model_max_length`, returns a key for a can_batch queue.
+         - Otherwise, returns a key for a cannot_batch queue.
+
+         Examples:
+             - Decode: ("decode",)
+             - Encode typical:
+               ("encode", add_special_tokens, bool_truncation, max_length_label)
+             - Fallback: ("encode", "other")
+         """
+
+         if op == "decode":
+             return ("decode", )
+
+         add_special_tokens = kwargs.get("add_special_tokens", True)
+         truncation = kwargs.get("truncation", False)
+         max_length = kwargs.get("max_length")
+
+         if not truncation:
+             return "encode", add_special_tokens, False, None
+
+         model_max = getattr(self.tokenizer, "model_max_length", None)
+         if max_length is None or (model_max is not None
+                                   and max_length == model_max):
+             return "encode", add_special_tokens, True, "model_max"
+
+         return "encode", "other"
+
+     def __del__(self):
+         if ((tasks := getattr(self, "_batcher_tasks", None))
+                 and (loop := getattr(self, "_loop", None))
+                 and not loop.is_closed()):
+
+             def cancel_tasks():
+                 for task in tasks:
+                     task.cancel()
+
+             loop.call_soon_threadsafe(cancel_tasks)
+
+
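A minimal sketch of how this class might be driven (illustrative only; assumes a Hugging Face tokenizer such as `gpt2` is available). Concurrent requests with identical kwargs share one queue and are tokenized in a single batched call:

import asyncio
from transformers import AutoTokenizer
from vllm.utils import AsyncMicrobatchTokenizer

async def main():
    # Must be constructed inside a running event loop.
    tok = AsyncMicrobatchTokenizer(AutoTokenizer.from_pretrained("gpt2"))
    encodings = await asyncio.gather(tok("hello world"), tok("vLLM"))
    texts = await asyncio.gather(
        *(tok.decode(enc["input_ids"]) for enc in encodings))
    print(texts)

asyncio.run(main())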
+ def cancel_task_threadsafe(task: Task):
+     if task and not task.done():
+         run_in_loop(task.get_loop(), task.cancel)
+
+
+ def close_sockets(sockets: Sequence[Union[zmq.Socket, zmq.asyncio.Socket]]):
+     for sock in sockets:
+         if sock is not None:
+             sock.close(linger=0)
+
+
+ def run_in_loop(loop: AbstractEventLoop, function: Callable, *args):
+     if in_loop(loop):
+         function(*args)
+     elif not loop.is_closed():
+         loop.call_soon_threadsafe(function, *args)
+
+
+ def in_loop(event_loop: AbstractEventLoop) -> bool:
+     try:
+         return asyncio.get_running_loop() == event_loop
+     except RuntimeError:
+         return False
+
+
+ def make_async(
+     func: Callable[P, T],
+     executor: Optional[concurrent.futures.Executor] = None
+ ) -> Callable[P, Awaitable[T]]:
+     """Take a blocking function, and run it in an executor thread.
+
+     This function prevents the blocking function from blocking the
+     asyncio event loop.
+     The code in this function needs to be thread safe.
+     """
+
+     def _async_wrapper(*args: P.args, **kwargs: P.kwargs) -> asyncio.Future:
+         loop = asyncio.get_event_loop()
+         p_func = partial(func, *args, **kwargs)
+         return loop.run_in_executor(executor=executor, func=p_func)
+
+     return _async_wrapper
+
+
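For example, a blocking helper can be lifted into a coroutine-friendly form as sketched below (`load_weights_from_disk` is a hypothetical stand-in for any blocking call):

import asyncio
import time
from vllm.utils import make_async

def load_weights_from_disk(path: str) -> str:   # hypothetical blocking I/O
    time.sleep(1)
    return f"loaded {path}"

async def main():
    load_async = make_async(load_weights_from_disk)
    # The event loop stays responsive while the executor thread blocks.
    print(await load_async("/tmp/model.bin"))

asyncio.run(main())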
+ def _next_task(iterator: AsyncGenerator[T, None],
+                loop: AbstractEventLoop) -> Task:
+     # Can use anext() in python >= 3.10
+     return loop.create_task(iterator.__anext__())  # type: ignore[arg-type]
+
+
+ async def merge_async_iterators(
+     *iterators: AsyncGenerator[T,
+                                None], ) -> AsyncGenerator[tuple[int, T], None]:
+     """Merge multiple asynchronous iterators into a single iterator.
+
+     This method handles the case where some iterators finish before others.
+     When it yields, it yields a tuple (i, item) where i is the index of the
+     iterator that yields the item.
+     """
+     if len(iterators) == 1:
+         # Fast-path single iterator case.
+         async for item in iterators[0]:
+             yield 0, item
+         return
+
+     loop = asyncio.get_running_loop()
+
+     awaits = {_next_task(pair[1], loop): pair for pair in enumerate(iterators)}
+     try:
+         while awaits:
+             done, _ = await asyncio.wait(awaits.keys(),
+                                          return_when=FIRST_COMPLETED)
+             for d in done:
+                 pair = awaits.pop(d)
+                 try:
+                     item = await d
+                     i, it = pair
+                     awaits[_next_task(it, loop)] = pair
+                     yield i, item
+                 except StopAsyncIteration:
+                     pass
+     finally:
+         # Cancel any remaining iterators
+         for f, (_, it) in awaits.items():
+             with contextlib.suppress(BaseException):
+                 f.cancel()
+                 await it.aclose()
+
+
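A small sketch of the merged-stream behaviour (illustrative): two generators of different lengths are interleaved, and each yielded item is tagged with the index of its source:

import asyncio
from vllm.utils import merge_async_iterators

async def ticker(name: str, n: int):
    for i in range(n):
        await asyncio.sleep(0.01)
        yield f"{name}{i}"

async def main():
    async for idx, item in merge_async_iterators(ticker("a", 3),
                                                 ticker("b", 2)):
        print(idx, item)   # e.g. (0, "a0"), (1, "b0"), ...

asyncio.run(main())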
+ async def collect_from_async_generator(
+         iterator: AsyncGenerator[T, None]) -> list[T]:
+     """Collect all items from an async generator into a list."""
+     items = []
+     async for item in iterator:
+         items.append(item)
+     return items
+
+
+ def get_ip() -> str:
+     host_ip = envs.VLLM_HOST_IP
+     if "HOST_IP" in os.environ and "VLLM_HOST_IP" not in os.environ:
+         logger.warning(
+             "The environment variable HOST_IP is deprecated and ignored, as"
+             " it is often used by Docker and other software to"
+             " interact with the container's network stack. Please "
+             "use VLLM_HOST_IP instead to set the IP address for vLLM processes"
+             " to communicate with each other.")
+     if host_ip:
+         return host_ip
+
+     # IP is not set, try to get it from the network interface
+
+     # try ipv4
+     s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+     try:
+         s.connect(("8.8.8.8", 80))  # Doesn't need to be reachable
+         return s.getsockname()[0]
+     except Exception:
+         pass
+
+     # try ipv6
+     try:
+         s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
+         # Google's public DNS server, see
+         # https://developers.google.com/speed/public-dns/docs/using#addresses
+         s.connect(("2001:4860:4860::8888", 80))  # Doesn't need to be reachable
+         return s.getsockname()[0]
+     except Exception:
+         pass
+
+     warnings.warn(
+         "Failed to get the IP address, using 0.0.0.0 by default. "
+         "The value can be set by the environment variable"
+         " VLLM_HOST_IP or HOST_IP.",
+         stacklevel=2)
+     return "0.0.0.0"
+
+
+ def test_loopback_bind(address, family):
+     try:
+         s = socket.socket(family, socket.SOCK_DGRAM)
+         s.bind((address, 0))  # Port 0 = auto assign
+         s.close()
+         return True
+     except OSError:
+         return False
+
+
+ def get_loopback_ip() -> str:
+     loopback_ip = envs.VLLM_LOOPBACK_IP
+     if loopback_ip:
+         return loopback_ip
+
+     # VLLM_LOOPBACK_IP is not set, try to get it based on network interface
+
+     if test_loopback_bind("127.0.0.1", socket.AF_INET):
+         return "127.0.0.1"
+     elif test_loopback_bind("::1", socket.AF_INET6):
+         return "::1"
+     else:
+         raise RuntimeError(
+             "Neither 127.0.0.1 nor ::1 are bound to a local interface. "
+             "Set the VLLM_LOOPBACK_IP environment variable explicitly.")
+
+
+ def is_valid_ipv6_address(address: str) -> bool:
+     try:
+         ipaddress.IPv6Address(address)
+         return True
+     except ValueError:
+         return False
+
+
+ def split_host_port(host_port: str) -> tuple[str, int]:
+     # ipv6
+     if host_port.startswith('['):
+         host, port = host_port.rsplit(']', 1)
+         host = host[1:]
+         port = port.split(':')[1]
+         return host, int(port)
+     else:
+         host, port = host_port.split(':')
+         return host, int(port)
+
+
+ def join_host_port(host: str, port: int) -> str:
+     if is_valid_ipv6_address(host):
+         return f"[{host}]:{port}"
+     else:
+         return f"{host}:{port}"
+
+
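These two helpers round-trip both address families; a quick sketch:

from vllm.utils import join_host_port, split_host_port

assert join_host_port("::1", 8000) == "[::1]:8000"       # IPv6 gets brackets
assert split_host_port("[::1]:8000") == ("::1", 8000)
assert split_host_port("127.0.0.1:8000") == ("127.0.0.1", 8000)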
+ def get_distributed_init_method(ip: str, port: int) -> str:
+     return get_tcp_uri(ip, port)
+
+
+ def get_tcp_uri(ip: str, port: int) -> str:
+     if is_valid_ipv6_address(ip):
+         return f"tcp://[{ip}]:{port}"
+     else:
+         return f"tcp://{ip}:{port}"
+
+
+ def get_open_zmq_ipc_path() -> str:
+     base_rpc_path = envs.VLLM_RPC_BASE_PATH
+     return f"ipc://{base_rpc_path}/{uuid4()}"
+
+
+ def get_open_zmq_inproc_path() -> str:
+     return f"inproc://{uuid4()}"
+
+
+ def get_open_port() -> int:
+     """
+     Get an open port for the vLLM process to listen on.
+
+     An edge case to handle: when we run data parallel, we need to avoid
+     ports that are potentially used by the data parallel master process.
+     Right now we reserve 10 ports for the data parallel master process,
+     which currently uses 2 of them.
+     """
+     if "VLLM_DP_MASTER_PORT" in os.environ:
+         dp_master_port = envs.VLLM_DP_MASTER_PORT
+         reserved_port_range = range(dp_master_port, dp_master_port + 10)
+         while True:
+             candidate_port = _get_open_port()
+             if candidate_port not in reserved_port_range:
+                 return candidate_port
+     return _get_open_port()
+
+
+ def get_open_ports_list(count: int = 5) -> list[int]:
+     """Get a list of open ports."""
+     ports = set[int]()
+     while len(ports) < count:
+         ports.add(get_open_port())
+     return list(ports)
+
+
+ def _get_open_port() -> int:
+     port = envs.VLLM_PORT
+     if port is not None:
+         while True:
+             try:
+                 with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+                     s.bind(("", port))
+                     return port
+             except OSError:
+                 port += 1  # Increment port number if already in use
+                 logger.info("Port %d is already in use, trying port %d",
+                             port - 1, port)
+     # try ipv4
+     try:
+         with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+             s.bind(("", 0))
+             return s.getsockname()[1]
+     except OSError:
+         # try ipv6
+         with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
+             s.bind(("", 0))
+             return s.getsockname()[1]
+
+
+ def find_process_using_port(port: int) -> Optional[psutil.Process]:
+     # TODO: We can not check for running processes with network
+     # port on macOS. Therefore, we can not have a full graceful shutdown
+     # of vLLM. For now, let's not look for processes in this case.
+     # Ref: https://www.florianreinhard.de/accessdenied-in-psutil/
+     if sys.platform.startswith("darwin"):
+         return None
+
+     our_pid = os.getpid()
+     for conn in psutil.net_connections():
+         if conn.laddr.port == port and (conn.pid is not None
+                                         and conn.pid != our_pid):
+             try:
+                 return psutil.Process(conn.pid)
+             except psutil.NoSuchProcess:
+                 return None
+     return None
+
+
+ def update_environment_variables(envs: dict[str, str]):
+     for k, v in envs.items():
+         if k in os.environ and os.environ[k] != v:
+             logger.warning(
+                 "Overwriting environment variable %s "
+                 "from '%s' to '%s'", k, os.environ[k], v)
+         os.environ[k] = v
+
+
+ def chunk_list(lst: list[T], chunk_size: int):
+     """Yield successive chunk_size chunks from lst."""
+     for i in range(0, len(lst), chunk_size):
+         yield lst[i:i + chunk_size]
+
+
+ def cdiv(a: int, b: int) -> int:
+     """Ceiling division."""
+     return -(a // -b)
+
+
+ def next_power_of_2(n) -> int:
+     """The next power of 2 (inclusive)"""
+     if n < 1:
+         return 1
+     return 1 << (n - 1).bit_length()
+
+
+ def prev_power_of_2(n: int) -> int:
+     """The previous power of 2 (inclusive)"""
+     if n <= 0:
+         return 0
+     return 1 << (n.bit_length() - 1)
+
+
+ def round_up(x: int, y: int) -> int:
+     return ((x + y - 1) // y) * y
+
+
+ def round_down(x: int, y: int) -> int:
+     return (x // y) * y
+
+
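The integer helpers above are easiest to read with a few concrete values (illustrative):

from vllm.utils import cdiv, next_power_of_2, prev_power_of_2, round_up

assert cdiv(7, 2) == 4             # ceil(7 / 2), with no float round-trip
assert next_power_of_2(17) == 32   # "inclusive": next_power_of_2(16) == 16
assert prev_power_of_2(17) == 16
assert round_up(10, 8) == 16       # smallest multiple of 8 that is >= 10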
+ def _generate_random_fp8(
+     tensor: torch.Tensor,
+     low: float,
+     high: float,
+ ) -> None:
+     # NOTE(zhaoyang): Due to the NaN and Inf representation of the fp8 data
+     # types, Inf or NaN may occur if we directly use torch.randint
+     # to generate random data for fp8 data.
+     # For example, s.11111.00 in fp8e5m2 format represents Inf.
+     #     | E4M3       | E5M2
+     # ----|------------|-------------------
+     # Inf | N/A        | s.11111.00
+     # NaN | s.1111.111 | s.11111.{01,10,11}
+     from vllm import _custom_ops as ops
+     tensor_tmp = torch.empty_like(tensor, dtype=torch.float16)
+     tensor_tmp.uniform_(low, high)
+     ops.convert_fp8(tensor, tensor_tmp)
+     del tensor_tmp
+
+
+ def get_kv_cache_torch_dtype(
+         cache_dtype: Optional[Union[str, torch.dtype]],
+         model_dtype: Optional[Union[str, torch.dtype]] = None) -> torch.dtype:
+     if isinstance(cache_dtype, str):
+         if cache_dtype == "auto":
+             if isinstance(model_dtype,
+                           str) and model_dtype in STR_DTYPE_TO_TORCH_DTYPE:
+                 torch_dtype = STR_DTYPE_TO_TORCH_DTYPE[model_dtype]
+             elif isinstance(model_dtype, torch.dtype):
+                 torch_dtype = model_dtype
+             else:
+                 raise ValueError(f"Invalid model dtype: {model_dtype}")
+         elif cache_dtype in STR_DTYPE_TO_TORCH_DTYPE:
+             torch_dtype = STR_DTYPE_TO_TORCH_DTYPE[cache_dtype]
+         else:
+             raise ValueError(f"Invalid kv cache dtype: {cache_dtype}")
+     elif isinstance(cache_dtype, torch.dtype):
+         torch_dtype = cache_dtype
+     else:
+         raise ValueError(f"Invalid kv cache dtype: {cache_dtype}")
+     return torch_dtype
+
+
+ def create_kv_caches_with_random_flash(
+     num_blocks: int,
+     block_size: int,
+     num_layers: int,
+     num_heads: int,
+     head_size: int,
+     cache_dtype: Optional[Union[str, torch.dtype]],
+     model_dtype: Optional[Union[str, torch.dtype]] = None,
+     seed: Optional[int] = None,
+     device: Optional[str] = "cuda",
+     cache_layout: Optional[str] = "NHD",
+ ) -> tuple[list[torch.Tensor], list[torch.Tensor]]:
+     from vllm.platforms import current_platform
+     current_platform.seed_everything(seed)
+
+     torch_dtype = get_kv_cache_torch_dtype(cache_dtype, model_dtype)
+     generic_kv_cache_shape = (num_blocks, 2, block_size, num_heads, head_size)
+     assert cache_layout in ("NHD", "HND")
+     stride_order = (0, 1, 2, 3, 4) if cache_layout == "NHD" else (0, 1, 3, 2,
+                                                                   4)
+
+     kv_cache_allocation_shape = tuple(generic_kv_cache_shape[i]
+                                       for i in stride_order)
+     scale = head_size**-0.5
+
+     key_caches: list[torch.Tensor] = []
+     value_caches: list[torch.Tensor] = []
+
+     for _ in range(num_layers):
+         key_value_cache = torch.empty(size=kv_cache_allocation_shape,
+                                       dtype=torch_dtype,
+                                       device=device).permute(*stride_order)
+         if cache_dtype in ["auto", "half", "bfloat16", "float"]:
+             key_value_cache.uniform_(-scale, scale)
+         elif cache_dtype == 'fp8':
+             _generate_random_fp8(key_value_cache, -scale, scale)
+         else:
+             raise ValueError(
+                 f"Does not support key cache of type {cache_dtype}")
+         key_caches.append(key_value_cache[:, 0])
+         value_caches.append(key_value_cache[:, 1])
+     return key_caches, value_caches
+
+
+ def create_kv_caches_with_random(
+     num_blocks: int,
+     block_size: int,
+     num_layers: int,
+     num_heads: int,
+     head_size: int,
+     cache_dtype: Optional[Union[str, torch.dtype]],
+     model_dtype: Optional[Union[str, torch.dtype]] = None,
+     seed: Optional[int] = None,
+     device: Optional[str] = "cuda",
+ ) -> tuple[list[torch.Tensor], list[torch.Tensor]]:
+     if cache_dtype == "fp8" and head_size % 16:
+         raise ValueError(
+             f"Does not support key cache of type fp8 with head_size {head_size}"
+         )
+     from vllm.platforms import current_platform
+     current_platform.seed_everything(seed)
+
+     torch_dtype = get_kv_cache_torch_dtype(cache_dtype, model_dtype)
+
+     scale = head_size**-0.5
+     x = 16 // torch.tensor([], dtype=torch_dtype).element_size()
+     key_cache_shape = (num_blocks, num_heads, head_size // x, block_size, x)
+     key_caches: list[torch.Tensor] = []
+     for _ in range(num_layers):
+         key_cache = torch.empty(size=key_cache_shape,
+                                 dtype=torch_dtype,
+                                 device=device)
+         if cache_dtype in ["auto", "half", "bfloat16", "float"]:
+             key_cache.uniform_(-scale, scale)
+         elif cache_dtype == 'fp8':
+             _generate_random_fp8(key_cache, -scale, scale)
+         else:
+             raise ValueError(
+                 f"Does not support key cache of type {cache_dtype}")
+         key_caches.append(key_cache)
+
+     value_cache_shape = (num_blocks, num_heads, head_size, block_size)
+     value_caches: list[torch.Tensor] = []
+     for _ in range(num_layers):
+         value_cache = torch.empty(size=value_cache_shape,
+                                   dtype=torch_dtype,
+                                   device=device)
+         if cache_dtype in ["auto", "half", "bfloat16", "float"]:
+             value_cache.uniform_(-scale, scale)
+         elif cache_dtype == 'fp8':
+             _generate_random_fp8(value_cache, -scale, scale)
+         else:
+             raise ValueError(
+                 f"Does not support value cache of type {cache_dtype}")
+         value_caches.append(value_cache)
+     return key_caches, value_caches
+
+
+ @cache
+ def is_pin_memory_available() -> bool:
+     from vllm.platforms import current_platform
+     return current_platform.is_pin_memory_available()
+
+
+ @cache
+ def is_uva_available() -> bool:
+     """Check if Unified Virtual Addressing (UVA) is available."""
+     # UVA requires pinned memory.
+     # TODO: Add more requirements for UVA if needed.
+     return is_pin_memory_available()
+
+
+ class DeviceMemoryProfiler:
+
+     def __init__(self, device: Optional[torch.types.Device] = None):
+         self.device = device
+
+     def current_memory_usage(self) -> float:
+         # Return the memory usage in bytes.
+         from vllm.platforms import current_platform
+         gc.collect()
+         return current_platform.get_current_memory_usage(self.device)
+
+     def __enter__(self):
+         self.initial_memory = self.current_memory_usage()
+         # This allows us to call methods of the context manager if needed
+         return self
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         self.final_memory = self.current_memory_usage()
+         self.consumed_memory = self.final_memory - self.initial_memory
+
+         # Force garbage collection
+         gc.collect()
+
+
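The profiler is meant to wrap an allocation of interest as a context manager; a sketch (assumes a CUDA-capable platform):

import torch
from vllm.utils import DeviceMemoryProfiler

with DeviceMemoryProfiler() as profiler:
    weights = torch.empty(1 << 20, device="cuda")   # ~4 MiB of float32

# `consumed_memory` is populated on exit, in bytes.
print(f"consumed {profiler.consumed_memory / (1 << 20):.1f} MiB")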
+ def make_ndarray_with_pad(
+     x: list[list[T]],
+     pad: T,
+     dtype: npt.DTypeLike,
+     *,
+     max_len: Optional[int] = None,
+ ) -> npt.NDArray:
+     """
+     Make a padded array from 2D inputs.
+
+     The padding is applied to the end of each inner list until it reaches
+     `max_len`.
+     """
+     if max_len is None:
+         # Unlike for most functions, map is faster than a genexpr over `len`
+         max_len = max(map(len, x), default=0)
+
+     padded_x = np.full((len(x), max_len), pad, dtype=dtype)
+     for ind, blocktb in enumerate(x):
+         assert len(blocktb) <= max_len
+         padded_x[ind, :len(blocktb)] = blocktb
+
+     return padded_x
+
+
+ def make_tensor_with_pad(
+     x: list[list[T]],
+     pad: T,
+     dtype: torch.dtype,
+     *,
+     max_len: Optional[int] = None,
+     device: Optional[Union[str, torch.device]] = None,
+     pin_memory: bool = False,
+ ) -> torch.Tensor:
+     """
+     Make a padded tensor from 2D inputs.
+
+     The padding is applied to the end of each inner list until it reaches
+     `max_len`.
+     """
+     np_dtype = TORCH_DTYPE_TO_NUMPY_DTYPE[dtype]
+     padded_x = make_ndarray_with_pad(x, pad, np_dtype, max_len=max_len)
+
+     tensor = torch.from_numpy(padded_x).to(device)
+     if pin_memory:
+         tensor = tensor.pin_memory()
+
+     return tensor
+
+
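For example, ragged token-ID lists are right-padded into one rectangular tensor (sketch):

import torch
from vllm.utils import make_tensor_with_pad

t = make_tensor_with_pad([[1, 2, 3], [4]], pad=0, dtype=torch.int64)
# tensor([[1, 2, 3],
#         [4, 0, 0]])
assert t.shape == (2, 3)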
+ def async_tensor_h2d(
+     data: list,
+     dtype: torch.dtype,
+     target_device: Union[str, torch.device],
+     pin_memory: bool,
+ ) -> torch.Tensor:
+     """Asynchronously create a tensor and copy it from host to device."""
+     t = torch.tensor(data, dtype=dtype, pin_memory=pin_memory, device="cpu")
+     return t.to(device=target_device, non_blocking=True)
+
+
+ def get_dtype_size(dtype: torch.dtype) -> int:
+     """Get the size of the data type in bytes."""
+     return torch.tensor([], dtype=dtype).element_size()
+
+
+ # bool = 0, int = 1, float = 2, complex = 3
+ def _get_precision_level(dtype: torch.dtype) -> int:
+     # NOTE: Complex dtypes return `is_floating_point=False`
+     return ((dtype != torch.bool) + dtype.is_floating_point +
+             dtype.is_complex * 2)
+
+
+ def is_lossless_cast(src_dtype: torch.dtype, tgt_dtype: torch.dtype):
+     """
+     Test whether it is lossless to cast a tensor from
+     `src_dtype` to `tgt_dtype`.
+     """
+     if src_dtype == tgt_dtype:
+         return True
+
+     src_level = _get_precision_level(src_dtype)
+     tgt_level = _get_precision_level(tgt_dtype)
+
+     if src_level < tgt_level:
+         return True
+     if src_level > tgt_level:
+         return False
+
+     # Compare integral types
+     if not src_dtype.is_floating_point and not src_dtype.is_complex:
+         src_info = torch.iinfo(src_dtype)
+         tgt_info = torch.iinfo(tgt_dtype)
+         return src_info.min >= tgt_info.min and src_info.max <= tgt_info.max
+
+     # Compare floating-point types
+     src_info = torch.finfo(src_dtype)
+     tgt_info = torch.finfo(tgt_dtype)
+     return (src_info.min >= tgt_info.min and src_info.max <= tgt_info.max
+             and src_info.resolution >= tgt_info.resolution)
+
+
+ def common_broadcastable_dtype(dtypes: Collection[torch.dtype]):
+     """
+     Get the common `dtype` where all of the other `dtypes` can be
+     cast to it without losing any information.
+     """
+     return max(
+         dtypes,
+         key=lambda dtype: sum(is_lossless_cast(dt, dtype) for dt in dtypes),
+     )
+
+
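The level-then-range comparison gives the expected answers for common dtypes; a few spot checks (illustrative):

import torch
from vllm.utils import common_broadcastable_dtype, is_lossless_cast

assert is_lossless_cast(torch.int32, torch.int64)           # wider int range
assert not is_lossless_cast(torch.float32, torch.bfloat16)  # loses precision
assert common_broadcastable_dtype(
    [torch.float16, torch.float32]) == torch.float32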
+ def as_list(maybe_list: Iterable[T]) -> list[T]:
+     """Convert iterable to list, unless it's already a list."""
+     return maybe_list if isinstance(maybe_list, list) else list(maybe_list)
+
+
+ def as_iter(obj: Union[T, Iterable[T]]) -> Iterable[T]:
+     if isinstance(obj, str) or not isinstance(obj, Iterable):
+         return [obj]  # type: ignore[list-item]
+     return obj
+
+
+ # `collections` helpers
+ def is_list_of(
+     value: object,
+     typ: Union[type[T], tuple[type[T], ...]],
+     *,
+     check: Literal["first", "all"] = "first",
+ ) -> TypeIs[list[T]]:
+     if not isinstance(value, list):
+         return False
+
+     if check == "first":
+         return len(value) == 0 or isinstance(value[0], typ)
+     elif check == "all":
+         return all(isinstance(v, typ) for v in value)
+
+     assert_never(check)
+
+
+ def flatten_2d_lists(lists: Iterable[Iterable[T]]) -> list[T]:
+     """Flatten a list of lists to a single list."""
+     return [item for sublist in lists for item in sublist]
+
+
+ def full_groupby(values: Iterable[_V], *, key: Callable[[_V], _K]):
+     """
+     Unlike [`itertools.groupby`][], groups are not broken by
+     non-contiguous data.
+     """
+     groups = defaultdict[_K, list[_V]](list)
+
+     for value in values:
+         groups[key(value)].append(value)
+
+     return groups.items()
+
+
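Unlike `itertools.groupby`, the input does not need to be pre-sorted by key; a short sketch:

from vllm.utils import full_groupby

fruits = ["apple", "banana", "avocado", "blueberry"]
groups = dict(full_groupby(fruits, key=lambda s: s[0]))
assert groups == {"a": ["apple", "avocado"], "b": ["banana", "blueberry"]}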
+ # TODO: This function can be removed if transformer_modules classes are
+ # serialized by value when communicating between processes
+ def init_cached_hf_modules() -> None:
+     """
+     Lazy initialization of the Hugging Face modules.
+     """
+     from transformers.dynamic_module_utils import init_hf_modules
+     init_hf_modules()
+
+
+ @cache
+ def find_library(lib_name: str) -> str:
+     """
+     Find the library file in the system.
+     `lib_name` is the full filename, with both prefix and suffix.
+     This function resolves `lib_name` to the full path of the library.
+     """
+     # Adapted from https://github.com/openai/triton/blob/main/third_party/nvidia/backend/driver.py#L19  # noqa
+     # According to https://en.wikipedia.org/wiki/Filesystem_Hierarchy_Standard
+     # `/sbin/ldconfig` should exist in all Linux systems.
+     # `/sbin/ldconfig` searches the library in the system
+     libs = subprocess.check_output(["/sbin/ldconfig", "-p"]).decode()
+     # each line looks like the following:
+     # libcuda.so.1 (libc6,x86-64) => /lib/x86_64-linux-gnu/libcuda.so.1
+     locs = [line.split()[-1] for line in libs.splitlines() if lib_name in line]
+     # `LD_LIBRARY_PATH` searches the library in the user-defined paths
+     env_ld_library_path = envs.LD_LIBRARY_PATH
+     if not locs and env_ld_library_path:
+         locs = [
+             os.path.join(dir, lib_name)
+             for dir in env_ld_library_path.split(":")
+             if os.path.exists(os.path.join(dir, lib_name))
+         ]
+     if not locs:
+         raise ValueError(f"Cannot find {lib_name} in the system.")
+     return locs[0]
+
+
+ def find_nccl_library() -> str:
+     """
+     We either use the library file specified by the `VLLM_NCCL_SO_PATH`
+     environment variable, or we find the library file brought by PyTorch.
+     After importing `torch`, `libnccl.so.2` or `librccl.so.1` can be
+     found by `ctypes` automatically.
+     """
+     so_file = envs.VLLM_NCCL_SO_PATH
+
+     # manually load the nccl library
+     if so_file:
+         logger.info(
+             "Found nccl from environment variable VLLM_NCCL_SO_PATH=%s",
+             so_file)
+     else:
+         if torch.version.cuda is not None:
+             so_file = "libnccl.so.2"
+         elif torch.version.hip is not None:
+             so_file = "librccl.so.1"
+         else:
+             raise ValueError("NCCL only supports CUDA and ROCm backends.")
+         logger.info("Found nccl from library %s", so_file)
+     return so_file
+
+
+ def find_nccl_include_paths() -> Optional[list[str]]:
+     """
+     We either use the nccl.h specified by the `VLLM_NCCL_INCLUDE_PATH`
+     environment variable, or we find the header file brought by
+     nvidia-nccl-cuXX. load_inline by default uses
+     torch.utils.cpp_extension.include_paths
+     """
+     paths: list[str] = []
+     inc = envs.VLLM_NCCL_INCLUDE_PATH
+     if inc and os.path.isdir(inc):
+         paths.append(inc)
+
+     try:
+         import importlib.util
+         spec = importlib.util.find_spec("nvidia.nccl")
+         if spec and getattr(spec, "submodule_search_locations", None):
+             for loc in spec.submodule_search_locations:
+                 inc_dir = os.path.join(loc, "include")
+                 if os.path.exists(os.path.join(inc_dir, "nccl.h")):
+                     paths.append(inc_dir)
+     except Exception:
+         pass
+
+     seen = set()
+     out: list[str] = []
+     for p in paths:
+         if p and p not in seen:
+             out.append(p)
+             seen.add(p)
+     return out or None
+
+
+ prev_set_stream = torch.cuda.set_stream
+
+ _current_stream_tls = threading.local()
+
+
+ def _patched_set_stream(stream: torch.cuda.Stream) -> None:
+     _current_stream_tls.value = stream
+     prev_set_stream(stream)
+
+
+ torch.cuda.set_stream = _patched_set_stream
+
+
+ class _StreamPlaceholder:
+
+     def __init__(self):
+         self.synchronize = lambda: None
+
+
+ def current_stream() -> torch.cuda.Stream:
+     """
+     Replace `torch.cuda.current_stream()` with `vllm.utils.current_stream()`.
+     It turns out that `torch.cuda.current_stream()` is quite expensive,
+     as it will construct a new stream object at each call.
+     Here we patch `torch.cuda.set_stream` to keep track of the current stream
+     directly, so that we can avoid calling `torch.cuda.current_stream()`.
+
+     The underlying hypothesis is that we do not call
+     `torch._C._cuda_setStream` from C/C++ code.
+     """
+     from vllm.platforms import current_platform
+     if not hasattr(_current_stream_tls,
+                    "value") or _current_stream_tls.value is None:
+         # when this function is called before any stream is set,
+         # we return the default stream.
+         # On ROCm using the default 0 stream in combination with RCCL
+         # is hurting performance. Therefore creating a dedicated stream
+         # per process
+         if current_platform.is_rocm():
+             # torch.cuda.set_stream here is an alias of _patched_set_stream
+             torch.cuda.set_stream(torch.cuda.Stream())
+         elif current_platform.is_cpu():
+             _current_stream_tls.value = _StreamPlaceholder()
+         else:
+             current_stream = current_platform.current_stream
+             if current_stream is not None:
+                 _current_stream_tls.value = current_stream()
+             else:
+                 raise ValueError(
+                     "Fail to set current stream, current platform "
+                     "may not support current_stream with torch API")
+     return _current_stream_tls.value
+
+
+ def enable_trace_function_call_for_thread(vllm_config: VllmConfig) -> None:
+     """Set up function tracing for the current thread,
+     if enabled via the VLLM_TRACE_FUNCTION environment variable.
+     """
+
+     if envs.VLLM_TRACE_FUNCTION:
+         tmp_dir = tempfile.gettempdir()
+         # add username to tmp_dir to avoid permission issues
+         tmp_dir = os.path.join(tmp_dir, getpass.getuser())
+         filename = (f"VLLM_TRACE_FUNCTION_for_process_{os.getpid()}"
+                     f"_thread_{threading.get_ident()}_"
+                     f"at_{datetime.datetime.now()}.log").replace(" ", "_")
+         log_path = os.path.join(tmp_dir, "vllm",
+                                 f"vllm-instance-{vllm_config.instance_id}",
+                                 filename)
+         os.makedirs(os.path.dirname(log_path), exist_ok=True)
+         enable_trace_function_call(log_path)
+
+
+ # `functools` helpers
+ def identity(value: T, **kwargs) -> T:
+     """Returns the first provided value."""
+     return value
+
+
+ F = TypeVar('F', bound=Callable[..., Any])
+
+
+ def deprecate_args(
+     start_index: int,
+     is_deprecated: Union[bool, Callable[[], bool]] = True,
+     additional_message: Optional[str] = None,
+ ) -> Callable[[F], F]:
+     if not callable(is_deprecated):
+         is_deprecated = partial(identity, is_deprecated)
+
+     def wrapper(fn: F) -> F:
+
+         params = inspect.signature(fn).parameters
+         pos_types = (
+             inspect.Parameter.POSITIONAL_ONLY,
+             inspect.Parameter.POSITIONAL_OR_KEYWORD,
+         )
+         pos_kws = [
+             kw for kw, param in params.items() if param.kind in pos_types
+         ]
+
+         @wraps(fn)
+         def inner(*args, **kwargs):
+             if is_deprecated():
+                 deprecated_args = pos_kws[start_index:len(args)]
+                 if deprecated_args:
+                     msg = (
+                         f"The positional arguments {deprecated_args} are "
+                         "deprecated and will be removed in a future update.")
+                     if additional_message is not None:
+                         msg += f" {additional_message}"
+
+                     warnings.warn(
+                         DeprecationWarning(msg),
+                         stacklevel=3,  # The inner function takes up one level
+                     )
+
+             return fn(*args, **kwargs)
+
+         return inner  # type: ignore
+
+     return wrapper
+
+
+ def deprecate_kwargs(
+     *kws: str,
+     is_deprecated: Union[bool, Callable[[], bool]] = True,
+     additional_message: Optional[str] = None,
+ ) -> Callable[[F], F]:
+     deprecated_kws = set(kws)
+
+     if not callable(is_deprecated):
+         is_deprecated = partial(identity, is_deprecated)
+
+     def wrapper(fn: F) -> F:
+
+         @wraps(fn)
+         def inner(*args, **kwargs):
+             if is_deprecated():
+                 deprecated_kwargs = kwargs.keys() & deprecated_kws
+                 if deprecated_kwargs:
+                     msg = (
+                         f"The keyword arguments {deprecated_kwargs} are "
+                         "deprecated and will be removed in a future update.")
+                     if additional_message is not None:
+                         msg += f" {additional_message}"
+
+                     warnings.warn(
+                         DeprecationWarning(msg),
+                         stacklevel=3,  # The inner function takes up one level
+                     )
+
+             return fn(*args, **kwargs)
+
+         return inner  # type: ignore
+
+     return wrapper
+
+
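A sketch of the decorator in use (`legacy_flag` and `new_flag` are hypothetical parameter names):

from vllm.utils import deprecate_kwargs

@deprecate_kwargs("legacy_flag", additional_message="Use `new_flag` instead.")
def configure(new_flag: bool = False, legacy_flag: bool = False) -> None:
    ...

configure(legacy_flag=True)   # emits a DeprecationWarning
configure(new_flag=True)      # no warning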
+ @lru_cache(maxsize=8)
+ def _cuda_device_count_stateless(
+         cuda_visible_devices: Optional[str] = None) -> int:
+     # Note: cuda_visible_devices is not used, but we keep it as an argument for
+     # LRU Cache purposes.
+
+     # Code below is based on
+     # https://github.com/pytorch/pytorch/blob/
+     # c1cd946818442aca8c7f812b16d187ce1586c3bc/
+     # torch/cuda/__init__.py#L831C1-L831C17
+     import torch.cuda
+     import torch.version
+
+     from vllm.platforms import current_platform
+     if not torch.cuda._is_compiled():
+         return 0
+     if current_platform.is_rocm():
+         # ROCm uses amdsmi instead of nvml for stateless device count.
+         # This requires a sufficiently modern version of Torch (>= 2.4.0).
+         raw_count = torch.cuda._device_count_amdsmi() if (hasattr(
+             torch.cuda, "_device_count_amdsmi")) else -1
+     else:
+         raw_count = torch.cuda._device_count_nvml()
+     r = torch._C._cuda_getDeviceCount() if raw_count < 0 else raw_count
+     return r
+
+
+ def cuda_device_count_stateless() -> int:
+     """Get number of CUDA devices, caching based on the value of
+     CUDA_VISIBLE_DEVICES at the time of call.
+
+     This should be used instead of torch.cuda.device_count()
+     unless CUDA_VISIBLE_DEVICES has already been set to the desired
+     value."""
+
+     # This can be removed and simply replaced with torch.cuda.get_device_count
+     # after https://github.com/pytorch/pytorch/pull/122815 is released.
+     return _cuda_device_count_stateless(envs.CUDA_VISIBLE_DEVICES)
+
+
+ def cuda_is_initialized() -> bool:
+     """Check if CUDA is initialized."""
+     if not torch.cuda._is_compiled():
+         return False
+     return torch.cuda.is_initialized()
+
+
+ def xpu_is_initialized() -> bool:
+     """Check if XPU is initialized."""
+     if not torch.xpu._is_compiled():
+         return False
+     return torch.xpu.is_initialized()
+
+
+ def cuda_get_device_properties(device,
+                                names: Sequence[str],
+                                init_cuda=False) -> tuple[Any, ...]:
+     """Get specified CUDA device property values without initializing CUDA in
+     the current process."""
+     if init_cuda or cuda_is_initialized():
+         props = torch.cuda.get_device_properties(device)
+         return tuple(getattr(props, name) for name in names)
+
+     # Run in subprocess to avoid initializing CUDA as a side effect.
+     mp_ctx = multiprocessing.get_context("fork")
+     with ProcessPoolExecutor(max_workers=1, mp_context=mp_ctx) as executor:
+         return executor.submit(cuda_get_device_properties, device, names,
+                                True).result()
+
+
+ def weak_bind(bound_method: Callable[..., Any], ) -> Callable[..., None]:
+     """Make an instance method that weakly references
+     its associated instance and no-ops once that
+     instance is collected."""
+     ref = weakref.ref(bound_method.__self__)  # type: ignore[attr-defined]
+     unbound = bound_method.__func__  # type: ignore[attr-defined]
+
+     def weak_bound(*args, **kwargs) -> None:
+         if inst := ref():
+             unbound(inst, *args, **kwargs)
+
+     return weak_bound
+
+
+ def run_once(f: Callable[P, None]) -> Callable[P, None]:
+
+     def wrapper(*args: P.args, **kwargs: P.kwargs) -> None:
+         if wrapper.has_run:  # type: ignore[attr-defined]
+             return
+
+         with wrapper.lock:  # type: ignore[attr-defined]
+             if not wrapper.has_run:  # type: ignore[attr-defined]
+                 wrapper.has_run = True  # type: ignore[attr-defined]
+                 return f(*args, **kwargs)
+
+     wrapper.has_run = False  # type: ignore[attr-defined]
+     wrapper.lock = threading.Lock()  # type: ignore[attr-defined]
+     return wrapper
+
+
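The double-checked lock above makes the wrapper safe to call from multiple threads; a sketch (`init_resources` is a hypothetical one-time setup function):

from vllm.utils import run_once

@run_once
def init_resources() -> None:
    print("initialized")

init_resources()   # prints "initialized"
init_resources()   # no-op on every later call, from any thread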
1679
+ class StoreBoolean(Action):
1680
+
1681
+ def __call__(self, parser, namespace, values, option_string=None):
1682
+ if values.lower() == "true":
1683
+ setattr(namespace, self.dest, True)
1684
+ elif values.lower() == "false":
1685
+ setattr(namespace, self.dest, False)
1686
+ else:
1687
+ raise ValueError(f"Invalid boolean value: {values}. "
1688
+ "Expected 'true' or 'false'.")
1689
+
1690
+
1691
+ class SortedHelpFormatter(ArgumentDefaultsHelpFormatter,
+                           RawDescriptionHelpFormatter):
+     """SortedHelpFormatter that sorts arguments by their option strings."""
+ 
+     def _split_lines(self, text, width):
+         """
+         1. Sentences split across lines have their single newlines removed.
+         2. Paragraphs and explicit newlines are split into separate lines.
+         3. Each line is wrapped to the specified width (width of terminal).
+         """
+         # The patterns also include whitespace after the newline
+         single_newline = re.compile(r"(?<!\n)\n(?!\n)\s*")
+         multiple_newlines = re.compile(r"\n{2,}\s*")
+         text = single_newline.sub(' ', text)
+         lines = re.split(multiple_newlines, text)
+         return sum([textwrap.wrap(line, width) for line in lines], [])
+ 
+     def add_arguments(self, actions):
+         actions = sorted(actions, key=lambda x: x.option_strings)
+         super().add_arguments(actions)
+ 
+ 
+ class FlexibleArgumentParser(ArgumentParser):
+     """ArgumentParser that allows both underscore and dash in names."""
+ 
+     _deprecated: set[Action] = set()
+     _json_tip: str = (
+         "When passing JSON CLI arguments, the following sets of arguments "
+         "are equivalent:\n"
+         ' --json-arg \'{"key1": "value1", "key2": {"key3": "value2"}}\'\n'
+         " --json-arg.key1 value1 --json-arg.key2.key3 value2\n\n"
+         "Additionally, list elements can be passed individually using +:\n"
+         ' --json-arg \'{"key4": ["value3", "value4", "value5"]}\'\n'
+         " --json-arg.key4+ value3 --json-arg.key4+=\'value4,value5\'\n\n")
+     _search_keyword: Optional[str] = None
+ 
+     def __init__(self, *args, **kwargs):
+         # Set the default "formatter_class" to SortedHelpFormatter
+         if "formatter_class" not in kwargs:
+             kwargs["formatter_class"] = SortedHelpFormatter
+         # Pop kwarg "add_json_tip" to control whether to add the JSON tip
+         self.add_json_tip = kwargs.pop("add_json_tip", True)
+         super().__init__(*args, **kwargs)
+ 
+     if sys.version_info < (3, 13):
+         # Enable the deprecated kwarg for Python 3.12 and below
+ 
+         def parse_known_args(self, args=None, namespace=None):
+             if args is not None and "--disable-log-requests" in args:
+                 # Special case warning because the warning below won't
+                 # trigger if --disable-log-requests is used, since its
+                 # value is the default.
+                 logger.warning_once(
+                     "argument '--disable-log-requests' is deprecated and "
+                     "replaced with '--enable-log-requests'. This will be "
+                     "removed in v0.12.0.")
+             namespace, args = super().parse_known_args(args, namespace)
+             for action in FlexibleArgumentParser._deprecated:
+                 if (hasattr(namespace, dest := action.dest)
+                         and getattr(namespace, dest) != action.default):
+                     logger.warning_once("argument '%s' is deprecated", dest)
+             return namespace, args
+ 
+         def add_argument(self, *args, **kwargs):
+             deprecated = kwargs.pop("deprecated", False)
+             action = super().add_argument(*args, **kwargs)
+             if deprecated:
+                 FlexibleArgumentParser._deprecated.add(action)
+             return action
+ 
+         class _FlexibleArgumentGroup(_ArgumentGroup):
+ 
+             def add_argument(self, *args, **kwargs):
+                 deprecated = kwargs.pop("deprecated", False)
+                 action = super().add_argument(*args, **kwargs)
+                 if deprecated:
+                     FlexibleArgumentParser._deprecated.add(action)
+                 return action
+ 
+         def add_argument_group(self, *args, **kwargs):
+             group = self._FlexibleArgumentGroup(self, *args, **kwargs)
+             self._action_groups.append(group)
+             return group
+ 
+     def format_help(self):
+         # Only use custom help formatting for bottom level parsers
+         if self._subparsers is not None:
+             return super().format_help()
+ 
+         formatter = self._get_formatter()
+ 
+         # Handle keyword search of the args
+         if (search_keyword := self._search_keyword) is not None:
+             # Normalise the search keyword
+             search_keyword = search_keyword.lower().replace("_", "-")
+             # Return full help if searching for 'all'
+             if search_keyword == 'all':
+                 self.epilog = self._json_tip
+                 return super().format_help()
+ 
+             # Return group help if searching for a group title
+             for group in self._action_groups:
+                 if group.title and group.title.lower() == search_keyword:
+                     formatter.start_section(group.title)
+                     formatter.add_text(group.description)
+                     formatter.add_arguments(group._group_actions)
+                     formatter.end_section()
+                     formatter.add_text(self._json_tip)
+                     return formatter.format_help()
+ 
+             # Return matched args if searching for an arg name
+             matched_actions = []
+             for group in self._action_groups:
+                 for action in group._group_actions:
+                     # search option name
+                     if any(search_keyword in opt.lower()
+                            for opt in action.option_strings):
+                         matched_actions.append(action)
+             if matched_actions:
+                 formatter.start_section(
+                     f"Arguments matching '{search_keyword}'")
+                 formatter.add_arguments(matched_actions)
+                 formatter.end_section()
+                 formatter.add_text(self._json_tip)
+                 return formatter.format_help()
+ 
+             # No match found
+             formatter.add_text(
+                 f"No group or arguments matching '{search_keyword}'.\n"
+                 "Use '--help' to see available groups or "
+                 "'--help=all' to see all available parameters.")
+             return formatter.format_help()
+ 
+         # usage
+         formatter.add_usage(self.usage, self._actions,
+                             self._mutually_exclusive_groups)
+ 
+         # description
+         formatter.add_text(self.description)
+ 
+         # positionals, optionals and user-defined groups
+         formatter.start_section("Config Groups")
+         config_groups = ""
+         for group in self._action_groups:
+             if not group._group_actions:
+                 continue
+             title = group.title
+             description = group.description or ""
+             config_groups += f"{title: <24}{description}\n"
+         formatter.add_text(config_groups)
+         formatter.end_section()
+ 
+         # epilog
+         formatter.add_text(self.epilog)
+ 
+         # determine help from format above
+         return formatter.format_help()
+ 
+     def parse_args(  # type: ignore[override]
+         self,
+         args: list[str] | None = None,
+         namespace: Namespace | None = None,
+     ):
+         if args is None:
+             args = sys.argv[1:]
+ 
+         # Check for --model in command line arguments first
+         if args and args[0] == "serve":
+             model_in_cli_args = any(arg == '--model' for arg in args)
+ 
+             if model_in_cli_args:
+                 raise ValueError(
+                     "With `vllm serve`, you should provide the model as a "
+                     "positional argument or in a config file instead of via "
+                     "the `--model` option.")
+ 
+         if '--config' in args:
+             args = self._pull_args_from_config(args)
+ 
+         def repl(match: re.Match) -> str:
+             """Replaces underscores with dashes in the matched string."""
+             return match.group(0).replace("_", "-")
+ 
+         # Everything between the first -- and the first .
+         pattern = re.compile(r"(?<=--)[^\.]*")
+ 
+         # Convert underscores to dashes and vice versa in argument names
+         processed_args = list[str]()
+         for i, arg in enumerate(args):
+             if arg.startswith("--help="):
+                 FlexibleArgumentParser._search_keyword = arg.split(
+                     '=', 1)[-1].lower()
+                 processed_args.append("--help")
+             elif arg.startswith('--'):
+                 if '=' in arg:
+                     key, value = arg.split('=', 1)
+                     key = pattern.sub(repl, key, count=1)
+                     processed_args.append(f'{key}={value}')
+                 else:
+                     key = pattern.sub(repl, arg, count=1)
+                     processed_args.append(key)
+             elif arg.startswith('-O') and arg != '-O' and arg[2] != '.':
+                 # allow -O flag to be used without space, e.g. -O3 or -Odecode
+                 # -O.<...> handled later
+                 # also handle -O=<level> here
+                 level = arg[3:] if arg[2] == '=' else arg[2:]
+                 processed_args.append(f'-O.level={level}')
+             elif arg == '-O' and i + 1 < len(args) and args[i + 1] in {
+                     "0", "1", "2", "3"
+             }:
+                 # Convert -O <n> to -O.level <n>
+                 processed_args.append('-O.level')
+             else:
+                 processed_args.append(arg)
+ 
+         def create_nested_dict(keys: list[str], value: str) -> dict[str, Any]:
+             """Creates a nested dictionary from a list of keys and a value.
+ 
+             For example, `keys = ["a", "b", "c"]` and `value = 1` will create:
+             `{"a": {"b": {"c": 1}}}`
+             """
+             nested_dict: Any = value
+             for key in reversed(keys):
+                 nested_dict = {key: nested_dict}
+             return nested_dict
+ 
+         def recursive_dict_update(
+             original: dict[str, Any],
+             update: dict[str, Any],
+         ) -> set[str]:
+             """Recursively updates a dictionary with another dictionary.
+             Returns a set of duplicate keys that were overwritten.
+             """
+             duplicates = set[str]()
+             for k, v in update.items():
+                 if isinstance(v, dict) and isinstance(original.get(k), dict):
+                     nested_duplicates = recursive_dict_update(original[k], v)
+                     duplicates |= {f"{k}.{d}" for d in nested_duplicates}
+                 elif isinstance(v, list) and isinstance(original.get(k), list):
+                     original[k] += v
+                 else:
+                     if k in original:
+                         duplicates.add(k)
+                     original[k] = v
+             return duplicates
+ 
+         delete = set[int]()
+         dict_args = defaultdict[str, dict[str, Any]](dict)
+         duplicates = set[str]()
+         for i, processed_arg in enumerate(processed_args):
+             if i in delete:  # skip if value from previous arg
+                 continue
+ 
+             if processed_arg.startswith("-") and "." in processed_arg:
+                 if "=" in processed_arg:
+                     processed_arg, value_str = processed_arg.split("=", 1)
+                     if "." not in processed_arg:
+                         # False positive, '.' was only in the value
+                         continue
+                 else:
+                     value_str = processed_args[i + 1]
+                     delete.add(i + 1)
+ 
+                 if processed_arg.endswith("+"):
+                     processed_arg = processed_arg[:-1]
+                     value_str = json.dumps(list(value_str.split(",")))
+ 
+                 key, *keys = processed_arg.split(".")
+                 try:
+                     value = json.loads(value_str)
+                 except json.decoder.JSONDecodeError:
+                     value = value_str
+ 
+                 # Merge all values with the same key into a single dict
+                 arg_dict = create_nested_dict(keys, value)
+                 arg_duplicates = recursive_dict_update(dict_args[key],
+                                                        arg_dict)
+                 duplicates |= {f'{key}.{d}' for d in arg_duplicates}
+                 delete.add(i)
+         # Filter out the dict args we set to None
+         processed_args = [
+             a for i, a in enumerate(processed_args) if i not in delete
+         ]
+         if duplicates:
+             logger.warning("Found duplicate keys %s", ", ".join(duplicates))
+ 
+         # Add the dict args back as if they were originally passed as JSON
+         for dict_arg, dict_value in dict_args.items():
+             processed_args.append(dict_arg)
+             processed_args.append(json.dumps(dict_value))
+ 
+         return super().parse_args(processed_args, namespace)
+ 
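As a concrete illustration of the dot-notation merging above, repeated `--json-arg.*` flags collapse into one JSON value. The snippet below replays the nesting logic standalone; the helper mirrors the inner `create_nested_dict` and the final merge is simplified (plain `update`, sufficient here because the top-level keys do not collide):

```python
import json


def create_nested_dict(keys: list[str], value):
    # Wrap the value in dicts from the innermost key outwards.
    for key in reversed(keys):
        value = {key: value}
    return value


# --json-arg.key1 value1 and --json-arg.key2.key3 value2 merge into:
merged: dict = {}
for keys, value in [(["key1"], "value1"), (["key2", "key3"], "value2")]:
    merged.update(create_nested_dict(keys, value))
print(json.dumps(merged))  # {"key1": "value1", "key2": {"key3": "value2"}}
```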
+     def check_port(self, value):
+         try:
+             value = int(value)
+         except ValueError:
+             msg = "Port must be an integer"
+             raise ArgumentTypeError(msg) from None
+ 
+         if not (1024 <= value <= 65535):
+             raise ArgumentTypeError("Port must be between 1024 and 65535")
+ 
+         return value
+ 
+     def _pull_args_from_config(self, args: list[str]) -> list[str]:
+         """Method to pull arguments specified in the config file
+         into the command-line args variable.
+ 
+         The arguments in the config file will be inserted between
+         the argument list.
+ 
+         example:
+         ```yaml
+         port: 12323
+         tensor-parallel-size: 4
+         ```
+         ```python
+         $: vllm {serve,chat,complete} "facebook/opt-12B" \
+             --config config.yaml -tp 2
+         $: args = [
+             "serve,chat,complete",
+             "facebook/opt-12B",
+             '--config', 'config.yaml',
+             '-tp', '2'
+         ]
+         $: args = [
+             "serve,chat,complete",
+             "facebook/opt-12B",
+             '--port', '12323',
+             '--tensor-parallel-size', '4',
+             '-tp', '2'
+         ]
+         ```
+ 
+         Please note how the config args are inserted after the sub command.
+         This way the order of priorities is maintained when these args are
+         parsed by super().
+         """
+         assert args.count(
+             '--config') <= 1, "More than one config file specified!"
+ 
+         index = args.index('--config')
+         if index == len(args) - 1:
+             raise ValueError("No config file specified! "
+                              "Please check your command-line arguments.")
+ 
+         file_path = args[index + 1]
+ 
+         config_args = self.load_config_file(file_path)
+ 
+         # 0th index might be the sub command {serve,chat,complete,...}
+         # optionally followed by model_tag (only for serve)
+         # followed by config args
+         # followed by rest of cli args.
+         # maintaining this order will enforce the precedence
+         # of cli > config > defaults
+         if args[0].startswith('-'):
+             # No sub command (e.g., api_server entry point)
+             args = config_args + args[0:index] + args[index + 2:]
+         elif args[0] == "serve":
+             model_in_cli = len(args) > 1 and not args[1].startswith('-')
+             model_in_config = any(arg == '--model' for arg in config_args)
+ 
+             if not model_in_cli and not model_in_config:
+                 raise ValueError(
+                     "No model specified! Please specify model either "
+                     "as a positional argument or in a config file.")
+ 
+             if model_in_cli:
+                 # Model specified as positional arg, keep CLI version
+                 args = [args[0]] + [
+                     args[1]
+                 ] + config_args + args[2:index] + args[index + 2:]
+             else:
+                 # No model in CLI, use config if available
+                 args = [args[0]
+                         ] + config_args + args[1:index] + args[index + 2:]
+         else:
+             args = [args[0]] + config_args + args[1:index] + args[index + 2:]
+ 
+         return args
+ 
+     def load_config_file(self, file_path: str) -> list[str]:
+         """Loads a yaml file and returns the key value pairs as a
+         flattened list with an argparse-like pattern
+         ```yaml
+         port: 12323
+         tensor-parallel-size: 4
+         ```
+         returns:
+         processed_args: list[str] = [
+             '--port', '12323',
+             '--tensor-parallel-size', '4'
+         ]
+         """
+         extension: str = file_path.split('.')[-1]
+         if extension not in ('yaml', 'yml'):
+             raise ValueError("Config file must be of a yaml/yml type. "
+                              f"{extension} supplied")
+ 
+         # only expecting a flat dictionary of atomic types
+         processed_args: list[str] = []
+ 
+         config: dict[str, Union[int, str]] = {}
+         try:
+             with open(file_path) as config_file:
+                 config = yaml.safe_load(config_file)
+         except Exception as ex:
+             logger.error(
+                 "Unable to read the config file at %s. "
+                 "Make sure the path is correct", file_path)
+             raise ex
+ 
+         store_boolean_arguments = [
+             action.dest for action in self._actions
+             if isinstance(action, StoreBoolean)
+         ]
+ 
+         for key, value in config.items():
+             if isinstance(value, bool) and key not in store_boolean_arguments:
+                 if value:
+                     processed_args.append('--' + key)
+             elif isinstance(value, list):
+                 if value:
+                     processed_args.append('--' + key)
+                     for item in value:
+                         processed_args.append(str(item))
+             else:
+                 processed_args.append('--' + key)
+                 processed_args.append(str(value))
+ 
+         return processed_args
+ 
+ 
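For example, a config like the one in the docstring flattens as follows. This sketch replays the flattening rules standalone, assuming no StoreBoolean arguments are involved:

```python
import yaml

# Stand-in for the contents of config.yaml:
cfg_text = """
port: 12323
tensor-parallel-size: 4
trust-remote-code: true
"""
config = yaml.safe_load(cfg_text)

args: list[str] = []
for key, value in config.items():
    if isinstance(value, bool):
        if value:  # plain booleans become bare flags
            args.append('--' + key)
    else:
        args.extend(['--' + key, str(value)])
print(args)
# ['--port', '12323', '--tensor-parallel-size', '4', '--trust-remote-code']
```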
+ async def _run_task_with_lock(task: Callable, lock: asyncio.Lock, *args,
+                               **kwargs):
+     """Utility function to run an async task under a lock."""
+     async with lock:
+         return await task(*args, **kwargs)
+ 
+ 
+ @lru_cache
+ def supports_kw(
+     callable: Callable[..., object],
+     kw_name: str,
+     *,
+     requires_kw_only: bool = False,
+     allow_var_kwargs: bool = True,
+ ) -> bool:
+     """Check if a keyword is a valid kwarg for a callable; if requires_kw_only
+     disallows kwargs names that can also be positional arguments.
+     """
+     params = inspect.signature(callable).parameters
+     if not params:
+         return False
+ 
+     param_val = params.get(kw_name)
+ 
+     # Types where it may be valid, i.e., explicitly defined & nonvariadic
+     passable_kw_types = set((inspect.Parameter.POSITIONAL_ONLY,
+                              inspect.Parameter.POSITIONAL_OR_KEYWORD,
+                              inspect.Parameter.KEYWORD_ONLY))
+ 
+     if param_val:
+         is_sig_param = param_val.kind in passable_kw_types
+         # We want kwargs only, but this is passable as a positional arg
+         if (requires_kw_only and is_sig_param
+                 and param_val.kind != inspect.Parameter.KEYWORD_ONLY):
+             return False
+         if ((requires_kw_only
+              and param_val.kind == inspect.Parameter.KEYWORD_ONLY)
+                 or (not requires_kw_only and is_sig_param)):
+             return True
+ 
+     # If we're okay with var-kwargs, it's supported as long as
+     # the kw_name isn't something like *args, **kwargs
+     if allow_var_kwargs:
+         # Get the last param; type is ignored here because params is a proxy
+         # mapping, but it wraps an ordered dict, and they appear in order.
+         # Ref: https://docs.python.org/3/library/inspect.html#inspect.Signature.parameters
+         last_param = params[next(reversed(params))]  # type: ignore
+         return (last_param.kind == inspect.Parameter.VAR_KEYWORD
+                 and last_param.name != kw_name)
+ 
+     return False
+ 
+ 
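A few representative calls against a toy signature make the parameter-kind rules concrete (the function `f` below is made up for illustration):

```python
def f(a, *, b, **extra):
    return a, b, extra


assert supports_kw(f, "a")                             # positional-or-keyword
assert supports_kw(f, "b", requires_kw_only=True)      # keyword-only
assert not supports_kw(f, "a", requires_kw_only=True)  # also positional
assert supports_kw(f, "c")                             # absorbed by **extra
assert not supports_kw(f, "c", allow_var_kwargs=False)
```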
+ def get_allowed_kwarg_only_overrides(
+     callable: Callable[..., object],
+     overrides: Optional[Mapping[str, object]],
+     *,
+     requires_kw_only: bool = True,
+     allow_var_kwargs: bool = False,
+ ) -> dict[str, Any]:
+     """
+     Given a callable which has one or more keyword only params and a dict
+     mapping param names to values, drop values that cannot be kwarg
+     expanded to overwrite one or more keyword-only args. This is used in a
+     few places to handle custom processor overrides for multimodal models,
+     e.g., for profiling when processor options provided by the user
+     may affect the number of mm tokens per instance.
+ 
+     Args:
+         callable: Callable which takes 0 or more keyword only arguments.
+                   If None is provided, all override names are allowed.
+         overrides: Potential overrides to be used when invoking the callable.
+         allow_var_kwargs: Allows overrides that are expandable for var kwargs.
+ 
+     Returns:
+         Dictionary containing the kwargs to be leveraged which may be used
+         to overwrite one or more keyword only arguments when invoking the
+         callable.
+     """
+     if not overrides:
+         return {}
+ 
+     # Drop any mm_processor_kwargs provided by the user that
+     # are not kwargs, unless they fit a var-kwargs param
+     filtered_overrides = {
+         kwarg_name: val
+         for kwarg_name, val in overrides.items()
+         if supports_kw(callable,
+                        kwarg_name,
+                        requires_kw_only=requires_kw_only,
+                        allow_var_kwargs=allow_var_kwargs)
+     }
+ 
+     # If anything is dropped, log a warning
+     dropped_keys = overrides.keys() - filtered_overrides.keys()
+     if dropped_keys:
+         if requires_kw_only:
+             logger.warning(
+                 "The following intended overrides are not keyword-only args "
+                 "and will be dropped: %s", dropped_keys)
+         else:
+             logger.warning(
+                 "The following intended overrides are not keyword args "
+                 "and will be dropped: %s", dropped_keys)
+ 
+     return filtered_overrides
+ 
+ 
+ # Using dynamo with vLLM doesn't really work well with PyTorch versions < 2.4.0.
+ # In particular, the FakeScalarType is not supported for earlier versions of
+ # PyTorch which breaks dynamo for any ops registered using ScalarType.
+ def supports_dynamo() -> bool:
+     base_torch_version = Version(Version(torch.__version__).base_version)
+     return base_torch_version >= Version("2.4.0")
+ 
+ 
+ # Supports xccl with PyTorch versions >= 2.8.0.dev for XPU platform
+ def supports_xccl() -> bool:
+     return is_torch_equal_or_newer(
+         "2.8.0.dev") and torch.distributed.is_xccl_available()
+ 
+ 
+ # Some backends use pytorch version < 2.4.0 which doesn't
+ # support `torch.library.custom_op`.
+ def supports_custom_op() -> bool:
+     return hasattr(torch.library, "custom_op")
+ 
+ 
+ class AtomicCounter:
+     """An atomic, thread-safe counter"""
+ 
+     def __init__(self, initial=0):
+         """Initialize a new atomic counter to the given initial value"""
+         self._value = initial
+         self._lock = threading.Lock()
+ 
+     def inc(self, num=1):
+         """Atomically increment the counter by num and return the new value"""
+         with self._lock:
+             self._value += num
+             return self._value
+ 
+     def dec(self, num=1):
+         """Atomically decrement the counter by num and return the new value"""
+         with self._lock:
+             self._value -= num
+             return self._value
+ 
+     @property
+     def value(self):
+         return self._value
+ 
+ 
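A quick demonstration that increments stay consistent across threads:

```python
import threading

counter = AtomicCounter()
threads = [
    threading.Thread(target=lambda: [counter.inc() for _ in range(1000)])
    for _ in range(8)
]
for t in threads:
    t.start()
for t in threads:
    t.join()
assert counter.value == 8000  # no lost updates thanks to the lock
```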
+ # Adapted from: https://stackoverflow.com/a/47212782/5082708
+ class LazyDict(Mapping[str, T], Generic[T]):
+ 
+     def __init__(self, factory: dict[str, Callable[[], T]]):
+         self._factory = factory
+         self._dict: dict[str, T] = {}
+ 
+     def __getitem__(self, key: str) -> T:
+         if key not in self._dict:
+             if key not in self._factory:
+                 raise KeyError(key)
+             self._dict[key] = self._factory[key]()
+         return self._dict[key]
+ 
+     def __setitem__(self, key: str, value: Callable[[], T]):
+         self._factory[key] = value
+ 
+     def __iter__(self):
+         return iter(self._factory)
+ 
+     def __len__(self):
+         return len(self._factory)
+ 
+ 
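Each factory runs at most once, on first access, and the result is memoized. A small usage sketch (the registry contents are hypothetical):

```python
registry = LazyDict({
    "adam": lambda: print("building adam") or "adam-optimizer",
    "sgd": lambda: "sgd-optimizer",
})
registry["adam"]  # prints "building adam", builds and caches the value
registry["adam"]  # served from cache; the factory is not called again
```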
+ class ClassRegistry(UserDict[type[T], _V]):
+ 
+     def __getitem__(self, key: type[T]) -> _V:
+         for cls in key.mro():
+             if cls in self.data:
+                 return self.data[cls]
+ 
+         raise KeyError(key)
+ 
+     def __contains__(self, key: object) -> bool:
+         return self.contains(key)
+ 
+     def contains(self, key: object, *, strict: bool = False) -> bool:
+         if not isinstance(key, type):
+             return False
+ 
+         if strict:
+             return key in self.data
+ 
+         return any(cls in self.data for cls in key.mro())
+ 
+ 
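Lookups walk the MRO, so registering a base class also covers its subclasses unless `strict=True` is passed to `contains`:

```python
class Base: ...
class Child(Base): ...

reg = ClassRegistry()
reg[Base] = "handler-for-base"

assert reg[Child] == "handler-for-base"    # resolved via Child.mro()
assert reg.contains(Child)                 # MRO-based membership
assert not reg.contains(Child, strict=True)  # Child itself not registered
```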
+ def weak_ref_tensor(tensor: Any) -> Any:
+     """
+     Create a weak reference to a tensor.
+     The new tensor will share the same data as the original tensor,
+     but will not keep the original tensor alive.
+     """
+     if isinstance(tensor, torch.Tensor):
+         return torch.ops._C.weak_ref_tensor(tensor)
+     else:
+         return tensor
+ 
+ 
+ def weak_ref_tensors(
+     tensors: Union[torch.Tensor, list[torch.Tensor], tuple[torch.Tensor],
+                    IntermediateTensors]
+ ) -> Union[torch.Tensor, list[Any], tuple[Any], Any]:
+     """
+     Convenience function to create weak references to tensors,
+     for a single tensor, a list of tensors, or a tuple of tensors.
+     """
+     if isinstance(tensors, torch.Tensor):
+         return weak_ref_tensor(tensors)
+     if isinstance(tensors, list):
+         return [weak_ref_tensor(t) for t in tensors]
+     if isinstance(tensors, tuple):
+         return tuple(weak_ref_tensor(t) for t in tensors)
+ 
+     # For IntermediateTensors used in pipeline parallelism
+     from vllm.sequence import IntermediateTensors
+     if isinstance(tensors, IntermediateTensors):
+         ret = IntermediateTensors({
+             key: weak_ref_tensor(val)
+             for key, val in tensors.tensors.items()
+         })
+         return ret
+     raise ValueError("Invalid type for tensors")
+ 
+ 
+ def get_cuda_view_from_cpu_tensor(cpu_tensor: torch.Tensor) -> torch.Tensor:
+     """
+     Get a CUDA view of a CPU tensor using Unified Virtual Addressing (UVA).
+     """
+     assert cpu_tensor.is_pinned(), "CPU tensor must be pinned"
+     return torch.ops._C.get_cuda_view_from_cpu_tensor(cpu_tensor)
+ 
+ 
+ def import_from_path(module_name: str, file_path: Union[str, os.PathLike]):
+     """
+     Import a Python file according to its file path.
+ 
+     Based on the official recipe:
+     https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
+     """
+     spec = importlib.util.spec_from_file_location(module_name, file_path)
+     if spec is None:
+         raise ModuleNotFoundError(f"No module named '{module_name}'")
+ 
+     assert spec.loader is not None
+ 
+     module = importlib.util.module_from_spec(spec)
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)
+     return module
+ 
+ 
+ @cache
+ def get_vllm_optional_dependencies():
+     metadata = importlib.metadata.metadata("vllm")
+     requirements = metadata.get_all("Requires-Dist", [])
+     extras = metadata.get_all("Provides-Extra", [])
+ 
+     return {
+         extra: [
+             re.split(r";|>=|<=|==", req)[0] for req in requirements
+             if req.endswith(f'extra == "{extra}"')
+         ]
+         for extra in extras
+     }
+ 
+ 
+ class _PlaceholderBase:
+     """
+     Disallows downstream usage of placeholder modules.
+ 
+     We need to explicitly override each dunder method because
+     [`__getattr__`][vllm.utils._PlaceholderBase.__getattr__]
+     is not called when they are accessed.
+ 
+     Info:
+         [Special method lookup](https://docs.python.org/3/reference/datamodel.html#special-lookup)
+     """
+ 
+     def __getattr__(self, key: str) -> Never:
+         """
+         The main class should implement this to throw an error
+         for attribute accesses representing downstream usage.
+         """
+         raise NotImplementedError
+ 
+     # [Basic customization]
+ 
+     def __lt__(self, other: object):
+         return self.__getattr__("__lt__")
+ 
+     def __le__(self, other: object):
+         return self.__getattr__("__le__")
+ 
+     def __eq__(self, other: object):
+         return self.__getattr__("__eq__")
+ 
+     def __ne__(self, other: object):
+         return self.__getattr__("__ne__")
+ 
+     def __gt__(self, other: object):
+         return self.__getattr__("__gt__")
+ 
+     def __ge__(self, other: object):
+         return self.__getattr__("__ge__")
+ 
+     def __hash__(self):
+         return self.__getattr__("__hash__")
+ 
+     def __bool__(self):
+         return self.__getattr__("__bool__")
+ 
+     # [Callable objects]
+ 
+     def __call__(self, *args: object, **kwargs: object):
+         return self.__getattr__("__call__")
+ 
+     # [Container types]
+ 
+     def __len__(self):
+         return self.__getattr__("__len__")
+ 
+     def __getitem__(self, key: object):
+         return self.__getattr__("__getitem__")
+ 
+     def __setitem__(self, key: object, value: object):
+         return self.__getattr__("__setitem__")
+ 
+     def __delitem__(self, key: object):
+         return self.__getattr__("__delitem__")
+ 
+     # __missing__ is optional according to __getitem__ specification,
+     # so it is skipped
+ 
+     # __iter__ and __reversed__ have a default implementation
+     # based on __len__ and __getitem__, so they are skipped.
+ 
+     # [Numeric Types]
+ 
+     def __add__(self, other: object):
+         return self.__getattr__("__add__")
+ 
+     def __sub__(self, other: object):
+         return self.__getattr__("__sub__")
+ 
+     def __mul__(self, other: object):
+         return self.__getattr__("__mul__")
+ 
+     def __matmul__(self, other: object):
+         return self.__getattr__("__matmul__")
+ 
+     def __truediv__(self, other: object):
+         return self.__getattr__("__truediv__")
+ 
+     def __floordiv__(self, other: object):
+         return self.__getattr__("__floordiv__")
+ 
+     def __mod__(self, other: object):
+         return self.__getattr__("__mod__")
+ 
+     def __divmod__(self, other: object):
+         return self.__getattr__("__divmod__")
+ 
+     def __pow__(self, other: object, modulo: object = ...):
+         return self.__getattr__("__pow__")
+ 
+     def __lshift__(self, other: object):
+         return self.__getattr__("__lshift__")
+ 
+     def __rshift__(self, other: object):
+         return self.__getattr__("__rshift__")
+ 
+     def __and__(self, other: object):
+         return self.__getattr__("__and__")
+ 
+     def __xor__(self, other: object):
+         return self.__getattr__("__xor__")
+ 
+     def __or__(self, other: object):
+         return self.__getattr__("__or__")
+ 
+     # r* and i* methods have lower priority than
+     # the methods for left operand so they are skipped
+ 
+     def __neg__(self):
+         return self.__getattr__("__neg__")
+ 
+     def __pos__(self):
+         return self.__getattr__("__pos__")
+ 
+     def __abs__(self):
+         return self.__getattr__("__abs__")
+ 
+     def __invert__(self):
+         return self.__getattr__("__invert__")
+ 
+     # __complex__, __int__ and __float__ have a default implementation
+     # based on __index__, so they are skipped.
+ 
+     def __index__(self):
+         return self.__getattr__("__index__")
+ 
+     def __round__(self, ndigits: object = ...):
+         return self.__getattr__("__round__")
+ 
+     def __trunc__(self):
+         return self.__getattr__("__trunc__")
+ 
+     def __floor__(self):
+         return self.__getattr__("__floor__")
+ 
+     def __ceil__(self):
+         return self.__getattr__("__ceil__")
+ 
+     # [Context managers]
+ 
+     def __enter__(self):
+         return self.__getattr__("__enter__")
+ 
+     def __exit__(self, *args: object, **kwargs: object):
+         return self.__getattr__("__exit__")
+ 
+ 
+ class PlaceholderModule(_PlaceholderBase):
+     """
+     A placeholder object to use when a module does not exist.
+ 
+     This enables more informative errors when trying to access attributes
+     of a module that does not exist.
+     """
+ 
+     def __init__(self, name: str) -> None:
+         super().__init__()
+ 
+         # Apply name mangling to avoid conflicting with module attributes
+         self.__name = name
+ 
+     def placeholder_attr(self, attr_path: str):
+         return _PlaceholderModuleAttr(self, attr_path)
+ 
+     def __getattr__(self, key: str):
+         name = self.__name
+ 
+         try:
+             importlib.import_module(name)
+         except ImportError as exc:
+             for extra, names in get_vllm_optional_dependencies().items():
+                 if name in names:
+                     msg = f"Please install vllm[{extra}] for {extra} support"
+                     raise ImportError(msg) from exc
+ 
+             raise exc
+ 
+         raise AssertionError("PlaceholderModule should not be used "
+                              "when the original module can be imported")
+ 
+ 
+ class _PlaceholderModuleAttr(_PlaceholderBase):
+ 
+     def __init__(self, module: PlaceholderModule, attr_path: str) -> None:
+         super().__init__()
+ 
+         # Apply name mangling to avoid conflicting with module attributes
+         self.__module = module
+         self.__attr_path = attr_path
+ 
+     def placeholder_attr(self, attr_path: str):
+         return _PlaceholderModuleAttr(self.__module,
+                                       f"{self.__attr_path}.{attr_path}")
+ 
+     def __getattr__(self, key: str):
+         getattr(self.__module, f"{self.__attr_path}.{key}")
+ 
+         raise AssertionError("PlaceholderModule should not be used "
+                              "when the original module can be imported")
+ 
+ 
+ # create a library to hold the custom op
+ vllm_lib = Library("vllm", "FRAGMENT")  # noqa
+ 
+ 
+ def direct_register_custom_op(
+     op_name: str,
+     op_func: Callable,
+     mutates_args: Optional[list[str]] = None,
+     fake_impl: Optional[Callable] = None,
+     target_lib: Optional[Library] = None,
+     dispatch_key: Optional[str] = None,
+     tags: tuple[torch.Tag, ...] = (),
+ ):
+     """
+     `torch.library.custom_op` can have significant overhead because it
+     needs to consider complicated dispatching logic. This function
+     directly registers a custom op and dispatches it to the CUDA backend.
+     See https://gist.github.com/youkaichao/ecbea9ec9fc79a45d2adce1784d7a9a5
+     for more details.
+ 
+     By default, the custom op is registered to the vLLM library. If you
+     want to register it to a different library, you can pass the library
+     object to the `target_lib` argument.
+ 
+     IMPORTANT: the lifetime of the operator is tied to the lifetime of the
+     library object. If you want to bind the operator to a different library,
+     make sure the library object is alive when the operator is used.
+     """
+     if not supports_custom_op():
+         from vllm.platforms import current_platform
+         assert not current_platform.is_cuda_alike(), (
+             "cuda platform needs torch>=2.4 to support custom op, "
+             "chances are you are using an old version of pytorch "
+             "or a custom build of pytorch. It is recommended to "
+             "use vLLM in a fresh new environment and let it install "
+             "the required dependencies.")
+         return
+ 
+     if mutates_args is None:
+         mutates_args = []
+ 
+     if dispatch_key is None:
+         from vllm.platforms import current_platform
+         dispatch_key = current_platform.dispatch_key
+ 
+     import torch.library
+     if hasattr(torch.library, "infer_schema"):
+         schema_str = torch.library.infer_schema(op_func,
+                                                 mutates_args=mutates_args)
+     else:
+         # for pytorch 2.4
+         import torch._custom_op.impl
+         schema_str = torch._custom_op.impl.infer_schema(op_func, mutates_args)
+     my_lib = target_lib or vllm_lib
+     my_lib.define(op_name + schema_str, tags=tags)
+     my_lib.impl(op_name, op_func, dispatch_key=dispatch_key)
+     if fake_impl is not None:
+         my_lib._register_fake(op_name, fake_impl)
+ 
+ 
+ def resolve_obj_by_qualname(qualname: str) -> Any:
+     """
+     Resolve an object by its fully-qualified class name.
+     """
+     module_name, obj_name = qualname.rsplit(".", 1)
+     module = importlib.import_module(module_name)
+     return getattr(module, obj_name)
+ 
+ 
+ def kill_process_tree(pid: int):
+     """
+     Kills all descendant processes of the given pid by sending SIGKILL.
+ 
+     Args:
+         pid (int): Process ID of the parent process
+     """
+     try:
+         parent = psutil.Process(pid)
+     except psutil.NoSuchProcess:
+         return
+ 
+     # Get all children recursively
+     children = parent.children(recursive=True)
+ 
+     # Send SIGKILL to all children first
+     for child in children:
+         with contextlib.suppress(ProcessLookupError):
+             os.kill(child.pid, signal.SIGKILL)
+ 
+     # Finally kill the parent
+     with contextlib.suppress(ProcessLookupError):
+         os.kill(pid, signal.SIGKILL)
+ 
+ 
+ @dataclass
+ class MemorySnapshot:
+     """Memory snapshot."""
+     torch_peak: int = 0
+     free_memory: int = 0
+     total_memory: int = 0
+     cuda_memory: int = 0
+     torch_memory: int = 0
+     non_torch_memory: int = 0
+     timestamp: float = 0.0
+     auto_measure: bool = True
+ 
+     def __post_init__(self):
+         if self.auto_measure:
+             self.measure()
+ 
+     def measure(self):
+         # we measure the torch peak memory usage via allocated_bytes,
+         # rather than `torch.cuda.memory_reserved()`.
+         # After `torch.cuda.reset_peak_memory_stats()`,
+         # `torch.cuda.memory_reserved()` will keep growing, and only shrink
+         # when we call `torch.cuda.empty_cache()` or OOM happens.
+         self.torch_peak = torch.cuda.memory_stats().get(
+             "allocated_bytes.all.peak", 0)
+ 
+         self.free_memory, self.total_memory = torch.cuda.mem_get_info()
+         self.cuda_memory = self.total_memory - self.free_memory
+ 
+         # torch.cuda.memory_reserved() is how many bytes
+         # PyTorch gets from cuda (by calling cudaMalloc, etc.)
+         # this is used to measure the non-torch memory usage
+         self.torch_memory = torch.cuda.memory_reserved()
+ 
+         self.non_torch_memory = self.cuda_memory - self.torch_memory
+         self.timestamp = time.time()
+ 
+     def __sub__(self, other: MemorySnapshot) -> MemorySnapshot:
+         return MemorySnapshot(
+             torch_peak=self.torch_peak - other.torch_peak,
+             free_memory=self.free_memory - other.free_memory,
+             total_memory=self.total_memory - other.total_memory,
+             cuda_memory=self.cuda_memory - other.cuda_memory,
+             torch_memory=self.torch_memory - other.torch_memory,
+             non_torch_memory=self.non_torch_memory - other.non_torch_memory,
+             timestamp=self.timestamp - other.timestamp,
+             auto_measure=False,
+         )
+ 
+ 
+ @dataclass
+ class MemoryProfilingResult:
+     """Memory profiling result. All numbers are in bytes."""
+     non_kv_cache_memory: int = 0
+     torch_peak_increase: int = 0
+     non_torch_increase: int = 0
+     weights_memory: float = 0
+     before_create: MemorySnapshot = field(default_factory=MemorySnapshot)
+     before_profile: MemorySnapshot = field(default_factory=MemorySnapshot)
+     after_profile: MemorySnapshot = field(default_factory=MemorySnapshot)
+     profile_time: float = 0.0
+ 
+     def __repr__(self) -> str:
+         return (f"Memory profiling takes {self.profile_time:.2f} seconds. "
+                 f"Total non KV cache memory: "
+                 f"{(self.non_kv_cache_memory / GiB_bytes):.2f}GiB; "
+                 f"torch peak memory increase: "
+                 f"{(self.torch_peak_increase / GiB_bytes):.2f}GiB; "
+                 f"non-torch forward increase memory: "
+                 f"{(self.non_torch_increase / GiB_bytes):.2f}GiB; "
+                 f"weights memory: {(self.weights_memory / GiB_bytes):.2f}GiB.")
+ 
+ 
+ @contextlib.contextmanager
+ def memory_profiling(
+         baseline_snapshot: MemorySnapshot,
+         weights_memory: int) -> Generator[MemoryProfilingResult, None, None]:
+     """Memory profiling context manager.
+     baseline_snapshot: the memory snapshot before the current vLLM instance.
+     weights_memory: memory used by PyTorch when loading the model weights.
+     Note that, before loading the model weights, we also initialize the device
+     and distributed environment, which may consume some memory. This part is
+     not included in the weights_memory because PyTorch does not control it.
+ 
+     The memory in one GPU can be classified into 3 categories:
+     1. memory used by anything other than the current vLLM instance.
+     2. memory used by torch in the current vLLM instance.
+     3. memory used in the current vLLM instance, but not by torch.
+ 
+     A quantitative example:
+ 
+     Before creating the current vLLM instance:
+         category 1: 1 GiB
+         category 2: 0 GiB
+         category 3: 0 GiB
+ 
+     After creating the current vLLM instance and loading the model,
+     (i.e. before profiling):
+         category 1: 1 GiB
+         category 2: 2 GiB (model weights take 2 GiB)
+         category 3: 0.5 GiB (memory used by NCCL)
+ 
+     During profiling (peak):
+         category 1: 1 GiB
+         category 2: 4 GiB (peak activation tensors take 2 GiB)
+         category 3: 1 GiB (memory used by NCCL + buffers for some attention backends)
+ 
+     After profiling:
+         category 1: 1 GiB
+         category 2: 3 GiB (after garbage-collecting activation tensors)
+         category 3: 1 GiB (memory used by NCCL + buffers for some attention backends)
+ 
+     In this case, non-kv cache takes 5 GiB in total, including:
+     a. 2 GiB used by the model weights (category 2)
+     b. 2 GiB reserved for the peak activation tensors (category 2)
+     c. 1 GiB used by non-torch components (category 3)
+ 
+     The memory used for loading weights (a.) is directly given from the argument `weights_memory`.
+ 
+     The increase of `torch.cuda.memory_stats()["allocated_bytes.all.peak"]` during profiling gives (b.).
+ 
+     The increase of `non_torch_memory` from creating the current vLLM instance until after profiling gives (c.).
+     """  # noqa
+     gc.collect()
+     torch.cuda.empty_cache()
+     torch.cuda.reset_peak_memory_stats()
+ 
+     result = MemoryProfilingResult()
+ 
+     result.before_create = baseline_snapshot
+     # the part of memory used for holding the model weights
+     result.weights_memory = weights_memory
+ 
+     result.before_profile.measure()
+ 
+     yield result
+ 
+     gc.collect()
+     torch.cuda.empty_cache()
+ 
+     result.after_profile.measure()
+ 
+     diff_profile = result.after_profile - result.before_profile
+     diff_from_create = result.after_profile - result.before_create
+     result.torch_peak_increase = diff_profile.torch_peak
+     result.non_torch_increase = diff_from_create.non_torch_memory
+     result.profile_time = diff_profile.timestamp
+ 
+     non_torch_memory = result.non_torch_increase
+     peak_activation_memory = result.torch_peak_increase
+     result.non_kv_cache_memory = non_torch_memory + peak_activation_memory + result.weights_memory  # noqa
+ 
+ 
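A sketch of how a worker might use the context manager; `load_model` and `run_profile` are placeholders standing in for real work, and the snapshot calls require an initialized CUDA device:

```python
# Hypothetical usage sketch; load_model/run_profile are placeholders.
baseline = MemorySnapshot()       # taken before vLLM allocates anything
weights_mem = load_model()        # returns bytes used by the weights

with memory_profiling(baseline, weights_memory=weights_mem) as profile:
    run_profile()                 # forward pass with dummy inputs

# Everything that is not KV cache: weights + peak activations + non-torch.
print(profile)  # uses MemoryProfilingResult.__repr__
kv_cache_budget = baseline.total_memory - profile.non_kv_cache_memory
```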
+ # Adapted from: https://github.com/sgl-project/sglang/blob/v0.4.1/python/sglang/srt/utils.py#L630 # noqa: E501
+ def set_ulimit(target_soft_limit=65535):
+     if sys.platform.startswith('win'):
+         logger.info("Windows detected, skipping ulimit adjustment.")
+         return
+ 
+     import resource
+     resource_type = resource.RLIMIT_NOFILE
+     current_soft, current_hard = resource.getrlimit(resource_type)
+ 
+     if current_soft < target_soft_limit:
+         try:
+             resource.setrlimit(resource_type,
+                                (target_soft_limit, current_hard))
+         except ValueError as e:
+             logger.warning(
+                 "Found ulimit of %s and failed to automatically increase "
+                 "with error %s. This can cause fd limit errors like "
+                 "`OSError: [Errno 24] Too many open files`. Consider "
+                 "increasing with ulimit -n", current_soft, e)
+ 
+ 
+ # Adapted from: https://github.com/sgl-project/sglang/blob/v0.4.1/python/sglang/utils.py#L28 # noqa: E501
+ def get_exception_traceback():
+     etype, value, tb = sys.exc_info()
+     err_str = "".join(traceback.format_exception(etype, value, tb))
+     return err_str
+ 
+ 
+ def split_zmq_path(path: str) -> tuple[str, str, str]:
+     """Split a zmq path into its parts."""
+     parsed = urlparse(path)
+     if not parsed.scheme:
+         raise ValueError(f"Invalid zmq path: {path}")
+ 
+     scheme = parsed.scheme
+     host = parsed.hostname or ""
+     port = str(parsed.port or "")
+ 
+     if scheme == "tcp" and not all((host, port)):
+         # The host and port fields are required for tcp
+         raise ValueError(f"Invalid zmq path: {path}")
+ 
+     if scheme != "tcp" and port:
+         # port only makes sense with tcp
+         raise ValueError(f"Invalid zmq path: {path}")
+ 
+     return scheme, host, port
+ 
+ 
+ def make_zmq_path(scheme: str, host: str, port: Optional[int] = None) -> str:
+     """Make a ZMQ path from its parts.
+ 
+     Args:
+         scheme: The ZMQ transport scheme (e.g. tcp, ipc, inproc).
+         host: The host - can be an IPv4 address, IPv6 address, or hostname.
+         port: Optional port number, only used for TCP sockets.
+ 
+     Returns:
+         A properly formatted ZMQ path string.
+     """
+     if port is None:
+         return f"{scheme}://{host}"
+     if is_valid_ipv6_address(host):
+         return f"{scheme}://[{host}]:{port}"
+     return f"{scheme}://{host}:{port}"
+ 
+ 
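IPv6 hosts get the square brackets that ZMQ's TCP transport requires; for example:

```python
assert make_zmq_path("ipc", "/tmp/engine.sock") == "ipc:///tmp/engine.sock"
assert make_zmq_path("tcp", "127.0.0.1", 5555) == "tcp://127.0.0.1:5555"
assert make_zmq_path("tcp", "::1", 5555) == "tcp://[::1]:5555"
```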
+ # Adapted from: https://github.com/sgl-project/sglang/blob/v0.4.1/python/sglang/srt/utils.py#L783 # noqa: E501
+ def make_zmq_socket(
+     ctx: Union[zmq.asyncio.Context, zmq.Context],  # type: ignore[name-defined]
+     path: str,
+     socket_type: Any,
+     bind: Optional[bool] = None,
+     identity: Optional[bytes] = None,
+     linger: Optional[int] = None,
+ ) -> Union[zmq.Socket, zmq.asyncio.Socket]:  # type: ignore[name-defined]
+     """Make a ZMQ socket with the proper bind/connect semantics."""
+ 
+     mem = psutil.virtual_memory()
+     socket = ctx.socket(socket_type)
+ 
+     # Calculate buffer size based on system memory
+     total_mem = mem.total / 1024**3
+     available_mem = mem.available / 1024**3
+     # For systems with substantial memory (>32GB total, >16GB available):
+     # - Set a large 0.5GB buffer to improve throughput
+     # For systems with less memory:
+     # - Use system default (-1) to avoid excessive memory consumption
+     if total_mem > 32 and available_mem > 16:
+         buf_size = int(0.5 * 1024**3)  # 0.5GB in bytes
+     else:
+         buf_size = -1  # Use system default buffer size
+ 
+     if bind is None:
+         bind = socket_type not in (zmq.PUSH, zmq.SUB, zmq.XSUB)
+ 
+     if socket_type in (zmq.PULL, zmq.DEALER, zmq.ROUTER):
+         socket.setsockopt(zmq.RCVHWM, 0)
+         socket.setsockopt(zmq.RCVBUF, buf_size)
+ 
+     if socket_type in (zmq.PUSH, zmq.DEALER, zmq.ROUTER):
+         socket.setsockopt(zmq.SNDHWM, 0)
+         socket.setsockopt(zmq.SNDBUF, buf_size)
+ 
+     if identity is not None:
+         socket.setsockopt(zmq.IDENTITY, identity)
+ 
+     if linger is not None:
+         socket.setsockopt(zmq.LINGER, linger)
+ 
+     if socket_type == zmq.XPUB:
+         socket.setsockopt(zmq.XPUB_VERBOSE, True)
+ 
+     # Determine if the path is a TCP socket with an IPv6 address.
+     # Enable IPv6 on the zmq socket if so.
+     scheme, host, _ = split_zmq_path(path)
+     if scheme == "tcp" and is_valid_ipv6_address(host):
+         socket.setsockopt(zmq.IPV6, 1)
+ 
+     if bind:
+         socket.bind(path)
+     else:
+         socket.connect(path)
+ 
+     return socket
+ 
+ 
+ @contextlib.contextmanager
+ def zmq_socket_ctx(
+     path: str,
+     socket_type: Any,
+     bind: Optional[bool] = None,
+     linger: int = 0,
+     identity: Optional[bytes] = None,
+ ) -> Iterator[zmq.Socket]:
+     """Context manager for a ZMQ socket"""
+ 
+     ctx = zmq.Context()  # type: ignore[attr-defined]
+     try:
+         yield make_zmq_socket(ctx,
+                               path,
+                               socket_type,
+                               bind=bind,
+                               identity=identity)
+     except KeyboardInterrupt:
+         logger.debug("Got Keyboard Interrupt.")
+ 
+     finally:
+         ctx.destroy(linger=linger)
+ 
+ 
+ def _maybe_force_spawn():
+     """Check if we need to force the use of the `spawn` multiprocessing start
+     method.
+     """
+     if os.environ.get("VLLM_WORKER_MULTIPROC_METHOD") == "spawn":
+         return
+ 
+     reasons = []
+     if is_in_ray_actor():
+         # even if we choose to spawn, we need to pass the ray address
+         # to the subprocess so that it knows how to connect to the ray cluster.
+         # env vars are inherited by subprocesses, even if we use spawn.
+         import ray
+         os.environ["RAY_ADDRESS"] = ray.get_runtime_context().gcs_address
+         reasons.append("In a Ray actor and can only be spawned")
+ 
+     if cuda_is_initialized():
+         reasons.append("CUDA is initialized")
+     elif xpu_is_initialized():
+         reasons.append("XPU is initialized")
+ 
+     if reasons:
+         logger.warning(
+             "We must use the `spawn` multiprocessing start method. "
+             "Overriding VLLM_WORKER_MULTIPROC_METHOD to 'spawn'. "
+             "See https://docs.vllm.ai/en/latest/usage/"
+             "troubleshooting.html#python-multiprocessing "
+             "for more information. Reasons: %s", "; ".join(reasons))
+         os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"
+ 
+ 
+ def get_mp_context():
+     """Get a multiprocessing context with a particular method (spawn or fork).
+     By default we follow the value of the VLLM_WORKER_MULTIPROC_METHOD to
+     determine the multiprocessing method (default is fork). However, under
+     certain conditions, we may enforce spawn and override the value of
+     VLLM_WORKER_MULTIPROC_METHOD.
+     """
+     _maybe_force_spawn()
+     mp_method = envs.VLLM_WORKER_MULTIPROC_METHOD
+     return multiprocessing.get_context(mp_method)
+ 
+ 
+ def bind_kv_cache(
+     ctx: dict[str, Any],
+     kv_cache: list[list[torch.Tensor]],  # [virtual_engine][layer_index]
+     shared_kv_cache_layers: Optional[dict[str, str]] = None
+ ) -> None:
+     # Bind the kv_cache tensor to Attention modules, similar to
+     # ctx[layer_name].kv_cache[ve]=kv_cache[ve][extract_layer_index(layer_name)]
+     # Special things handled here:
+     # 1. Some models have non-attention layers, e.g., Jamba
+     # 2. Pipeline parallelism, each rank only has a subset of layers
+     # 3. Encoder attention has no kv cache
+     # 4. Encoder-decoder models, encoder-decoder attention and decoder-only
+     #    attention of the same layer (e.g., bart's decoder.layers.1.self_attn
+     #    and decoder.layers.1.encoder_attn) is mapped to the same kv cache
+     #    tensor
+     # 5. Some models have attention layers that share kv cache with previous
+     #    layers, this is specified through shared_kv_cache_layers
+     if shared_kv_cache_layers is None:
+         shared_kv_cache_layers = {}
+     from vllm.attention import AttentionType
+     from vllm.model_executor.models.utils import extract_layer_index
+     layer_need_kv_cache = [
+         layer_name for layer_name in ctx
+         if (hasattr(ctx[layer_name], 'attn_type') and ctx[layer_name].attn_type
+             in (AttentionType.DECODER, AttentionType.ENCODER_DECODER))
+         and ctx[layer_name].kv_sharing_target_layer_name is None
+     ]
+     layer_index_sorted = sorted(
+         set(
+             extract_layer_index(layer_name)
+             for layer_name in layer_need_kv_cache))
+     for layer_name in layer_need_kv_cache:
+         kv_cache_idx = layer_index_sorted.index(
+             extract_layer_index(layer_name))
+         forward_ctx = ctx[layer_name]
+         assert len(forward_ctx.kv_cache) == len(kv_cache)
+         for ve, ve_kv_cache in enumerate(kv_cache):
+             forward_ctx.kv_cache[ve] = ve_kv_cache[kv_cache_idx]
+     # shared_kv_cache_layers was normalized to a dict above, so the loop
+     # can run unconditionally (it is a no-op when the dict is empty).
+     for layer_name, target_layer_name in shared_kv_cache_layers.items():
+         assert extract_layer_index(target_layer_name) < \
+             extract_layer_index(layer_name), \
+             "v0 doesn't support interleaving kv sharing"
+         ctx[layer_name].kv_cache = ctx[target_layer_name].kv_cache
+ 
+ 
+ def run_method(obj: Any, method: Union[str, bytes, Callable], args: tuple[Any],
+                kwargs: dict[str, Any]) -> Any:
+     """
+     Run a method of an object with the given arguments and keyword arguments.
+     If the method is a string, it will be converted to a method using getattr.
+     If the method is serialized bytes, it will be deserialized using
+     cloudpickle.
+     If the method is a callable, it will be called directly.
+     """
+     if isinstance(method, bytes):
+         func = partial(cloudpickle.loads(method), obj)
+     elif isinstance(method, str):
+         try:
+             func = getattr(obj, method)
+         except AttributeError:
+             raise NotImplementedError(f"Method {method!r} is not"
+                                       " implemented.") from None
+     else:
+         func = partial(method, obj)  # type: ignore
+     return func(*args, **kwargs)
+ 
+ 
+ def import_pynvml():
+     """
+     Historical comments:
+ 
+     libnvml.so is the library behind nvidia-smi, and
+     pynvml is a Python wrapper around it. We use it to get GPU
+     status without initializing CUDA context in the current process.
+     Historically, there are two packages that provide pynvml:
+     - `nvidia-ml-py` (https://pypi.org/project/nvidia-ml-py/): The official
+       wrapper. It is a dependency of vLLM, and is installed when users
+       install vLLM. It provides a Python module named `pynvml`.
+     - `pynvml` (https://pypi.org/project/pynvml/): An unofficial wrapper.
+       Prior to version 12.0, it also provides a Python module `pynvml`,
+       and therefore conflicts with the official one. What's worse,
+       the module is a Python package, and has higher priority than
+       the official one which is a standalone Python file.
+       This causes errors when both of them are installed.
+       Starting from version 12.0, it migrates to a new module
+       named `pynvml_utils` to avoid the conflict.
+     It is so confusing that many packages in the community use the
+     unofficial one by mistake, and we have to handle this case.
+     For example, `nvcr.io/nvidia/pytorch:24.12-py3` uses the unofficial
+     one, and it will cause errors, see the issue
+     https://github.com/vllm-project/vllm/issues/12847 for example.
+     After all the troubles, we decide to copy the official `pynvml`
+     module to our codebase, and use it directly.
+     """
+     import vllm.third_party.pynvml as pynvml
+     return pynvml
+ 
+ 
+ def warn_for_unimplemented_methods(cls: type[T]) -> type[T]:
+     """
+     A replacement for `abc.ABC`.
+     When we use `abc.ABC`, subclasses will fail to instantiate
+     if they do not implement all abstract methods.
+     Here, we only require `raise NotImplementedError` in the
+     base class, and log a warning if the method is not implemented
+     in the subclass.
+     """
+ 
+     original_init = cls.__init__
+ 
+     def find_unimplemented_methods(self: object):
+         unimplemented_methods = []
+         for attr_name in dir(self):
+             # skip private/internal attributes
+             if attr_name.startswith('_'):
+                 continue
+ 
+             try:
+                 attr = getattr(self, attr_name)
+                 # only consider callable methods
+                 if not callable(attr):
+                     continue
+                 attr_func = attr.__func__
+             except AttributeError:
+                 continue
+             src = inspect.getsource(attr_func)
+             if "NotImplementedError" in src:
+                 unimplemented_methods.append(attr_name)
+         if unimplemented_methods:
+             method_names = ','.join(unimplemented_methods)
+             msg = f"Methods {method_names} not implemented in {self}"
+             logger.debug(msg)
+ 
+     @wraps(original_init)
+     def wrapped_init(self, *args, **kwargs) -> None:
+         original_init(self, *args, **kwargs)
+         find_unimplemented_methods(self)
+ 
+     type.__setattr__(cls, '__init__', wrapped_init)
+     return cls
+ 
+ 
+ class LazyLoader(types.ModuleType):
+     """
+     LazyLoader module borrowed from Tensorflow
+     https://github.com/tensorflow/tensorflow/blob/main/tensorflow/python/util/lazy_loader.py
+     with an addition of "module caching".
+ 
+     Lazily import a module, mainly to avoid pulling in large dependencies.
+     Modules such as `xgrammar` might have import-time side effects, so we
+     only want to import them when actually needed, delaying all eager
+     effects.
+     """
+ 
+     def __init__(
+         self,
+         local_name: str,
+         parent_module_globals: dict[str, Any],
+         name: str,
+     ):
+         self._local_name = local_name
+         self._parent_module_globals = parent_module_globals
+         self._module: types.ModuleType | None = None
+ 
+         super().__init__(str(name))
+ 
+     def _load(self) -> types.ModuleType:
+         # Import the target module and insert it into the parent's namespace
+         try:
+             module = importlib.import_module(self.__name__)
+             self._parent_module_globals[self._local_name] = module
+             # The additional add to sys.modules
+             # ensures the library is actually loaded.
+             sys.modules[self._local_name] = module
+         except ModuleNotFoundError as err:
+             raise err from None
+ 
+         # Update this object's dict so that if someone keeps a
+         # reference to the LazyLoader, lookups are efficient
+         # (__getattr__ is only called on lookups that fail).
+         self.__dict__.update(module.__dict__)
+         return module
+ 
+     def __getattr__(self, item: Any) -> Any:
+         if self._module is None:
+             self._module = self._load()
+         return getattr(self._module, item)
+ 
+     def __dir__(self) -> list[str]:
+         if self._module is None:
+             self._module = self._load()
+         return dir(self._module)
+ 
+ 
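A usage sketch: defer a heavy import until first attribute access. The choice of `numpy` here is purely illustrative:

    from vllm.utils import LazyLoader

    np = LazyLoader("np", globals(), "numpy")  # nothing is imported yet

    def make_buffer():
        return np.zeros(16)  # first attribute access triggers the real import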
+ def swap_dict_values(obj: dict[_K, _V], key1: _K, key2: _K) -> None:
+     """
+     Helper function to swap the values of two keys.
+     A value of `None` (or a missing key) is treated as absent: instead
+     of storing `None`, the destination key is removed from the dict.
+     """
+     v1 = obj.get(key1)
+     v2 = obj.get(key2)
+     if v1 is not None:
+         obj[key2] = v1
+     else:
+         obj.pop(key2, None)
+     if v2 is not None:
+         obj[key1] = v2
+     else:
+         obj.pop(key1, None)
+ 
+ 
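A small worked example of the `None`-as-missing semantics:

    from vllm.utils import swap_dict_values

    d = {"a": 1}
    swap_dict_values(d, "a", "b")
    assert d == {"b": 1}   # "b" received 1; "a" was dropped, not set to None
    swap_dict_values(d, "a", "b")
    assert d == {"a": 1}   # swapped back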
+ @contextlib.contextmanager
+ def cprofile_context(save_file: Optional[str] = None):
+     """Run cProfile over the enclosed block.
+ 
+     Args:
+         save_file: Path to save the profile result. "1", None, or ""
+             will result in printing to stdout.
+     """
+     import cProfile
+ 
+     prof = cProfile.Profile()
+     prof.enable()
+ 
+     try:
+         yield
+     finally:
+         prof.disable()
+         if save_file and save_file != "1":
+             prof.dump_stats(save_file)
+         else:
+             prof.print_stats(sort="cumtime")
+ 
+ 
+ def cprofile(save_file: Optional[str] = None, enabled: bool = True):
+     """Decorator to profile a Python method using cProfile.
+ 
+     Args:
+         save_file: Path to save the profile result.
+             If "1", None, or "", results will be printed to stdout.
+         enabled: Set to False to turn this into a no-op.
+     """
+ 
+     def decorator(func: Callable):
+ 
+         @wraps(func)
+         def wrapper(*args, **kwargs):
+             if not enabled:
+                 # If profiling is disabled, just call the function directly.
+                 return func(*args, **kwargs)
+ 
+             with cprofile_context(save_file):
+                 return func(*args, **kwargs)
+ 
+         return wrapper
+ 
+     return decorator
+ 
+ 
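A usage sketch of the decorator; the output file name is hypothetical:

    from vllm.utils import cprofile

    @cprofile("decode.prof")
    def decode_step():
        return sum(i * i for i in range(100_000))

    decode_step()  # cProfile stats are dumped to decode.prof

    # @cprofile() with no save_file instead prints a cumtime-sorted
    # table to stdout when the function returns.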
+ # Only relevant for models using ALiBi (e.g., MPT)
+ def check_use_alibi(model_config: ModelConfig) -> bool:
+     cfg = model_config.hf_text_config
+     return (getattr(cfg, "alibi", False)  # Falcon
+             or ("BloomForCausalLM" in getattr(model_config.hf_config,
+                                               "architectures", []))  # Bloom
+             or getattr(cfg, "position_encoding_type", "") ==
+             "alibi"  # codellm_1b_alibi
+             or (hasattr(cfg, "attn_config")  # MPT
+                 and ((isinstance(cfg.attn_config, dict)
+                       and cfg.attn_config.get("alibi", False)) or
+                      (not isinstance(cfg.attn_config, dict)
+                       and getattr(cfg.attn_config, "alibi", False)))))
+ 
+ 
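A sketch of how the MPT branch fires, using a duck-typed stand-in for `ModelConfig` (in real use an actual `ModelConfig` is passed):

    from types import SimpleNamespace
    from vllm.utils import check_use_alibi

    mpt_like = SimpleNamespace(
        hf_text_config=SimpleNamespace(attn_config={"alibi": True}),
        hf_config=SimpleNamespace(architectures=["MPTForCausalLM"]),
    )
    assert check_use_alibi(mpt_like)  # attn_config["alibi"] enables ALiBi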
+ def sha256(input: Any) -> bytes:
+     """Hash any picklable Python object using SHA-256.
+ 
+     The input is serialized using pickle before hashing, which allows
+     arbitrary Python objects to be used. Note that this function does
+     not use a hash seed; if you need one, prepend it explicitly to the
+     input.
+ 
+     Args:
+         input: Any picklable Python object.
+ 
+     Returns:
+         Bytes representing the SHA-256 hash of the serialized input.
+     """
+     input_bytes = pickle.dumps(input, protocol=pickle.HIGHEST_PROTOCOL)
+     return hashlib.sha256(input_bytes).digest()
+ 
+ 
+ def sha256_cbor(input: Any) -> bytes:
+     """
+     Hash objects using CBOR serialization and SHA-256.
+ 
+     This option is useful for non-Python-dependent serialization and
+     hashing.
+ 
+     Args:
+         input: Object to be serialized and hashed. Supported types include
+             basic Python types and complex structures like lists, tuples,
+             and dictionaries.
+             Custom classes must implement CBOR serialization methods.
+ 
+     Returns:
+         Bytes representing the SHA-256 hash of the CBOR-serialized input.
+     """
+     input_bytes = cbor2.dumps(input, canonical=True)
+     return hashlib.sha256(input_bytes).digest()
+ 
+ 
+ def get_hash_fn_by_name(hash_fn_name: str) -> Callable[[Any], bytes]:
+     """Get a hash function by name, or raise an error if
+     the function is not found.
+ 
+     Args:
+         hash_fn_name: Name of the hash function.
+ 
+     Returns:
+         A hash function.
+     """
+     if hash_fn_name == "sha256":
+         return sha256
+     if hash_fn_name == "sha256_cbor":
+         return sha256_cbor
+ 
+     raise ValueError(f"Unsupported hash function: {hash_fn_name}")
+ 
+ 
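A usage sketch; the tuple content is arbitrary:

    from vllm.utils import get_hash_fn_by_name

    hash_fn = get_hash_fn_by_name("sha256_cbor")
    digest = hash_fn(("block-hash", 42, (1, 2, 3)))
    assert isinstance(digest, bytes) and len(digest) == 32  # raw SHA-256 digest

    # get_hash_fn_by_name("md5") would raise
    # ValueError("Unsupported hash function: md5").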
+ def is_torch_equal_or_newer(target: str) -> bool:
+     """Check if the installed torch version is >= the target version.
+ 
+     Args:
+         target: a version string, like "2.6.0".
+ 
+     Returns:
+         True if the installed version is equal to or newer than the target.
+     """
+     try:
+         return _is_torch_equal_or_newer(str(torch.__version__), target)
+     except Exception:
+         # Fallback to PKG-INFO to load the package info, needed by the doc gen.
+         return Version(importlib.metadata.version('torch')) >= Version(target)
+ 
+ 
+ # Helper function used in testing.
+ def _is_torch_equal_or_newer(torch_version: str, target: str) -> bool:
+     return version.parse(torch_version) >= version.parse(target)
+ 
+ 
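A few concrete cases for the helper, including a dev-build string of the kind torch reports:

    assert _is_torch_equal_or_newer("2.6.0", "2.6.0")
    assert _is_torch_equal_or_newer("2.7.0a0+git1234ab", "2.6.0")  # newer pre-release
    assert not _is_torch_equal_or_newer("2.5.1", "2.6.0")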
+ @cache
+ def _has_module(module_name: str) -> bool:
+     """Return True if *module_name* can be found in the current environment.
+ 
+     The result is cached so that subsequent queries for the same module
+     incur no additional overhead.
+     """
+     return importlib.util.find_spec(module_name) is not None
+ 
+ 
+ def has_pplx() -> bool:
+     """Whether the optional `pplx_kernels` package is available."""
+ 
+     return _has_module("pplx_kernels")
+ 
+ 
+ def has_deep_ep() -> bool:
+     """Whether the optional `deep_ep` package is available."""
+ 
+     return _has_module("deep_ep")
+ 
+ 
+ def has_deep_gemm() -> bool:
+     """Whether the optional `deep_gemm` package is available."""
+ 
+     return _has_module("deep_gemm")
+ 
+ 
+ def has_triton_kernels() -> bool:
+     """Whether the optional `triton_kernels` package is available."""
+ 
+     return _has_module("triton_kernels")
+ 
+ 
+ def has_tilelang() -> bool:
+     """Whether the optional `tilelang` package is available."""
+ 
+     return _has_module("tilelang")
+ 
+ 
+ def set_process_title(name: str,
+                       suffix: str = "",
+                       prefix: str = envs.VLLM_PROCESS_NAME_PREFIX) -> None:
+     """
+     Set the current process title to a specific name with an
+     optional suffix.
+ 
+     Args:
+         name: The title to assign to the current process.
+         suffix: An optional suffix appended to the base name with `_`.
+         prefix: A prefix prepended to the name, separated by `::`.
+     """
+     if suffix:
+         name = f"{name}_{suffix}"
+     setproctitle.setproctitle(f"{prefix}::{name}")
+ 
+ 
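A usage sketch; assuming the default `VLLM_PROCESS_NAME_PREFIX` of "VLLM", the title below would show up in `ps`/`top` as `VLLM::EngineCore_0`:

    from vllm.utils import set_process_title

    set_process_title("EngineCore", suffix="0")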
+ def _add_prefix(file: TextIO, worker_name: str, pid: int) -> None:
+     """Prepend each output line with a process-specific prefix."""
+ 
+     prefix = f"{CYAN}({worker_name} pid={pid}){RESET} "
+     file_write = file.write
+ 
+     def write_with_prefix(s: str):
+         if not s:
+             return
+         if file.start_new_line:  # type: ignore[attr-defined]
+             file_write(prefix)
+         idx = 0
+         while (next_idx := s.find('\n', idx)) != -1:
+             next_idx += 1
+             file_write(s[idx:next_idx])
+             if next_idx == len(s):
+                 file.start_new_line = True  # type: ignore[attr-defined]
+                 return
+             file_write(prefix)
+             idx = next_idx
+         file_write(s[idx:])
+         file.start_new_line = False  # type: ignore[attr-defined]
+ 
+     file.start_new_line = True  # type: ignore[attr-defined]
+     file.write = write_with_prefix  # type: ignore[method-assign]
+ 
+ 
+ def decorate_logs(process_name: Optional[str] = None) -> None:
+     """
+     Add a process-specific prefix to each line of output written to stdout
+     and stderr.
+ 
+     This function is intended to be called before initializing the
+     api_server, engine_core, or worker classes, so that all subsequent
+     output from the process is prefixed with the process name and PID.
+     This helps distinguish log output from different processes in
+     multi-process environments.
+ 
+     Args:
+         process_name: Optional; the name of the process to use in the
+             prefix. If not provided, the current process name from the
+             multiprocessing context is used.
+     """
+     if process_name is None:
+         process_name = get_mp_context().current_process().name
+     pid = os.getpid()
+     _add_prefix(sys.stdout, process_name, pid)
+     _add_prefix(sys.stderr, process_name, pid)
+ 
+ 
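A sketch of the resulting output format:

    from vllm.utils import decorate_logs

    decorate_logs("EngineCore")
    print("starting up")
    # stdout now carries the prefix on every line, e.g.:
    #   (EngineCore pid=12345) starting up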
+ def length_from_prompt_token_ids_or_embeds(
+     prompt_token_ids: Optional[list[int]],
+     prompt_embeds: Optional[torch.Tensor],
+ ) -> int:
+     """Calculate the request length (in number of tokens) given either
+     prompt_token_ids or prompt_embeds.
+     """
+     prompt_token_len = None if prompt_token_ids is None else len(
+         prompt_token_ids)
+     prompt_embeds_len = \
+         None if prompt_embeds is None else len(prompt_embeds)
+ 
+     if prompt_token_len is None:
+         if prompt_embeds_len is None:
+             raise ValueError(
+                 "Neither prompt_token_ids nor prompt_embeds were defined.")
+         return prompt_embeds_len
+     else:
+         if (prompt_embeds_len is not None
+                 and prompt_embeds_len != prompt_token_len):
+             raise ValueError(
+                 "Prompt token ids and prompt embeds had different lengths:"
+                 f" prompt_token_ids={prompt_token_len}"
+                 f" prompt_embeds={prompt_embeds_len}")
+         return prompt_token_len
+ 
+ 
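A few concrete cases (the embeds tensor has one row per token):

    import torch
    from vllm.utils import length_from_prompt_token_ids_or_embeds

    embeds = torch.zeros(3, 8)

    assert length_from_prompt_token_ids_or_embeds([1, 2, 3], None) == 3
    assert length_from_prompt_token_ids_or_embeds(None, embeds) == 3
    assert length_from_prompt_token_ids_or_embeds([1, 2, 3], embeds) == 3
    # Mismatched lengths, or both arguments None, raise ValueError.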
+ @contextlib.contextmanager
+ def set_env_var(key, value):
+     """Temporarily set an environment variable, restoring the previous
+     value (or removing the key) on exit."""
+     old = os.environ.get(key)
+     os.environ[key] = value
+     try:
+         yield
+     finally:
+         if old is None:
+             del os.environ[key]
+         else:
+             os.environ[key] = old
+ 
+ 
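A usage sketch; the variable name is arbitrary:

    import os
    from vllm.utils import set_env_var

    with set_env_var("VLLM_LOGGING_LEVEL", "DEBUG"):
        assert os.environ["VLLM_LOGGING_LEVEL"] == "DEBUG"
    # On exit the previous value is restored, or the key removed if unset.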
+ def unique_filepath(fn: Callable[[int], Path]) -> Path:
+     """
+     unique_filepath returns a unique path by trying
+     to include an integer in increasing order.
+ 
+     fn should be a callable that returns a path that
+     includes the passed int at a fixed location.
+ 
+     Note: This function has a TOCTOU race condition.
+     Caller should use atomic operations (e.g., open with 'x' mode)
+     when creating the file to ensure thread safety.
+     """
+     i = 0
+     while True:
+         p = fn(i)
+         if not p.exists():
+             return p
+         i += 1
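A usage sketch pairing the helper with an exclusive create to close the TOCTOU window; the path pattern is hypothetical:

    from pathlib import Path
    from vllm.utils import unique_filepath

    path = unique_filepath(lambda i: Path(f"/tmp/vllm_profile_{i}.json"))
    with open(path, "x") as f:  # 'x' fails if another process won the race
        f.write("{}")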