vllm_cpu-0.11.0.post2-cp312-cp312-manylinux_2_17_x86_64.whl

This diff represents the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
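Since a wheel is a standard zip archive, the file list below can be reproduced locally. What follows is a minimal sketch (not part of the registry page), assuming the wheel named in the header has already been downloaded to the current directory; because this diff has no prior version to compare against, every file in the listing appears as pure additions ("+N -0").

    import zipfile

    # Assumed local filename, matching the wheel named in the page header.
    WHEEL = "vllm_cpu-0.11.0.post2-cp312-cp312-manylinux_2_17_x86_64.whl"

    with zipfile.ZipFile(WHEEL) as wf:
        for info in wf.infolist():
            # Each archive member corresponds to one entry in the listing below.
            print(f"{info.filename} ({info.file_size} bytes)")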
Files changed (1398)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +220 -0
  3. vllm/_bc_linter.py +59 -0
  4. vllm/_custom_ops.py +2044 -0
  5. vllm/_ipex_ops.py +393 -0
  6. vllm/_version.py +34 -0
  7. vllm/assets/__init__.py +0 -0
  8. vllm/assets/audio.py +45 -0
  9. vllm/assets/base.py +41 -0
  10. vllm/assets/image.py +50 -0
  11. vllm/assets/video.py +145 -0
  12. vllm/attention/__init__.py +15 -0
  13. vllm/attention/backends/__init__.py +0 -0
  14. vllm/attention/backends/abstract.py +204 -0
  15. vllm/attention/backends/utils.py +33 -0
  16. vllm/attention/layer.py +645 -0
  17. vllm/attention/layers/__init__.py +0 -0
  18. vllm/attention/layers/chunked_local_attention.py +93 -0
  19. vllm/attention/layers/cross_attention.py +162 -0
  20. vllm/attention/layers/encoder_only_attention.py +86 -0
  21. vllm/attention/ops/__init__.py +0 -0
  22. vllm/attention/ops/chunked_prefill_paged_decode.py +405 -0
  23. vllm/attention/ops/common.py +345 -0
  24. vllm/attention/ops/flashmla.py +192 -0
  25. vllm/attention/ops/merge_attn_states.py +43 -0
  26. vllm/attention/ops/paged_attn.py +262 -0
  27. vllm/attention/ops/pallas_kv_cache_update.py +124 -0
  28. vllm/attention/ops/prefix_prefill.py +928 -0
  29. vllm/attention/ops/rocm_aiter_mla.py +104 -0
  30. vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
  31. vllm/attention/ops/triton_decode_attention.py +691 -0
  32. vllm/attention/ops/triton_flash_attention.py +984 -0
  33. vllm/attention/ops/triton_merge_attn_states.py +97 -0
  34. vllm/attention/ops/triton_reshape_and_cache_flash.py +175 -0
  35. vllm/attention/ops/triton_unified_attention.py +894 -0
  36. vllm/attention/selector.py +245 -0
  37. vllm/attention/utils/__init__.py +0 -0
  38. vllm/attention/utils/fa_utils.py +85 -0
  39. vllm/attention/utils/kv_sharing_utils.py +33 -0
  40. vllm/beam_search.py +87 -0
  41. vllm/benchmarks/__init__.py +0 -0
  42. vllm/benchmarks/datasets.py +2723 -0
  43. vllm/benchmarks/latency.py +170 -0
  44. vllm/benchmarks/lib/__init__.py +3 -0
  45. vllm/benchmarks/lib/endpoint_request_func.py +533 -0
  46. vllm/benchmarks/lib/ready_checker.py +73 -0
  47. vllm/benchmarks/lib/utils.py +80 -0
  48. vllm/benchmarks/serve.py +1358 -0
  49. vllm/benchmarks/throughput.py +696 -0
  50. vllm/collect_env.py +823 -0
  51. vllm/compilation/__init__.py +0 -0
  52. vllm/compilation/activation_quant_fusion.py +189 -0
  53. vllm/compilation/backends.py +650 -0
  54. vllm/compilation/base_static_graph.py +56 -0
  55. vllm/compilation/collective_fusion.py +1188 -0
  56. vllm/compilation/compiler_interface.py +573 -0
  57. vllm/compilation/counter.py +47 -0
  58. vllm/compilation/cuda_graph.py +199 -0
  59. vllm/compilation/cuda_piecewise_backend.py +117 -0
  60. vllm/compilation/decorators.py +400 -0
  61. vllm/compilation/fix_functionalization.py +205 -0
  62. vllm/compilation/fusion.py +383 -0
  63. vllm/compilation/fusion_attn.py +295 -0
  64. vllm/compilation/fx_utils.py +84 -0
  65. vllm/compilation/inductor_pass.py +136 -0
  66. vllm/compilation/monitor.py +57 -0
  67. vllm/compilation/noop_elimination.py +158 -0
  68. vllm/compilation/pass_manager.py +125 -0
  69. vllm/compilation/post_cleanup.py +20 -0
  70. vllm/compilation/sequence_parallelism.py +478 -0
  71. vllm/compilation/torch25_custom_graph_pass.py +42 -0
  72. vllm/compilation/vllm_inductor_pass.py +156 -0
  73. vllm/compilation/wrapper.py +136 -0
  74. vllm/config/__init__.py +814 -0
  75. vllm/config/cache.py +220 -0
  76. vllm/config/compilation.py +673 -0
  77. vllm/config/device.py +74 -0
  78. vllm/config/kv_events.py +50 -0
  79. vllm/config/kv_transfer.py +111 -0
  80. vllm/config/load.py +113 -0
  81. vllm/config/lora.py +132 -0
  82. vllm/config/model.py +1912 -0
  83. vllm/config/multimodal.py +129 -0
  84. vllm/config/observability.py +99 -0
  85. vllm/config/parallel.py +524 -0
  86. vllm/config/pooler.py +97 -0
  87. vllm/config/scheduler.py +287 -0
  88. vllm/config/speculative.py +568 -0
  89. vllm/config/speech_to_text.py +39 -0
  90. vllm/config/structured_outputs.py +64 -0
  91. vllm/config/utils.py +145 -0
  92. vllm/connections.py +186 -0
  93. vllm/device_allocator/__init__.py +0 -0
  94. vllm/device_allocator/cumem.py +311 -0
  95. vllm/distributed/__init__.py +6 -0
  96. vllm/distributed/communication_op.py +41 -0
  97. vllm/distributed/device_communicators/__init__.py +0 -0
  98. vllm/distributed/device_communicators/all2all.py +440 -0
  99. vllm/distributed/device_communicators/all_reduce_utils.py +317 -0
  100. vllm/distributed/device_communicators/base_device_communicator.py +295 -0
  101. vllm/distributed/device_communicators/cpu_communicator.py +201 -0
  102. vllm/distributed/device_communicators/cuda_communicator.py +323 -0
  103. vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
  104. vllm/distributed/device_communicators/custom_all_reduce.py +311 -0
  105. vllm/distributed/device_communicators/mnnvl_compat.py +28 -0
  106. vllm/distributed/device_communicators/pynccl.py +340 -0
  107. vllm/distributed/device_communicators/pynccl_allocator.py +186 -0
  108. vllm/distributed/device_communicators/pynccl_wrapper.py +416 -0
  109. vllm/distributed/device_communicators/quick_all_reduce.py +278 -0
  110. vllm/distributed/device_communicators/ray_communicator.py +258 -0
  111. vllm/distributed/device_communicators/shm_broadcast.py +589 -0
  112. vllm/distributed/device_communicators/shm_object_storage.py +635 -0
  113. vllm/distributed/device_communicators/symm_mem.py +136 -0
  114. vllm/distributed/device_communicators/tpu_communicator.py +102 -0
  115. vllm/distributed/device_communicators/xpu_communicator.py +94 -0
  116. vllm/distributed/eplb/__init__.py +8 -0
  117. vllm/distributed/eplb/eplb_state.py +620 -0
  118. vllm/distributed/eplb/rebalance_algo.py +239 -0
  119. vllm/distributed/eplb/rebalance_execute.py +424 -0
  120. vllm/distributed/kv_events.py +362 -0
  121. vllm/distributed/kv_transfer/README.md +29 -0
  122. vllm/distributed/kv_transfer/__init__.py +13 -0
  123. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  124. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  125. vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
  126. vllm/distributed/kv_transfer/kv_connector/factory.py +113 -0
  127. vllm/distributed/kv_transfer/kv_connector/utils.py +261 -0
  128. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
  129. vllm/distributed/kv_transfer/kv_connector/v1/base.py +388 -0
  130. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +168 -0
  131. vllm/distributed/kv_transfer/kv_connector/v1/metrics.py +100 -0
  132. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +328 -0
  133. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1473 -0
  134. vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py +485 -0
  135. vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
  136. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +488 -0
  137. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +550 -0
  138. vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +267 -0
  139. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +418 -0
  140. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  141. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
  142. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
  143. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
  144. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  145. vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
  146. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +290 -0
  147. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
  148. vllm/distributed/kv_transfer/kv_transfer_state.py +73 -0
  149. vllm/distributed/parallel_state.py +1532 -0
  150. vllm/distributed/tpu_distributed_utils.py +178 -0
  151. vllm/distributed/utils.py +536 -0
  152. vllm/engine/__init__.py +0 -0
  153. vllm/engine/arg_utils.py +1778 -0
  154. vllm/engine/async_llm_engine.py +6 -0
  155. vllm/engine/llm_engine.py +6 -0
  156. vllm/engine/metrics.py +577 -0
  157. vllm/engine/metrics_types.py +84 -0
  158. vllm/engine/protocol.py +333 -0
  159. vllm/entrypoints/__init__.py +0 -0
  160. vllm/entrypoints/api_server.py +178 -0
  161. vllm/entrypoints/chat_utils.py +1705 -0
  162. vllm/entrypoints/cli/__init__.py +12 -0
  163. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  164. vllm/entrypoints/cli/benchmark/base.py +25 -0
  165. vllm/entrypoints/cli/benchmark/latency.py +21 -0
  166. vllm/entrypoints/cli/benchmark/main.py +55 -0
  167. vllm/entrypoints/cli/benchmark/serve.py +21 -0
  168. vllm/entrypoints/cli/benchmark/throughput.py +21 -0
  169. vllm/entrypoints/cli/collect_env.py +36 -0
  170. vllm/entrypoints/cli/main.py +60 -0
  171. vllm/entrypoints/cli/openai.py +233 -0
  172. vllm/entrypoints/cli/run_batch.py +67 -0
  173. vllm/entrypoints/cli/serve.py +232 -0
  174. vllm/entrypoints/cli/types.py +29 -0
  175. vllm/entrypoints/constants.py +10 -0
  176. vllm/entrypoints/context.py +481 -0
  177. vllm/entrypoints/harmony_utils.py +436 -0
  178. vllm/entrypoints/launcher.py +164 -0
  179. vllm/entrypoints/llm.py +1629 -0
  180. vllm/entrypoints/logger.py +79 -0
  181. vllm/entrypoints/openai/__init__.py +0 -0
  182. vllm/entrypoints/openai/api_server.py +1953 -0
  183. vllm/entrypoints/openai/cli_args.py +288 -0
  184. vllm/entrypoints/openai/logits_processors.py +90 -0
  185. vllm/entrypoints/openai/protocol.py +2757 -0
  186. vllm/entrypoints/openai/run_batch.py +491 -0
  187. vllm/entrypoints/openai/serving_chat.py +1597 -0
  188. vllm/entrypoints/openai/serving_classification.py +173 -0
  189. vllm/entrypoints/openai/serving_completion.py +692 -0
  190. vllm/entrypoints/openai/serving_embedding.py +631 -0
  191. vllm/entrypoints/openai/serving_engine.py +992 -0
  192. vllm/entrypoints/openai/serving_models.py +288 -0
  193. vllm/entrypoints/openai/serving_pooling.py +276 -0
  194. vllm/entrypoints/openai/serving_responses.py +1709 -0
  195. vllm/entrypoints/openai/serving_score.py +479 -0
  196. vllm/entrypoints/openai/serving_tokenization.py +196 -0
  197. vllm/entrypoints/openai/serving_transcription.py +136 -0
  198. vllm/entrypoints/openai/speech_to_text.py +388 -0
  199. vllm/entrypoints/openai/tool_parsers/__init__.py +55 -0
  200. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
  201. vllm/entrypoints/openai/tool_parsers/deepseekv31_tool_parser.py +367 -0
  202. vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
  203. vllm/entrypoints/openai/tool_parsers/glm4_moe_tool_parser.py +185 -0
  204. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
  205. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
  206. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +455 -0
  207. vllm/entrypoints/openai/tool_parsers/hunyuan_a13b_tool_parser.py +372 -0
  208. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
  209. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
  210. vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py +377 -0
  211. vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
  212. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +269 -0
  213. vllm/entrypoints/openai/tool_parsers/longcat_tool_parser.py +39 -0
  214. vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +816 -0
  215. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
  216. vllm/entrypoints/openai/tool_parsers/openai_tool_parser.py +93 -0
  217. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
  218. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
  219. vllm/entrypoints/openai/tool_parsers/qwen3coder_tool_parser.py +707 -0
  220. vllm/entrypoints/openai/tool_parsers/qwen3xml_tool_parser.py +1137 -0
  221. vllm/entrypoints/openai/tool_parsers/seed_oss_tool_parser.py +679 -0
  222. vllm/entrypoints/openai/tool_parsers/step3_tool_parser.py +296 -0
  223. vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
  224. vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +524 -0
  225. vllm/entrypoints/renderer.py +395 -0
  226. vllm/entrypoints/score_utils.py +232 -0
  227. vllm/entrypoints/ssl.py +75 -0
  228. vllm/entrypoints/tool.py +139 -0
  229. vllm/entrypoints/tool_server.py +206 -0
  230. vllm/entrypoints/utils.py +233 -0
  231. vllm/env_override.py +23 -0
  232. vllm/envs.py +1590 -0
  233. vllm/executor/__init__.py +0 -0
  234. vllm/executor/executor_base.py +381 -0
  235. vllm/executor/msgspec_utils.py +35 -0
  236. vllm/executor/ray_distributed_executor.py +699 -0
  237. vllm/executor/ray_utils.py +410 -0
  238. vllm/executor/uniproc_executor.py +176 -0
  239. vllm/forward_context.py +402 -0
  240. vllm/inputs/__init__.py +30 -0
  241. vllm/inputs/data.py +356 -0
  242. vllm/inputs/parse.py +151 -0
  243. vllm/inputs/preprocess.py +664 -0
  244. vllm/logger.py +229 -0
  245. vllm/logging_utils/__init__.py +10 -0
  246. vllm/logging_utils/dump_input.py +81 -0
  247. vllm/logging_utils/formatter.py +79 -0
  248. vllm/logging_utils/log_time.py +32 -0
  249. vllm/logits_process.py +119 -0
  250. vllm/logprobs.py +28 -0
  251. vllm/lora/__init__.py +0 -0
  252. vllm/lora/layers/__init__.py +34 -0
  253. vllm/lora/layers/base.py +69 -0
  254. vllm/lora/layers/base_linear.py +185 -0
  255. vllm/lora/layers/column_parallel_linear.py +609 -0
  256. vllm/lora/layers/logits_processor.py +247 -0
  257. vllm/lora/layers/qkv_x_parallel_linear.py +8 -0
  258. vllm/lora/layers/replicated_linear.py +60 -0
  259. vllm/lora/layers/row_parallel_linear.py +196 -0
  260. vllm/lora/layers/utils.py +65 -0
  261. vllm/lora/layers/vocal_parallel_embedding.py +174 -0
  262. vllm/lora/lora_weights.py +199 -0
  263. vllm/lora/models.py +816 -0
  264. vllm/lora/ops/__init__.py +0 -0
  265. vllm/lora/ops/ipex_ops/__init__.py +7 -0
  266. vllm/lora/ops/ipex_ops/lora_ops.py +44 -0
  267. vllm/lora/ops/torch_ops/__init__.py +16 -0
  268. vllm/lora/ops/torch_ops/lora_ops.py +119 -0
  269. vllm/lora/ops/triton_ops/__init__.py +12 -0
  270. vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
  271. vllm/lora/ops/triton_ops/lora_expand_op.py +289 -0
  272. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
  273. vllm/lora/ops/triton_ops/lora_shrink_op.py +243 -0
  274. vllm/lora/ops/triton_ops/utils.py +126 -0
  275. vllm/lora/ops/xla_ops/__init__.py +7 -0
  276. vllm/lora/ops/xla_ops/lora_ops.py +144 -0
  277. vllm/lora/peft_helper.py +127 -0
  278. vllm/lora/punica_wrapper/__init__.py +10 -0
  279. vllm/lora/punica_wrapper/punica_base.py +458 -0
  280. vllm/lora/punica_wrapper/punica_cpu.py +349 -0
  281. vllm/lora/punica_wrapper/punica_gpu.py +272 -0
  282. vllm/lora/punica_wrapper/punica_selector.py +20 -0
  283. vllm/lora/punica_wrapper/punica_tpu.py +391 -0
  284. vllm/lora/punica_wrapper/punica_xpu.py +276 -0
  285. vllm/lora/punica_wrapper/utils.py +136 -0
  286. vllm/lora/request.py +97 -0
  287. vllm/lora/resolver.py +85 -0
  288. vllm/lora/utils.py +246 -0
  289. vllm/lora/worker_manager.py +267 -0
  290. vllm/model_executor/__init__.py +12 -0
  291. vllm/model_executor/custom_op.py +194 -0
  292. vllm/model_executor/layers/__init__.py +0 -0
  293. vllm/model_executor/layers/activation.py +575 -0
  294. vllm/model_executor/layers/attention_layer_base.py +23 -0
  295. vllm/model_executor/layers/fla/__init__.py +8 -0
  296. vllm/model_executor/layers/fla/ops/__init__.py +17 -0
  297. vllm/model_executor/layers/fla/ops/chunk.py +225 -0
  298. vllm/model_executor/layers/fla/ops/chunk_delta_h.py +290 -0
  299. vllm/model_executor/layers/fla/ops/chunk_o.py +177 -0
  300. vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +140 -0
  301. vllm/model_executor/layers/fla/ops/cumsum.py +226 -0
  302. vllm/model_executor/layers/fla/ops/fused_recurrent.py +366 -0
  303. vllm/model_executor/layers/fla/ops/index.py +39 -0
  304. vllm/model_executor/layers/fla/ops/l2norm.py +143 -0
  305. vllm/model_executor/layers/fla/ops/layernorm_guard.py +337 -0
  306. vllm/model_executor/layers/fla/ops/op.py +39 -0
  307. vllm/model_executor/layers/fla/ops/solve_tril.py +365 -0
  308. vllm/model_executor/layers/fla/ops/utils.py +180 -0
  309. vllm/model_executor/layers/fla/ops/wy_fast.py +114 -0
  310. vllm/model_executor/layers/fused_moe/__init__.py +89 -0
  311. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +322 -0
  312. vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +141 -0
  313. vllm/model_executor/layers/fused_moe/config.py +804 -0
  314. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  315. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  316. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  317. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  318. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  319. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  320. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  321. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  322. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  323. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  324. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  325. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  326. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  327. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H100,dtype=fp8_w8a8.json +123 -0
  328. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  329. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  330. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  331. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  332. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  333. vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
  334. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  335. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  336. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  337. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  338. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  339. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +147 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=62,N=128,device_name=AMD_Instinct_MI300X.json +200 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=AMD_Instinct_MI300X.json +200 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=AMD_Instinct_MI300X.json +200 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  454. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  455. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  456. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
  457. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  458. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  459. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  460. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  461. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  462. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
  463. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  464. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  465. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  466. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  467. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  468. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  469. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  470. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  471. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
  472. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  473. vllm/model_executor/layers/fused_moe/configs/E=72,N=192,device_name=AMD_Instinct_MI300X.json +200 -0
  474. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=AMD_Instinct_MI300X.json +200 -0
  475. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  476. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=AMD_Instinct_MI300X.json +200 -0
  477. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  478. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  479. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  480. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  481. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  482. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  483. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  484. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  485. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  486. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  487. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  488. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  489. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  490. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  491. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  492. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  493. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  494. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  495. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  496. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  497. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  498. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  499. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  500. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  501. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  502. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  503. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  504. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  505. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
  506. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  507. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  508. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  509. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  510. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  511. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  512. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  513. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  514. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  515. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  516. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  517. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  518. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  519. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  520. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  521. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  522. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  523. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  524. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  525. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  526. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  527. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  528. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  529. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  530. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  531. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  532. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  533. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  534. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  535. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  536. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  537. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  538. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  539. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  540. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  541. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  542. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  543. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  544. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  545. vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +300 -0
  546. vllm/model_executor/layers/fused_moe/cutlass_moe.py +957 -0
  547. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +362 -0
  548. vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +413 -0
  549. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +361 -0
  550. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +274 -0
  551. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +268 -0
  552. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +300 -0
  553. vllm/model_executor/layers/fused_moe/flashinfer_trtllm_moe.py +184 -0
  554. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +993 -0
  555. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +239 -0
  556. vllm/model_executor/layers/fused_moe/fused_moe.py +1890 -0
  557. vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +307 -0
  558. vllm/model_executor/layers/fused_moe/layer.py +2195 -0
  559. vllm/model_executor/layers/fused_moe/modular_kernel.py +1038 -0
  560. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +87 -0
  561. vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
  562. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +205 -0
  563. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  564. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +341 -0
  565. vllm/model_executor/layers/fused_moe/prepare_finalize.py +70 -0
  566. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +424 -0
  567. vllm/model_executor/layers/fused_moe/routing_simulator.py +291 -0
  568. vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +146 -0
  569. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +143 -0
  570. vllm/model_executor/layers/fused_moe/trtllm_moe.py +191 -0
  571. vllm/model_executor/layers/fused_moe/utils.py +274 -0
  572. vllm/model_executor/layers/layernorm.py +395 -0
  573. vllm/model_executor/layers/lightning_attn.py +661 -0
  574. vllm/model_executor/layers/linear.py +1603 -0
  575. vllm/model_executor/layers/logits_processor.py +106 -0
  576. vllm/model_executor/layers/mamba/__init__.py +0 -0
  577. vllm/model_executor/layers/mamba/abstract.py +42 -0
  578. vllm/model_executor/layers/mamba/linear_attn.py +403 -0
  579. vllm/model_executor/layers/mamba/mamba_mixer.py +466 -0
  580. vllm/model_executor/layers/mamba/mamba_mixer2.py +764 -0
  581. vllm/model_executor/layers/mamba/mamba_utils.py +186 -0
  582. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  583. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +1092 -0
  584. vllm/model_executor/layers/mamba/ops/layernorm_gated.py +168 -0
  585. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
  586. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +242 -0
  587. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +527 -0
  588. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +724 -0
  589. vllm/model_executor/layers/mamba/ops/ssd_combined.py +238 -0
  590. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +200 -0
  591. vllm/model_executor/layers/mamba/short_conv.py +253 -0
  592. vllm/model_executor/layers/mla.py +173 -0
  593. vllm/model_executor/layers/pooler.py +719 -0
  594. vllm/model_executor/layers/quantization/__init__.py +157 -0
  595. vllm/model_executor/layers/quantization/auto_round.py +388 -0
  596. vllm/model_executor/layers/quantization/awq.py +228 -0
  597. vllm/model_executor/layers/quantization/awq_marlin.py +554 -0
  598. vllm/model_executor/layers/quantization/awq_triton.py +320 -0
  599. vllm/model_executor/layers/quantization/base_config.py +170 -0
  600. vllm/model_executor/layers/quantization/bitblas.py +464 -0
  601. vllm/model_executor/layers/quantization/bitsandbytes.py +627 -0
  602. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  603. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +797 -0
  604. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +2074 -0
  605. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +27 -0
  606. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +366 -0
  607. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  608. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
  609. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +105 -0
  610. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +185 -0
  611. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +169 -0
  612. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +135 -0
  613. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
  614. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +157 -0
  615. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
  616. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
  617. vllm/model_executor/layers/quantization/compressed_tensors/transform/__init__.py +0 -0
  618. vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +238 -0
  619. vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +153 -0
  620. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/__init__.py +0 -0
  621. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +46 -0
  622. vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
  623. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
  624. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  625. vllm/model_executor/layers/quantization/deepspeedfp.py +196 -0
  626. vllm/model_executor/layers/quantization/experts_int8.py +223 -0
  627. vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
  628. vllm/model_executor/layers/quantization/fp8.py +1098 -0
  629. vllm/model_executor/layers/quantization/gguf.py +599 -0
  630. vllm/model_executor/layers/quantization/gptq.py +340 -0
  631. vllm/model_executor/layers/quantization/gptq_bitblas.py +448 -0
  632. vllm/model_executor/layers/quantization/gptq_marlin.py +751 -0
  633. vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
  634. vllm/model_executor/layers/quantization/hqq_marlin.py +333 -0
  635. vllm/model_executor/layers/quantization/inc.py +61 -0
  636. vllm/model_executor/layers/quantization/input_quant_fp8.py +156 -0
  637. vllm/model_executor/layers/quantization/ipex_quant.py +415 -0
  638. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  639. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +91 -0
  640. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +93 -0
  641. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
  642. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +302 -0
  643. vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +92 -0
  644. vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +117 -0
  645. vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +92 -0
  646. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
  647. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +144 -0
  648. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +139 -0
  649. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
  650. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +89 -0
  651. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +161 -0
  652. vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +206 -0
  653. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
  654. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
  655. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +104 -0
  656. vllm/model_executor/layers/quantization/kv_cache.py +143 -0
  657. vllm/model_executor/layers/quantization/modelopt.py +1596 -0
  658. vllm/model_executor/layers/quantization/moe_wna16.py +484 -0
  659. vllm/model_executor/layers/quantization/mxfp4.py +988 -0
  660. vllm/model_executor/layers/quantization/petit.py +306 -0
  661. vllm/model_executor/layers/quantization/ptpc_fp8.py +129 -0
  662. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  663. vllm/model_executor/layers/quantization/quark/quark.py +432 -0
  664. vllm/model_executor/layers/quantization/quark/quark_moe.py +561 -0
  665. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  666. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  667. vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +239 -0
  668. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +163 -0
  669. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
  670. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  671. vllm/model_executor/layers/quantization/rtn.py +466 -0
  672. vllm/model_executor/layers/quantization/schema.py +86 -0
  673. vllm/model_executor/layers/quantization/torchao.py +214 -0
  674. vllm/model_executor/layers/quantization/tpu_int8.py +125 -0
  675. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  676. vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
  677. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +210 -0
  678. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  679. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  680. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  681. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  682. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  683. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  684. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  685. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  686. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  687. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  688. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  689. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  690. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  691. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  722. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  723. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  724. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  725. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  726. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  727. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  728. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  729. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  730. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  731. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  732. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  733. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  734. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  735. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  736. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  737. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  738. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  739. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  740. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  741. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  742. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  743. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  744. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  745. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  746. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  747. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  748. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  749. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  750. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  751. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  752. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  753. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  754. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  755. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  756. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  757. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  758. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  759. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  760. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  761. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  762. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  763. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  764. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  765. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  766. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  767. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  768. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  769. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  770. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  771. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  772. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  773. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  774. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  775. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  776. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  777. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  778. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  779. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  780. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  781. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  782. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  783. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  784. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  785. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  786. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  787. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  788. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  789. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  790. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  791. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  792. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  793. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  794. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  795. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  796. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  797. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  798. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  799. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  800. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  801. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  802. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  803. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  804. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  805. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  806. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  807. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  808. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  809. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  810. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  811. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  812. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  813. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  814. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  815. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  816. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  817. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  818. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  819. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  820. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  821. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  822. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  823. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  824. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  825. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  826. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  827. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  828. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  829. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  830. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  831. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  832. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  833. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  834. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  835. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  836. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  837. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  838. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  839. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  840. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  841. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  842. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  843. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  844. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  845. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  846. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  847. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  848. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  849. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  850. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  851. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  852. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  853. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  854. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  855. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  856. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  857. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  858. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  859. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  860. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  861. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  862. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  863. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  864. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  865. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  866. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  867. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  868. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  869. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  870. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  871. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  872. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  873. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  874. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  875. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  876. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  877. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  878. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  879. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  880. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  881. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  882. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  883. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  884. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  885. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  886. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  887. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  888. vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
  889. vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +79 -0
  890. vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +248 -0
  891. vllm/model_executor/layers/quantization/utils/fp8_utils.py +949 -0
  892. vllm/model_executor/layers/quantization/utils/gptq_utils.py +146 -0
  893. vllm/model_executor/layers/quantization/utils/int8_utils.py +492 -0
  894. vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
  895. vllm/model_executor/layers/quantization/utils/machete_utils.py +50 -0
  896. vllm/model_executor/layers/quantization/utils/marlin_utils.py +479 -0
  897. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +396 -0
  898. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +345 -0
  899. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
  900. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
  901. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +141 -0
  902. vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +20 -0
  903. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +137 -0
  904. vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +59 -0
  905. vllm/model_executor/layers/quantization/utils/petit_utils.py +122 -0
  906. vllm/model_executor/layers/quantization/utils/quant_utils.py +641 -0
  907. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +458 -0
  908. vllm/model_executor/layers/resampler.py +270 -0
  909. vllm/model_executor/layers/rotary_embedding/__init__.py +204 -0
  910. vllm/model_executor/layers/rotary_embedding/base.py +177 -0
  911. vllm/model_executor/layers/rotary_embedding/common.py +150 -0
  912. vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +138 -0
  913. vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +197 -0
  914. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +41 -0
  915. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +67 -0
  916. vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +80 -0
  917. vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
  918. vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
  919. vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +81 -0
  920. vllm/model_executor/layers/rotary_embedding/mrope.py +1321 -0
  921. vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +42 -0
  922. vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +129 -0
  923. vllm/model_executor/layers/rotary_embedding/rocm_aiter_rope_ops.py +86 -0
  924. vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +68 -0
  925. vllm/model_executor/layers/shared_fused_moe/__init__.py +6 -0
  926. vllm/model_executor/layers/shared_fused_moe/shared_fused_moe.py +56 -0
  927. vllm/model_executor/layers/utils.py +195 -0
  928. vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
  929. vllm/model_executor/model_loader/__init__.py +138 -0
  930. vllm/model_executor/model_loader/base_loader.py +52 -0
  931. vllm/model_executor/model_loader/bitsandbytes_loader.py +788 -0
  932. vllm/model_executor/model_loader/default_loader.py +277 -0
  933. vllm/model_executor/model_loader/dummy_loader.py +28 -0
  934. vllm/model_executor/model_loader/gguf_loader.py +155 -0
  935. vllm/model_executor/model_loader/runai_streamer_loader.py +104 -0
  936. vllm/model_executor/model_loader/sharded_state_loader.py +199 -0
  937. vllm/model_executor/model_loader/tensorizer.py +738 -0
  938. vllm/model_executor/model_loader/tensorizer_loader.py +143 -0
  939. vllm/model_executor/model_loader/tpu.py +114 -0
  940. vllm/model_executor/model_loader/utils.py +292 -0
  941. vllm/model_executor/model_loader/weight_utils.py +990 -0
  942. vllm/model_executor/models/__init__.py +33 -0
  943. vllm/model_executor/models/adapters.py +542 -0
  944. vllm/model_executor/models/aimv2.py +246 -0
  945. vllm/model_executor/models/apertus.py +579 -0
  946. vllm/model_executor/models/arcee.py +422 -0
  947. vllm/model_executor/models/arctic.py +558 -0
  948. vllm/model_executor/models/aria.py +650 -0
  949. vllm/model_executor/models/aya_vision.py +468 -0
  950. vllm/model_executor/models/baichuan.py +474 -0
  951. vllm/model_executor/models/bailing_moe.py +642 -0
  952. vllm/model_executor/models/bamba.py +514 -0
  953. vllm/model_executor/models/bert.py +665 -0
  954. vllm/model_executor/models/bert_with_rope.py +687 -0
  955. vllm/model_executor/models/blip.py +339 -0
  956. vllm/model_executor/models/blip2.py +712 -0
  957. vllm/model_executor/models/bloom.py +374 -0
  958. vllm/model_executor/models/chameleon.py +1139 -0
  959. vllm/model_executor/models/chatglm.py +476 -0
  960. vllm/model_executor/models/clip.py +407 -0
  961. vllm/model_executor/models/cohere2_vision.py +481 -0
  962. vllm/model_executor/models/commandr.py +465 -0
  963. vllm/model_executor/models/config.py +445 -0
  964. vllm/model_executor/models/dbrx.py +471 -0
  965. vllm/model_executor/models/deepseek.py +497 -0
  966. vllm/model_executor/models/deepseek_eagle.py +240 -0
  967. vllm/model_executor/models/deepseek_mtp.py +289 -0
  968. vllm/model_executor/models/deepseek_v2.py +1444 -0
  969. vllm/model_executor/models/deepseek_vl2.py +658 -0
  970. vllm/model_executor/models/dots1.py +546 -0
  971. vllm/model_executor/models/dots_ocr.py +873 -0
  972. vllm/model_executor/models/ernie45.py +43 -0
  973. vllm/model_executor/models/ernie45_moe.py +607 -0
  974. vllm/model_executor/models/ernie45_vl.py +1527 -0
  975. vllm/model_executor/models/ernie45_vl_moe.py +727 -0
  976. vllm/model_executor/models/ernie_mtp.py +268 -0
  977. vllm/model_executor/models/exaone.py +550 -0
  978. vllm/model_executor/models/exaone4.py +533 -0
  979. vllm/model_executor/models/fairseq2_llama.py +154 -0
  980. vllm/model_executor/models/falcon.py +509 -0
  981. vllm/model_executor/models/falcon_h1.py +674 -0
  982. vllm/model_executor/models/fuyu.py +399 -0
  983. vllm/model_executor/models/gemma.py +425 -0
  984. vllm/model_executor/models/gemma2.py +422 -0
  985. vllm/model_executor/models/gemma3.py +555 -0
  986. vllm/model_executor/models/gemma3_mm.py +721 -0
  987. vllm/model_executor/models/gemma3n.py +1113 -0
  988. vllm/model_executor/models/gemma3n_mm.py +761 -0
  989. vllm/model_executor/models/glm.py +23 -0
  990. vllm/model_executor/models/glm4.py +304 -0
  991. vllm/model_executor/models/glm4_1v.py +1690 -0
  992. vllm/model_executor/models/glm4_moe.py +727 -0
  993. vllm/model_executor/models/glm4_moe_mtp.py +301 -0
  994. vllm/model_executor/models/glm4v.py +654 -0
  995. vllm/model_executor/models/gpt2.py +380 -0
  996. vllm/model_executor/models/gpt_bigcode.py +344 -0
  997. vllm/model_executor/models/gpt_j.py +339 -0
  998. vllm/model_executor/models/gpt_neox.py +330 -0
  999. vllm/model_executor/models/gpt_oss.py +712 -0
  1000. vllm/model_executor/models/granite.py +489 -0
  1001. vllm/model_executor/models/granite_speech.py +794 -0
  1002. vllm/model_executor/models/granitemoe.py +550 -0
  1003. vllm/model_executor/models/granitemoehybrid.py +614 -0
  1004. vllm/model_executor/models/granitemoeshared.py +332 -0
  1005. vllm/model_executor/models/gritlm.py +262 -0
  1006. vllm/model_executor/models/grok1.py +547 -0
  1007. vllm/model_executor/models/h2ovl.py +536 -0
  1008. vllm/model_executor/models/hunyuan_v1.py +1042 -0
  1009. vllm/model_executor/models/hyperclovax_vision.py +1192 -0
  1010. vllm/model_executor/models/idefics2_vision_model.py +417 -0
  1011. vllm/model_executor/models/idefics3.py +756 -0
  1012. vllm/model_executor/models/interfaces.py +959 -0
  1013. vllm/model_executor/models/interfaces_base.py +192 -0
  1014. vllm/model_executor/models/intern_vit.py +441 -0
  1015. vllm/model_executor/models/internlm2.py +450 -0
  1016. vllm/model_executor/models/internlm2_ve.py +148 -0
  1017. vllm/model_executor/models/interns1.py +838 -0
  1018. vllm/model_executor/models/interns1_vit.py +418 -0
  1019. vllm/model_executor/models/internvl.py +1423 -0
  1020. vllm/model_executor/models/jais.py +373 -0
  1021. vllm/model_executor/models/jamba.py +591 -0
  1022. vllm/model_executor/models/jina_vl.py +144 -0
  1023. vllm/model_executor/models/keye.py +1680 -0
  1024. vllm/model_executor/models/keye_vl1_5.py +602 -0
  1025. vllm/model_executor/models/kimi_vl.py +618 -0
  1026. vllm/model_executor/models/lfm2.py +548 -0
  1027. vllm/model_executor/models/llama.py +669 -0
  1028. vllm/model_executor/models/llama4.py +746 -0
  1029. vllm/model_executor/models/llama4_eagle.py +239 -0
  1030. vllm/model_executor/models/llama_eagle.py +179 -0
  1031. vllm/model_executor/models/llama_eagle3.py +296 -0
  1032. vllm/model_executor/models/llava.py +870 -0
  1033. vllm/model_executor/models/llava_next.py +571 -0
  1034. vllm/model_executor/models/llava_next_video.py +476 -0
  1035. vllm/model_executor/models/llava_onevision.py +942 -0
  1036. vllm/model_executor/models/longcat_flash.py +715 -0
  1037. vllm/model_executor/models/longcat_flash_mtp.py +352 -0
  1038. vllm/model_executor/models/mamba.py +275 -0
  1039. vllm/model_executor/models/mamba2.py +291 -0
  1040. vllm/model_executor/models/medusa.py +169 -0
  1041. vllm/model_executor/models/midashenglm.py +792 -0
  1042. vllm/model_executor/models/mimo.py +188 -0
  1043. vllm/model_executor/models/mimo_mtp.py +280 -0
  1044. vllm/model_executor/models/minicpm.py +631 -0
  1045. vllm/model_executor/models/minicpm3.py +230 -0
  1046. vllm/model_executor/models/minicpm_eagle.py +389 -0
  1047. vllm/model_executor/models/minicpmo.py +770 -0
  1048. vllm/model_executor/models/minicpmv.py +1784 -0
  1049. vllm/model_executor/models/minimax_text_01.py +986 -0
  1050. vllm/model_executor/models/minimax_vl_01.py +426 -0
  1051. vllm/model_executor/models/mistral3.py +628 -0
  1052. vllm/model_executor/models/mixtral.py +606 -0
  1053. vllm/model_executor/models/mllama4.py +1076 -0
  1054. vllm/model_executor/models/mlp_speculator.py +206 -0
  1055. vllm/model_executor/models/modernbert.py +374 -0
  1056. vllm/model_executor/models/module_mapping.py +72 -0
  1057. vllm/model_executor/models/molmo.py +1567 -0
  1058. vllm/model_executor/models/moonvit.py +673 -0
  1059. vllm/model_executor/models/motif.py +345 -0
  1060. vllm/model_executor/models/mpt.py +329 -0
  1061. vllm/model_executor/models/nano_nemotron_vl.py +1394 -0
  1062. vllm/model_executor/models/nemotron.py +507 -0
  1063. vllm/model_executor/models/nemotron_h.py +565 -0
  1064. vllm/model_executor/models/nemotron_nas.py +481 -0
  1065. vllm/model_executor/models/nemotron_vl.py +652 -0
  1066. vllm/model_executor/models/nvlm_d.py +203 -0
  1067. vllm/model_executor/models/olmo.py +404 -0
  1068. vllm/model_executor/models/olmo2.py +439 -0
  1069. vllm/model_executor/models/olmoe.py +483 -0
  1070. vllm/model_executor/models/opt.py +412 -0
  1071. vllm/model_executor/models/orion.py +348 -0
  1072. vllm/model_executor/models/ovis.py +559 -0
  1073. vllm/model_executor/models/ovis2_5.py +642 -0
  1074. vllm/model_executor/models/paligemma.py +411 -0
  1075. vllm/model_executor/models/persimmon.py +343 -0
  1076. vllm/model_executor/models/phi.py +356 -0
  1077. vllm/model_executor/models/phi3.py +19 -0
  1078. vllm/model_executor/models/phi3v.py +698 -0
  1079. vllm/model_executor/models/phi4_multimodal.py +1475 -0
  1080. vllm/model_executor/models/phi4mm.py +1279 -0
  1081. vllm/model_executor/models/phi4mm_audio.py +1254 -0
  1082. vllm/model_executor/models/phi4mm_utils.py +1875 -0
  1083. vllm/model_executor/models/phimoe.py +679 -0
  1084. vllm/model_executor/models/pixtral.py +1345 -0
  1085. vllm/model_executor/models/plamo2.py +978 -0
  1086. vllm/model_executor/models/qwen.py +361 -0
  1087. vllm/model_executor/models/qwen2.py +523 -0
  1088. vllm/model_executor/models/qwen2_5_omni_thinker.py +984 -0
  1089. vllm/model_executor/models/qwen2_5_vl.py +1481 -0
  1090. vllm/model_executor/models/qwen2_audio.py +489 -0
  1091. vllm/model_executor/models/qwen2_moe.py +558 -0
  1092. vllm/model_executor/models/qwen2_rm.py +122 -0
  1093. vllm/model_executor/models/qwen2_vl.py +1670 -0
  1094. vllm/model_executor/models/qwen3.py +341 -0
  1095. vllm/model_executor/models/qwen3_moe.py +692 -0
  1096. vllm/model_executor/models/qwen3_next.py +1266 -0
  1097. vllm/model_executor/models/qwen3_next_mtp.py +281 -0
  1098. vllm/model_executor/models/qwen3_vl.py +1613 -0
  1099. vllm/model_executor/models/qwen3_vl_moe.py +358 -0
  1100. vllm/model_executor/models/qwen_vl.py +795 -0
  1101. vllm/model_executor/models/radio.py +576 -0
  1102. vllm/model_executor/models/registry.py +990 -0
  1103. vllm/model_executor/models/roberta.py +252 -0
  1104. vllm/model_executor/models/rvl.py +103 -0
  1105. vllm/model_executor/models/seed_oss.py +485 -0
  1106. vllm/model_executor/models/siglip.py +540 -0
  1107. vllm/model_executor/models/siglip2navit.py +689 -0
  1108. vllm/model_executor/models/skyworkr1v.py +911 -0
  1109. vllm/model_executor/models/smolvlm.py +44 -0
  1110. vllm/model_executor/models/solar.py +504 -0
  1111. vllm/model_executor/models/stablelm.py +341 -0
  1112. vllm/model_executor/models/starcoder2.py +354 -0
  1113. vllm/model_executor/models/step3_text.py +510 -0
  1114. vllm/model_executor/models/step3_vl.py +1072 -0
  1115. vllm/model_executor/models/swin.py +475 -0
  1116. vllm/model_executor/models/tarsier.py +639 -0
  1117. vllm/model_executor/models/telechat2.py +151 -0
  1118. vllm/model_executor/models/teleflm.py +79 -0
  1119. vllm/model_executor/models/terratorch.py +294 -0
  1120. vllm/model_executor/models/transformers.py +948 -0
  1121. vllm/model_executor/models/ultravox.py +654 -0
  1122. vllm/model_executor/models/utils.py +808 -0
  1123. vllm/model_executor/models/vision.py +404 -0
  1124. vllm/model_executor/models/voxtral.py +786 -0
  1125. vllm/model_executor/models/whisper.py +963 -0
  1126. vllm/model_executor/models/zamba2.py +960 -0
  1127. vllm/model_executor/parameter.py +620 -0
  1128. vllm/model_executor/utils.py +86 -0
  1129. vllm/model_executor/warmup/__init__.py +0 -0
  1130. vllm/model_executor/warmup/deep_gemm_warmup.py +230 -0
  1131. vllm/model_executor/warmup/kernel_warmup.py +83 -0
  1132. vllm/multimodal/__init__.py +33 -0
  1133. vllm/multimodal/audio.py +116 -0
  1134. vllm/multimodal/base.py +27 -0
  1135. vllm/multimodal/cache.py +697 -0
  1136. vllm/multimodal/evs.py +273 -0
  1137. vllm/multimodal/hasher.py +102 -0
  1138. vllm/multimodal/image.py +130 -0
  1139. vllm/multimodal/inputs.py +987 -0
  1140. vllm/multimodal/parse.py +511 -0
  1141. vllm/multimodal/processing.py +2148 -0
  1142. vllm/multimodal/profiling.py +284 -0
  1143. vllm/multimodal/registry.py +345 -0
  1144. vllm/multimodal/utils.py +503 -0
  1145. vllm/multimodal/video.py +319 -0
  1146. vllm/outputs.py +324 -0
  1147. vllm/platforms/__init__.py +263 -0
  1148. vllm/platforms/cpu.py +340 -0
  1149. vllm/platforms/cuda.py +668 -0
  1150. vllm/platforms/interface.py +620 -0
  1151. vllm/platforms/rocm.py +497 -0
  1152. vllm/platforms/tpu.py +233 -0
  1153. vllm/platforms/xpu.py +243 -0
  1154. vllm/plugins/__init__.py +72 -0
  1155. vllm/plugins/io_processors/__init__.py +68 -0
  1156. vllm/plugins/io_processors/interface.py +67 -0
  1157. vllm/plugins/lora_resolvers/README.md +16 -0
  1158. vllm/plugins/lora_resolvers/__init__.py +0 -0
  1159. vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
  1160. vllm/pooling_params.py +191 -0
  1161. vllm/profiler/__init__.py +0 -0
  1162. vllm/profiler/layerwise_profile.py +375 -0
  1163. vllm/profiler/utils.py +148 -0
  1164. vllm/py.typed +2 -0
  1165. vllm/ray/__init__.py +0 -0
  1166. vllm/ray/lazy_utils.py +22 -0
  1167. vllm/ray/ray_env.py +72 -0
  1168. vllm/reasoning/__init__.py +29 -0
  1169. vllm/reasoning/abs_reasoning_parsers.py +202 -0
  1170. vllm/reasoning/basic_parsers.py +156 -0
  1171. vllm/reasoning/deepseek_r1_reasoning_parser.py +67 -0
  1172. vllm/reasoning/glm4_moe_reasoning_parser.py +151 -0
  1173. vllm/reasoning/gptoss_reasoning_parser.py +87 -0
  1174. vllm/reasoning/granite_reasoning_parser.py +363 -0
  1175. vllm/reasoning/hunyuan_a13b_reasoning_parser.py +245 -0
  1176. vllm/reasoning/mistral_reasoning_parser.py +56 -0
  1177. vllm/reasoning/qwen3_reasoning_parser.py +72 -0
  1178. vllm/reasoning/seedoss_reasoning_parser.py +28 -0
  1179. vllm/reasoning/step3_reasoning_parser.py +109 -0
  1180. vllm/sampling_params.py +593 -0
  1181. vllm/scalar_type.py +349 -0
  1182. vllm/scripts.py +15 -0
  1183. vllm/sequence.py +103 -0
  1184. vllm/tasks.py +11 -0
  1185. vllm/test_utils.py +129 -0
  1186. vllm/third_party/__init__.py +0 -0
  1187. vllm/third_party/pynvml.py +6140 -0
  1188. vllm/tracing.py +136 -0
  1189. vllm/transformers_utils/__init__.py +24 -0
  1190. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1191. vllm/transformers_utils/chat_templates/registry.py +70 -0
  1192. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1193. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1194. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1195. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1196. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1197. vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
  1198. vllm/transformers_utils/config.py +1102 -0
  1199. vllm/transformers_utils/config_parser_base.py +20 -0
  1200. vllm/transformers_utils/configs/__init__.py +63 -0
  1201. vllm/transformers_utils/configs/arctic.py +207 -0
  1202. vllm/transformers_utils/configs/chatglm.py +72 -0
  1203. vllm/transformers_utils/configs/deepseek_v3.py +101 -0
  1204. vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
  1205. vllm/transformers_utils/configs/dotsocr.py +69 -0
  1206. vllm/transformers_utils/configs/eagle.py +84 -0
  1207. vllm/transformers_utils/configs/falcon.py +90 -0
  1208. vllm/transformers_utils/configs/jais.py +237 -0
  1209. vllm/transformers_utils/configs/kimi_vl.py +37 -0
  1210. vllm/transformers_utils/configs/medusa.py +63 -0
  1211. vllm/transformers_utils/configs/midashenglm.py +101 -0
  1212. vllm/transformers_utils/configs/mistral.py +165 -0
  1213. vllm/transformers_utils/configs/mlp_speculator.py +68 -0
  1214. vllm/transformers_utils/configs/moonvit.py +33 -0
  1215. vllm/transformers_utils/configs/nemotron.py +205 -0
  1216. vllm/transformers_utils/configs/nemotron_h.py +259 -0
  1217. vllm/transformers_utils/configs/nemotron_vl.py +56 -0
  1218. vllm/transformers_utils/configs/olmo3.py +80 -0
  1219. vllm/transformers_utils/configs/ovis.py +176 -0
  1220. vllm/transformers_utils/configs/qwen3_next.py +275 -0
  1221. vllm/transformers_utils/configs/radio.py +91 -0
  1222. vllm/transformers_utils/configs/speculators/__init__.py +2 -0
  1223. vllm/transformers_utils/configs/speculators/algos.py +32 -0
  1224. vllm/transformers_utils/configs/speculators/base.py +111 -0
  1225. vllm/transformers_utils/configs/step3_vl.py +123 -0
  1226. vllm/transformers_utils/configs/ultravox.py +116 -0
  1227. vllm/transformers_utils/detokenizer_utils.py +199 -0
  1228. vllm/transformers_utils/dynamic_module.py +60 -0
  1229. vllm/transformers_utils/processor.py +299 -0
  1230. vllm/transformers_utils/processors/__init__.py +16 -0
  1231. vllm/transformers_utils/processors/deepseek_vl2.py +362 -0
  1232. vllm/transformers_utils/processors/ovis.py +420 -0
  1233. vllm/transformers_utils/processors/ovis2_5.py +458 -0
  1234. vllm/transformers_utils/runai_utils.py +104 -0
  1235. vllm/transformers_utils/s3_utils.py +93 -0
  1236. vllm/transformers_utils/tokenizer.py +292 -0
  1237. vllm/transformers_utils/tokenizer_base.py +154 -0
  1238. vllm/transformers_utils/tokenizers/__init__.py +10 -0
  1239. vllm/transformers_utils/tokenizers/mistral.py +521 -0
  1240. vllm/transformers_utils/utils.py +108 -0
  1241. vllm/triton_utils/__init__.py +16 -0
  1242. vllm/triton_utils/importing.py +96 -0
  1243. vllm/usage/__init__.py +0 -0
  1244. vllm/usage/usage_lib.py +259 -0
  1245. vllm/utils/__init__.py +3566 -0
  1246. vllm/utils/deep_gemm.py +319 -0
  1247. vllm/utils/flashinfer.py +443 -0
  1248. vllm/utils/jsontree.py +178 -0
  1249. vllm/utils/tensor_schema.py +235 -0
  1250. vllm/v1/__init__.py +0 -0
  1251. vllm/v1/attention/__init__.py +0 -0
  1252. vllm/v1/attention/backends/__init__.py +0 -0
  1253. vllm/v1/attention/backends/cpu_attn.py +919 -0
  1254. vllm/v1/attention/backends/flash_attn.py +795 -0
  1255. vllm/v1/attention/backends/flashinfer.py +1181 -0
  1256. vllm/v1/attention/backends/flex_attention.py +861 -0
  1257. vllm/v1/attention/backends/gdn_attn.py +332 -0
  1258. vllm/v1/attention/backends/linear_attn.py +67 -0
  1259. vllm/v1/attention/backends/mamba1_attn.py +81 -0
  1260. vllm/v1/attention/backends/mamba2_attn.py +232 -0
  1261. vllm/v1/attention/backends/mamba_attn.py +52 -0
  1262. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1263. vllm/v1/attention/backends/mla/common.py +1783 -0
  1264. vllm/v1/attention/backends/mla/cutlass_mla.py +248 -0
  1265. vllm/v1/attention/backends/mla/flashattn_mla.py +271 -0
  1266. vllm/v1/attention/backends/mla/flashinfer_mla.py +114 -0
  1267. vllm/v1/attention/backends/mla/flashmla.py +203 -0
  1268. vllm/v1/attention/backends/mla/flashmla_sparse.py +544 -0
  1269. vllm/v1/attention/backends/mla/indexer.py +342 -0
  1270. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +255 -0
  1271. vllm/v1/attention/backends/mla/triton_mla.py +177 -0
  1272. vllm/v1/attention/backends/pallas.py +409 -0
  1273. vllm/v1/attention/backends/rocm_aiter_fa.py +549 -0
  1274. vllm/v1/attention/backends/rocm_attn.py +426 -0
  1275. vllm/v1/attention/backends/short_conv_attn.py +94 -0
  1276. vllm/v1/attention/backends/tree_attn.py +451 -0
  1277. vllm/v1/attention/backends/triton_attn.py +361 -0
  1278. vllm/v1/attention/backends/utils.py +990 -0
  1279. vllm/v1/attention/backends/xformers.py +438 -0
  1280. vllm/v1/core/__init__.py +0 -0
  1281. vllm/v1/core/block_pool.py +416 -0
  1282. vllm/v1/core/encoder_cache_manager.py +333 -0
  1283. vllm/v1/core/kv_cache_coordinator.py +440 -0
  1284. vllm/v1/core/kv_cache_manager.py +399 -0
  1285. vllm/v1/core/kv_cache_utils.py +1291 -0
  1286. vllm/v1/core/sched/__init__.py +0 -0
  1287. vllm/v1/core/sched/async_scheduler.py +47 -0
  1288. vllm/v1/core/sched/interface.py +158 -0
  1289. vllm/v1/core/sched/output.py +166 -0
  1290. vllm/v1/core/sched/request_queue.py +224 -0
  1291. vllm/v1/core/sched/scheduler.py +1296 -0
  1292. vllm/v1/core/sched/utils.py +69 -0
  1293. vllm/v1/core/single_type_kv_cache_manager.py +671 -0
  1294. vllm/v1/cudagraph_dispatcher.py +125 -0
  1295. vllm/v1/engine/__init__.py +203 -0
  1296. vllm/v1/engine/async_llm.py +742 -0
  1297. vllm/v1/engine/coordinator.py +357 -0
  1298. vllm/v1/engine/core.py +1235 -0
  1299. vllm/v1/engine/core_client.py +1334 -0
  1300. vllm/v1/engine/detokenizer.py +349 -0
  1301. vllm/v1/engine/exceptions.py +17 -0
  1302. vllm/v1/engine/llm_engine.py +370 -0
  1303. vllm/v1/engine/logprobs.py +201 -0
  1304. vllm/v1/engine/output_processor.py +576 -0
  1305. vllm/v1/engine/parallel_sampling.py +133 -0
  1306. vllm/v1/engine/processor.py +545 -0
  1307. vllm/v1/engine/utils.py +860 -0
  1308. vllm/v1/executor/__init__.py +0 -0
  1309. vllm/v1/executor/abstract.py +137 -0
  1310. vllm/v1/executor/multiproc_executor.py +726 -0
  1311. vllm/v1/executor/ray_distributed_executor.py +108 -0
  1312. vllm/v1/executor/utils.py +23 -0
  1313. vllm/v1/kv_cache_interface.py +375 -0
  1314. vllm/v1/kv_offload/__init__.py +0 -0
  1315. vllm/v1/kv_offload/abstract.py +165 -0
  1316. vllm/v1/kv_offload/backend.py +96 -0
  1317. vllm/v1/kv_offload/backends/__init__.py +0 -0
  1318. vllm/v1/kv_offload/backends/cpu.py +61 -0
  1319. vllm/v1/kv_offload/cpu.py +75 -0
  1320. vllm/v1/kv_offload/factory.py +56 -0
  1321. vllm/v1/kv_offload/lru_manager.py +132 -0
  1322. vllm/v1/kv_offload/mediums.py +39 -0
  1323. vllm/v1/kv_offload/spec.py +61 -0
  1324. vllm/v1/kv_offload/worker/__init__.py +0 -0
  1325. vllm/v1/kv_offload/worker/cpu_gpu.py +171 -0
  1326. vllm/v1/kv_offload/worker/worker.py +142 -0
  1327. vllm/v1/metrics/__init__.py +0 -0
  1328. vllm/v1/metrics/loggers.py +741 -0
  1329. vllm/v1/metrics/prometheus.py +82 -0
  1330. vllm/v1/metrics/ray_wrappers.py +152 -0
  1331. vllm/v1/metrics/reader.py +246 -0
  1332. vllm/v1/metrics/stats.py +257 -0
  1333. vllm/v1/outputs.py +161 -0
  1334. vllm/v1/pool/__init__.py +0 -0
  1335. vllm/v1/pool/metadata.py +77 -0
  1336. vllm/v1/request.py +241 -0
  1337. vllm/v1/sample/__init__.py +0 -0
  1338. vllm/v1/sample/logits_processor/__init__.py +294 -0
  1339. vllm/v1/sample/logits_processor/builtin.py +275 -0
  1340. vllm/v1/sample/logits_processor/interface.py +97 -0
  1341. vllm/v1/sample/logits_processor/state.py +161 -0
  1342. vllm/v1/sample/metadata.py +43 -0
  1343. vllm/v1/sample/ops/__init__.py +0 -0
  1344. vllm/v1/sample/ops/bad_words.py +39 -0
  1345. vllm/v1/sample/ops/logprobs.py +26 -0
  1346. vllm/v1/sample/ops/penalties.py +43 -0
  1347. vllm/v1/sample/ops/topk_topp_sampler.py +292 -0
  1348. vllm/v1/sample/rejection_sampler.py +623 -0
  1349. vllm/v1/sample/sampler.py +285 -0
  1350. vllm/v1/sample/tpu/__init__.py +0 -0
  1351. vllm/v1/sample/tpu/metadata.py +124 -0
  1352. vllm/v1/sample/tpu/sampler.py +213 -0
  1353. vllm/v1/serial_utils.py +423 -0
  1354. vllm/v1/spec_decode/__init__.py +0 -0
  1355. vllm/v1/spec_decode/eagle.py +1011 -0
  1356. vllm/v1/spec_decode/medusa.py +66 -0
  1357. vllm/v1/spec_decode/metadata.py +62 -0
  1358. vllm/v1/spec_decode/metrics.py +211 -0
  1359. vllm/v1/spec_decode/ngram_proposer.py +276 -0
  1360. vllm/v1/spec_decode/utils.py +14 -0
  1361. vllm/v1/structured_output/__init__.py +295 -0
  1362. vllm/v1/structured_output/backend_guidance.py +245 -0
  1363. vllm/v1/structured_output/backend_lm_format_enforcer.py +167 -0
  1364. vllm/v1/structured_output/backend_outlines.py +320 -0
  1365. vllm/v1/structured_output/backend_types.py +134 -0
  1366. vllm/v1/structured_output/backend_xgrammar.py +327 -0
  1367. vllm/v1/structured_output/request.py +86 -0
  1368. vllm/v1/structured_output/utils.py +454 -0
  1369. vllm/v1/utils.py +396 -0
  1370. vllm/v1/worker/__init__.py +0 -0
  1371. vllm/v1/worker/block_table.py +210 -0
  1372. vllm/v1/worker/cpu_model_runner.py +175 -0
  1373. vllm/v1/worker/cpu_worker.py +156 -0
  1374. vllm/v1/worker/gpu_input_batch.py +863 -0
  1375. vllm/v1/worker/gpu_model_runner.py +4160 -0
  1376. vllm/v1/worker/gpu_ubatch_wrapper.py +399 -0
  1377. vllm/v1/worker/gpu_worker.py +710 -0
  1378. vllm/v1/worker/kv_connector_model_runner_mixin.py +132 -0
  1379. vllm/v1/worker/lora_model_runner_mixin.py +183 -0
  1380. vllm/v1/worker/tpu_input_batch.py +587 -0
  1381. vllm/v1/worker/tpu_model_runner.py +1946 -0
  1382. vllm/v1/worker/tpu_worker.py +346 -0
  1383. vllm/v1/worker/ubatch_splitting.py +192 -0
  1384. vllm/v1/worker/ubatch_utils.py +27 -0
  1385. vllm/v1/worker/ubatching.py +224 -0
  1386. vllm/v1/worker/utils.py +344 -0
  1387. vllm/v1/worker/worker_base.py +65 -0
  1388. vllm/v1/worker/xpu_model_runner.py +57 -0
  1389. vllm/v1/worker/xpu_worker.py +179 -0
  1390. vllm/version.py +41 -0
  1391. vllm/vllm_flash_attn/.gitkeep +0 -0
  1392. vllm/worker/__init__.py +0 -0
  1393. vllm/worker/worker_base.py +279 -0
  1394. vllm_cpu-0.11.0.post2.dist-info/METADATA +348 -0
  1395. vllm_cpu-0.11.0.post2.dist-info/RECORD +1398 -0
  1396. vllm_cpu-0.11.0.post2.dist-info/WHEEL +5 -0
  1397. vllm_cpu-0.11.0.post2.dist-info/entry_points.txt +5 -0
  1398. vllm_cpu-0.11.0.post2.dist-info/top_level.txt +1 -0
vllm/envs.py ADDED
@@ -0,0 +1,1590 @@
+ # SPDX-License-Identifier: Apache-2.0
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+
+ import hashlib
+ import json
+ import os
+ import sys
+ import tempfile
+ from typing import TYPE_CHECKING, Any, Callable, Literal, Optional, Union
+
+ if TYPE_CHECKING:
+     VLLM_HOST_IP: str = ""
+     VLLM_PORT: Optional[int] = None
+     VLLM_RPC_BASE_PATH: str = tempfile.gettempdir()
+     VLLM_USE_MODELSCOPE: bool = False
+     VLLM_RINGBUFFER_WARNING_INTERVAL: int = 60
+     VLLM_NCCL_SO_PATH: Optional[str] = None
+     LD_LIBRARY_PATH: Optional[str] = None
+     VLLM_USE_TRITON_FLASH_ATTN: bool = True
+     VLLM_V1_USE_PREFILL_DECODE_ATTENTION: bool = False
+     VLLM_USE_AITER_UNIFIED_ATTENTION: bool = False
+     VLLM_FLASH_ATTN_VERSION: Optional[int] = None
+     LOCAL_RANK: int = 0
+     CUDA_VISIBLE_DEVICES: Optional[str] = None
+     VLLM_ENGINE_ITERATION_TIMEOUT_S: int = 60
+     VLLM_API_KEY: Optional[str] = None
+     S3_ACCESS_KEY_ID: Optional[str] = None
+     S3_SECRET_ACCESS_KEY: Optional[str] = None
+     S3_ENDPOINT_URL: Optional[str] = None
+     VLLM_MODEL_REDIRECT_PATH: Optional[str] = None
+     VLLM_CACHE_ROOT: str = os.path.expanduser("~/.cache/vllm")
+     VLLM_CONFIG_ROOT: str = os.path.expanduser("~/.config/vllm")
+     VLLM_USAGE_STATS_SERVER: str = "https://stats.vllm.ai"
+     VLLM_NO_USAGE_STATS: bool = False
+     VLLM_DISABLE_FLASHINFER_PREFILL: bool = False
+     VLLM_DO_NOT_TRACK: bool = False
+     VLLM_USAGE_SOURCE: str = ""
+     VLLM_CONFIGURE_LOGGING: int = 1
+     VLLM_LOGGING_LEVEL: str = "INFO"
+     VLLM_LOGGING_PREFIX: str = ""
+     VLLM_LOGGING_STREAM: str = "ext://sys.stdout"
+     VLLM_LOGGING_CONFIG_PATH: Optional[str] = None
+     VLLM_LOGITS_PROCESSOR_THREADS: Optional[int] = None
+     VLLM_LOG_STATS_INTERVAL: float = 10.
+     VLLM_TRACE_FUNCTION: int = 0
+     VLLM_ATTENTION_BACKEND: Optional[str] = None
+     VLLM_USE_FLASHINFER_SAMPLER: Optional[bool] = None
+     VLLM_PP_LAYER_PARTITION: Optional[str] = None
+     VLLM_CPU_KVCACHE_SPACE: Optional[int] = 0
+     VLLM_CPU_OMP_THREADS_BIND: str = ""
+     VLLM_CPU_NUM_OF_RESERVED_CPU: Optional[int] = None
+     VLLM_CPU_MOE_PREPACK: bool = True
+     VLLM_CPU_SGL_KERNEL: bool = False
+     VLLM_XLA_CACHE_PATH: str = os.path.join(VLLM_CACHE_ROOT, "xla_cache")
+     VLLM_XLA_CHECK_RECOMPILATION: bool = False
+     VLLM_FUSED_MOE_CHUNK_SIZE: int = 64 * 1024
+     VLLM_ENABLE_FUSED_MOE_ACTIVATION_CHUNKING: bool = True
+     VLLM_USE_RAY_SPMD_WORKER: bool = False
+     VLLM_USE_RAY_COMPILED_DAG: bool = False
+     VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE: Literal["auto", "nccl",
+                                                     "shm"] = "auto"
+     VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM: bool = False
+     VLLM_USE_RAY_WRAPPED_PP_COMM: bool = True
+     VLLM_XLA_USE_SPMD: bool = False
+     VLLM_WORKER_MULTIPROC_METHOD: Literal["fork", "spawn"] = "fork"
+     VLLM_ASSETS_CACHE: str = os.path.join(VLLM_CACHE_ROOT, "assets")
+     VLLM_ASSETS_CACHE_MODEL_CLEAN: bool = False
+     VLLM_IMAGE_FETCH_TIMEOUT: int = 5
+     VLLM_VIDEO_FETCH_TIMEOUT: int = 30
+     VLLM_AUDIO_FETCH_TIMEOUT: int = 10
+     VLLM_MEDIA_URL_ALLOW_REDIRECTS: bool = True
+     VLLM_MEDIA_LOADING_THREAD_COUNT: int = 8
+     VLLM_MAX_AUDIO_CLIP_FILESIZE_MB: int = 25
+     VLLM_VIDEO_LOADER_BACKEND: str = "opencv"
+     VLLM_MM_INPUT_CACHE_GIB: int = 4
+     VLLM_TARGET_DEVICE: str = "cuda"
+     VLLM_MAIN_CUDA_VERSION: str = "12.8"
+     MAX_JOBS: Optional[str] = None
+     NVCC_THREADS: Optional[str] = None
+     VLLM_USE_PRECOMPILED: bool = False
+     VLLM_DOCKER_BUILD_CONTEXT: bool = False
+     VLLM_TEST_USE_PRECOMPILED_NIGHTLY_WHEEL: bool = False
+     VLLM_KEEP_ALIVE_ON_ENGINE_DEATH: bool = False
+     CMAKE_BUILD_TYPE: Optional[Literal["Debug", "Release",
+                                        "RelWithDebInfo"]] = None
+     VERBOSE: bool = False
+     VLLM_ALLOW_LONG_MAX_MODEL_LEN: bool = False
+     VLLM_RPC_TIMEOUT: int = 10000  # ms
+     VLLM_HTTP_TIMEOUT_KEEP_ALIVE: int = 5  # seconds
+     VLLM_PLUGINS: Optional[list[str]] = None
+     VLLM_LORA_RESOLVER_CACHE_DIR: Optional[str] = None
+     VLLM_TORCH_PROFILER_DIR: Optional[str] = None
+     VLLM_TORCH_PROFILER_RECORD_SHAPES: bool = False
+     VLLM_TORCH_PROFILER_WITH_PROFILE_MEMORY: bool = False
+     VLLM_TORCH_PROFILER_WITH_STACK: bool = True
+     VLLM_TORCH_PROFILER_WITH_FLOPS: bool = False
+     VLLM_USE_TRITON_AWQ: bool = False
+     VLLM_ALLOW_RUNTIME_LORA_UPDATING: bool = False
+     VLLM_SKIP_P2P_CHECK: bool = False
+     VLLM_DISABLED_KERNELS: list[str] = []
+     VLLM_DISABLE_NCCL_FOR_DP_SYNCHRONIZATION: bool = False
+     VLLM_USE_V1: bool = True
+     VLLM_ROCM_USE_AITER: bool = False
+     VLLM_ROCM_USE_AITER_PAGED_ATTN: bool = False
+     VLLM_ROCM_USE_AITER_LINEAR: bool = True
+     VLLM_ROCM_USE_AITER_MOE: bool = True
+     VLLM_ROCM_USE_AITER_RMSNORM: bool = True
+     VLLM_ROCM_USE_AITER_MLA: bool = True
+     VLLM_ROCM_USE_AITER_MHA: bool = True
+     VLLM_ROCM_USE_AITER_FP4_ASM_GEMM: bool = False
+     VLLM_ROCM_USE_TRITON_ROPE: bool = False
+     VLLM_ROCM_USE_AITER_FP8BMM: bool = True
+     VLLM_ROCM_USE_SKINNY_GEMM: bool = True
+     VLLM_ROCM_FP8_PADDING: bool = True
+     VLLM_ROCM_MOE_PADDING: bool = True
+     VLLM_ROCM_CUSTOM_PAGED_ATTN: bool = True
+     VLLM_ENABLE_V1_MULTIPROCESSING: bool = True
+     VLLM_LOG_BATCHSIZE_INTERVAL: float = -1
+     VLLM_DISABLE_COMPILE_CACHE: bool = False
+     Q_SCALE_CONSTANT: int = 200
+     K_SCALE_CONSTANT: int = 200
+     V_SCALE_CONSTANT: int = 100
+     VLLM_SERVER_DEV_MODE: bool = False
+     VLLM_V1_OUTPUT_PROC_CHUNK_SIZE: int = 128
+     VLLM_MLA_DISABLE: bool = False
+     VLLM_FLASH_ATTN_MAX_NUM_SPLITS_FOR_CUDA_GRAPH: int = 32
+     VLLM_RAY_PER_WORKER_GPUS: float = 1.0
+     VLLM_RAY_BUNDLE_INDICES: str = ""
+     VLLM_CUDART_SO_PATH: Optional[str] = None
+     VLLM_DP_RANK: int = 0
+     VLLM_DP_RANK_LOCAL: int = -1
+     VLLM_DP_SIZE: int = 1
+     VLLM_USE_STANDALONE_COMPILE: bool = False
+     VLLM_DP_MASTER_IP: str = ""
+     VLLM_DP_MASTER_PORT: int = 0
+     VLLM_MOE_DP_CHUNK_SIZE: int = 256
+     VLLM_RANDOMIZE_DP_DUMMY_INPUTS: bool = False
+     VLLM_MARLIN_USE_ATOMIC_ADD: bool = False
+     VLLM_MXFP4_USE_MARLIN: Optional[bool] = None
+     VLLM_V0_USE_OUTLINES_CACHE: bool = False
+     VLLM_V1_USE_OUTLINES_CACHE: bool = False
+     VLLM_TPU_BUCKET_PADDING_GAP: int = 0
+     VLLM_TPU_MOST_MODEL_LEN: Optional[int] = None
+     VLLM_TPU_USING_PATHWAYS: bool = False
+     VLLM_USE_DEEP_GEMM: bool = True
+     VLLM_USE_DEEP_GEMM_E8M0: bool = True
+     VLLM_USE_DEEP_GEMM_E8M0_HOPPER: bool = False
+     VLLM_SKIP_DEEP_GEMM_WARMUP: bool = False
+     VLLM_USE_FUSED_MOE_GROUPED_TOPK: bool = True
+     VLLM_USE_FLASHINFER_MOE_FP16: bool = False
+     VLLM_USE_FLASHINFER_MOE_FP8: bool = False
+     VLLM_USE_FLASHINFER_MOE_FP4: bool = False
+     VLLM_FLASHINFER_MOE_BACKEND: Literal["throughput",
+                                          "latency"] = "throughput"
+     VLLM_XGRAMMAR_CACHE_MB: int = 0
+     VLLM_MSGPACK_ZERO_COPY_THRESHOLD: int = 256
+     VLLM_ALLOW_INSECURE_SERIALIZATION: bool = False
+     VLLM_NIXL_SIDE_CHANNEL_HOST: str = "localhost"
+     VLLM_NIXL_SIDE_CHANNEL_PORT: int = 5557
+     VLLM_ALL2ALL_BACKEND: Literal["naive", "pplx",
+                                   "deepep_high_throughput",
+                                   "deepep_low_latency",
+                                   "allgather_reducescatter",
+                                   "flashinfer_all2allv"] = \
+         "allgather_reducescatter"
+     VLLM_MAX_TOKENS_PER_EXPERT_FP4_MOE: int = 163840
+     VLLM_TOOL_PARSE_REGEX_TIMEOUT_SECONDS: int = 1
+     VLLM_SLEEP_WHEN_IDLE: bool = False
+     VLLM_MQ_MAX_CHUNK_BYTES_MB: int = 16
+     VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS: int = 300
+     VLLM_KV_CACHE_LAYOUT: Optional[Literal["NHD", "HND"]] = None
+     VLLM_COMPUTE_NANS_IN_LOGITS: bool = False
+     VLLM_USE_NVFP4_CT_EMULATIONS: bool = False
+     VLLM_ROCM_QUICK_REDUCE_QUANTIZATION: Literal["FP", "INT8", "INT6", "INT4",
+                                                  "NONE"] = "NONE"
+     VLLM_ROCM_QUICK_REDUCE_CAST_BF16_TO_FP16: bool = True
+     VLLM_ROCM_QUICK_REDUCE_MAX_SIZE_BYTES_MB: Optional[int] = None
+     VLLM_NIXL_ABORT_REQUEST_TIMEOUT: int = 120
+     VLLM_USE_CUDNN_PREFILL: bool = False
+     VLLM_ENABLE_CUDAGRAPH_GC: bool = False
+     VLLM_LOOPBACK_IP: str = ""
+     VLLM_ALLOW_CHUNKED_LOCAL_ATTN_WITH_HYBRID_KV_CACHE: bool = False
+     VLLM_ENABLE_RESPONSES_API_STORE: bool = False
+     VLLM_USE_TRTLLM_ATTENTION: Optional[str] = None
+     VLLM_FLASHINFER_DISABLE_Q_QUANTIZATION: bool = False
+     VLLM_HAS_FLASHINFER_CUBIN: bool = False
+     VLLM_USE_FLASHINFER_MOE_MXFP4_MXFP8: bool = False
+     VLLM_USE_FLASHINFER_MOE_MXFP4_BF16: bool = False
+     VLLM_ROCM_FP8_MFMA_PAGE_ATTN: bool = False
+     VLLM_USE_FLASHINFER_MOE_MXFP4_MXFP8_CUTLASS: bool = False
+     VLLM_ALLREDUCE_USE_SYMM_MEM: bool = True
+     VLLM_TUNED_CONFIG_FOLDER: Optional[str] = None
+     VLLM_DISABLE_PAD_FOR_CUDAGRAPH: bool = False
+     VLLM_GPT_OSS_HARMONY_SYSTEM_INSTRUCTIONS: bool = False
+     VLLM_CUSTOM_SCOPES_FOR_PROFILING: bool = False
+     VLLM_NVTX_SCOPES_FOR_PROFILING: bool = False
+     VLLM_KV_EVENTS_USE_INT_BLOCK_HASHES: bool = True
+     VLLM_OBJECT_STORAGE_SHM_BUFFER_NAME: str = "VLLM_OBJECT_STORAGE_SHM_BUFFER"
+     VLLM_DEEPEP_BUFFER_SIZE_MB: int = 1024
+     VLLM_DBO_COMM_SMS: int = 20
+     GPT_OSS_SYSTEM_TOOL_MCP_LABELS: list[str] = []
+     VLLM_PATTERN_MATCH_DEBUG: Optional[str] = None
+     VLLM_ENABLE_INDUCTOR_MAX_AUTOTUNE: bool = True
+     VLLM_ENABLE_INDUCTOR_COORDINATE_DESCENT_TUNING: bool = True
+     VLLM_USE_NCCL_SYMM_MEM: bool = False
+     VLLM_NCCL_INCLUDE_PATH: Optional[str] = None
+     VLLM_USE_FBGEMM: bool = False
+
+
+ def get_default_cache_root():
+     return os.getenv(
+         "XDG_CACHE_HOME",
+         os.path.join(os.path.expanduser("~"), ".cache"),
+     )
+
+
+ def get_default_config_root():
+     return os.getenv(
+         "XDG_CONFIG_HOME",
+         os.path.join(os.path.expanduser("~"), ".config"),
+     )
+
+
+ def maybe_convert_int(value: Optional[str]) -> Optional[int]:
+     if value is None:
+         return None
+     return int(value)
+
+
+ def maybe_convert_bool(value: Optional[str]) -> Optional[bool]:
+     if value is None:
+         return None
+     return bool(int(value))
+
+
+ def env_with_choices(
+         env_name: str,
+         default: Optional[str],
+         choices: Union[list[str], Callable[[], list[str]]],
+         case_sensitive: bool = True) -> Callable[[], Optional[str]]:
+     """
+     Create a lambda that validates an environment variable against
+     allowed choices.
+
+     Args:
+         env_name: Name of the environment variable
+         default: Default value if not set (can be None)
+         choices: List of valid string options or callable that returns list
+         case_sensitive: Whether validation should be case sensitive
+
+     Returns:
+         Lambda function for environment_variables dict
+     """
+
+     def _get_validated_env() -> Optional[str]:
+         value = os.getenv(env_name)
+         if value is None:
+             return default
+
+         # Resolve choices if it's a callable (for lazy loading)
+         actual_choices = choices() if callable(choices) else choices
+
+         if not case_sensitive:
+             check_value = value.lower()
+             check_choices = [choice.lower() for choice in actual_choices]
+         else:
+             check_value = value
+             check_choices = actual_choices
+
+         if check_value not in check_choices:
+             raise ValueError(f"Invalid value '{value}' for {env_name}. "
+                              f"Valid options: {actual_choices}.")
+
+         return value
+
+     return _get_validated_env
+
+
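To make the helper concrete, here is a minimal usage sketch; it assumes it runs in this module's context, and the `MY_MODE` variable and its choices are hypothetical, not part of vLLM:

    import os

    get_mode = env_with_choices("MY_MODE", "fast", ["fast", "safe"],
                                case_sensitive=False)

    os.environ["MY_MODE"] = "SAFE"
    assert get_mode() == "SAFE"  # matched case-insensitively, original casing kept

    os.environ["MY_MODE"] = "turbo"
    # get_mode() would now raise ValueError: Invalid value 'turbo' for MY_MODE...

Note the returned getter re-reads the environment on every call, so the variable can change between calls; when it is unset, the getter returns the supplied default.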
+ def env_list_with_choices(
+         env_name: str,
+         default: list[str],
+         choices: Union[list[str], Callable[[], list[str]]],
+         case_sensitive: bool = True) -> Callable[[], list[str]]:
+     """
+     Create a lambda that validates an environment variable
+     containing comma-separated values against allowed choices.
+
+     Args:
+         env_name: Name of the environment variable
+         default: Default list of values if not set
+         choices: List of valid string options or callable that returns list
+         case_sensitive: Whether validation should be case sensitive
+
+     Returns:
+         Lambda function for environment_variables
+         dict that returns list of strings
+     """
+
+     def _get_validated_env_list() -> list[str]:
+         value = os.getenv(env_name)
+         if value is None:
+             return default
+
+         # Split comma-separated values and strip whitespace
+         values = [v.strip() for v in value.split(",") if v.strip()]
+
+         if not values:
+             return default
+
+         # Resolve choices if it's a callable (for lazy loading)
+         actual_choices = choices() if callable(choices) else choices
+
+         # Validate each value
+         for val in values:
+             if not case_sensitive:
+                 check_value = val.lower()
+                 check_choices = [choice.lower() for choice in actual_choices]
+             else:
+                 check_value = val
+                 check_choices = actual_choices
+
+             if check_value not in check_choices:
+                 raise ValueError(f"Invalid value '{val}' in {env_name}. "
+                                  f"Valid options: {actual_choices}.")
+
+         return values
+
+     return _get_validated_env_list
+
+
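Likewise for the list variant, a hedged sketch (the `MY_KERNELS` name is illustrative only, not a real vLLM variable):

    import os

    get_kernels = env_list_with_choices("MY_KERNELS", [], ["marlin", "machete"])

    os.environ["MY_KERNELS"] = " marlin , machete "
    assert get_kernels() == ["marlin", "machete"]  # whitespace is stripped

    os.environ["MY_KERNELS"] = ""
    assert get_kernels() == []  # empty string falls back to the default list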
+ def get_vllm_port() -> Optional[int]:
+     """Get the port from the VLLM_PORT environment variable.
+
+     Returns:
+         The port number as an integer if VLLM_PORT is set, None otherwise.
+
+     Raises:
+         ValueError: If VLLM_PORT is a URI, which suggests a k8s service
+             discovery issue.
+     """
+     if 'VLLM_PORT' not in os.environ:
+         return None
+
+     port = os.getenv('VLLM_PORT', '0')
+
+     try:
+         return int(port)
+     except ValueError as err:
+         from urllib.parse import urlparse
+         parsed = urlparse(port)
+         if parsed.scheme:
+             raise ValueError(
+                 f"VLLM_PORT '{port}' appears to be a URI. "
+                 "This may be caused by a Kubernetes service discovery issue; "
+                 "check the warning in: https://docs.vllm.ai/en/stable/serving/env_vars.html"
+             ) from None
+         raise ValueError(
+             f"VLLM_PORT '{port}' must be a valid integer") from err
+
+
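The URI guard matters in Kubernetes, where a service named `vllm` causes the kubelet to inject a service-link variable such as `VLLM_PORT=tcp://10.0.0.1:5678` into the pod. A small sketch of both paths, assuming it runs in this module's context:

    import os

    os.environ["VLLM_PORT"] = "5678"
    assert get_vllm_port() == 5678

    os.environ["VLLM_PORT"] = "tcp://10.0.0.1:5678"
    # get_vllm_port() would now raise ValueError: the value parses as a URI
    # (urlparse finds a scheme), so the error points at the k8s docs instead
    # of the generic "must be a valid integer" message.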
+ # The begin-* and end-* markers here are used by the documentation generator
+ # to extract the used env vars.
+
+ # --8<-- [start:env-vars-definition]
+
+ environment_variables: dict[str, Callable[[], Any]] = {
+
+     # ================== Installation Time Env Vars ==================
+
+     # Target device of vLLM, supporting [cuda (by default),
+     # rocm, cpu]
+     "VLLM_TARGET_DEVICE":
+     lambda: os.getenv("VLLM_TARGET_DEVICE", "cuda").lower(),
+
+     # Main CUDA version of vLLM, supporting [12.6, 12.8, 12.9],
+     # 12.8 is the default. This follows PyTorch but can be overridden.
+     "VLLM_MAIN_CUDA_VERSION":
+     lambda: os.getenv("VLLM_MAIN_CUDA_VERSION", "").lower() or "12.8",
+
+     # Maximum number of compilation jobs to run in parallel.
+     # By default this is the number of CPUs
+     "MAX_JOBS":
+     lambda: os.getenv("MAX_JOBS", None),
+
+     # Number of threads to use for nvcc
+     # By default this is 1.
+     # If set, `MAX_JOBS` will be reduced to avoid oversubscribing the CPU.
+     "NVCC_THREADS":
+     lambda: os.getenv("NVCC_THREADS", None),
+
+     # If set, vllm will use precompiled binaries (*.so)
+     "VLLM_USE_PRECOMPILED":
+     lambda: os.environ.get("VLLM_USE_PRECOMPILED", "").strip().lower() in
+     ("1", "true") or bool(os.environ.get("VLLM_PRECOMPILED_WHEEL_LOCATION")),
+
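Note the second clause of that lambda: pointing `VLLM_PRECOMPILED_WHEEL_LOCATION` at a wheel implies precompiled mode even when `VLLM_USE_PRECOMPILED` itself is unset. A quick sketch of the resulting behavior (the wheel path is illustrative):

    import os

    check = environment_variables["VLLM_USE_PRECOMPILED"]

    os.environ.pop("VLLM_USE_PRECOMPILED", None)
    os.environ["VLLM_PRECOMPILED_WHEEL_LOCATION"] = "/tmp/vllm.whl"
    assert check() is True   # wheel location alone flips the flag on

    os.environ.pop("VLLM_PRECOMPILED_WHEEL_LOCATION", None)
    os.environ["VLLM_USE_PRECOMPILED"] = " TRUE "
    assert check() is True   # value is stripped and lowercased before the test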
+     # Used to mark that setup.py is running in a Docker build context,
+     # in order to force the use of precompiled binaries.
+     "VLLM_DOCKER_BUILD_CONTEXT":
+     lambda: os.environ.get("VLLM_DOCKER_BUILD_CONTEXT", "").strip().lower() in
+     ("1", "true"),
+
+     # Whether to force using nightly wheel in python build.
+     # This is used for testing the nightly wheel in python build.
+     "VLLM_TEST_USE_PRECOMPILED_NIGHTLY_WHEEL":
+     lambda: bool(int(os.getenv("VLLM_TEST_USE_PRECOMPILED_NIGHTLY_WHEEL", "0"))
+                  ),
+
+     # CMake build type
+     # If not set, defaults to "Debug" or "RelWithDebInfo"
+     # Available options: "Debug", "Release", "RelWithDebInfo"
+     "CMAKE_BUILD_TYPE":
+     env_with_choices("CMAKE_BUILD_TYPE", None,
+                      ["Debug", "Release", "RelWithDebInfo"]),
+
+     # If set, vllm will print verbose logs during installation
+     "VERBOSE":
+     lambda: bool(int(os.getenv('VERBOSE', '0'))),
+
+     # Root directory for vLLM configuration files
+     # Defaults to `~/.config/vllm` unless `XDG_CONFIG_HOME` is set
+     # Note that this not only affects how vllm finds its configuration files
+     # during runtime, but also affects how vllm installs its configuration
+     # files during **installation**.
+     "VLLM_CONFIG_ROOT":
+     lambda: os.path.expanduser(
+         os.getenv(
+             "VLLM_CONFIG_ROOT",
+             os.path.join(get_default_config_root(), "vllm"),
+         )),
+
+     # ================== Runtime Env Vars ==================
+
+     # Root directory for vLLM cache files
+     # Defaults to `~/.cache/vllm` unless `XDG_CACHE_HOME` is set
+     "VLLM_CACHE_ROOT":
+     lambda: os.path.expanduser(
+         os.getenv(
+             "VLLM_CACHE_ROOT",
+             os.path.join(get_default_cache_root(), "vllm"),
+         )),
+
+     # used in distributed environment to determine the ip address
+     # of the current node, when the node has multiple network interfaces.
+     # If you are using multi-node inference, you should set this differently
+     # on each node.
+     'VLLM_HOST_IP':
+     lambda: os.getenv('VLLM_HOST_IP', ""),
+
+     # used in distributed environment to manually set the communication port
+     # Note: if VLLM_PORT is set, and some code asks for multiple ports, the
+     # VLLM_PORT will be used as the first port, and the rest will be generated
+     # by incrementing the VLLM_PORT value.
+     'VLLM_PORT':
+     get_vllm_port,
+
+     # path used for ipc when the frontend api server is running in
+     # multi-processing mode to communicate with the backend engine process.
+     'VLLM_RPC_BASE_PATH':
+     lambda: os.getenv('VLLM_RPC_BASE_PATH', tempfile.gettempdir()),
+
+     # If true, will load models from ModelScope instead of Hugging Face Hub.
+     # note that the value is true or false, not numbers
+     "VLLM_USE_MODELSCOPE":
+     lambda: os.environ.get("VLLM_USE_MODELSCOPE", "False").lower() == "true",
+
+     # Interval in seconds to log a warning message when the ring buffer is full
+     "VLLM_RINGBUFFER_WARNING_INTERVAL":
+     lambda: int(os.environ.get("VLLM_RINGBUFFER_WARNING_INTERVAL", "60")),
+
+     # path to cudatoolkit home directory, under which should be bin, include,
+     # and lib directories.
+     "CUDA_HOME":
+     lambda: os.environ.get("CUDA_HOME", None),
+
+     # Path to the NCCL library file. It is needed because nccl>=2.19 brought
+     # by PyTorch contains a bug: https://github.com/NVIDIA/nccl/issues/1234
+     "VLLM_NCCL_SO_PATH":
+     lambda: os.environ.get("VLLM_NCCL_SO_PATH", None),
+
+     # when `VLLM_NCCL_SO_PATH` is not set, vllm will try to find the nccl
+     # library file in the locations specified by `LD_LIBRARY_PATH`
+     "LD_LIBRARY_PATH":
+     lambda: os.environ.get("LD_LIBRARY_PATH", None),
+
+     # flag to control if vllm should use triton flash attention
+     "VLLM_USE_TRITON_FLASH_ATTN":
+     lambda: (os.environ.get("VLLM_USE_TRITON_FLASH_ATTN", "True").lower() in
+              ("true", "1")),
+
+     # Use separate prefill and decode kernels for V1 attention instead of
+     # the unified triton kernel.
+     "VLLM_V1_USE_PREFILL_DECODE_ATTENTION":
+     lambda:
+     (os.getenv("VLLM_V1_USE_PREFILL_DECODE_ATTENTION", "False").lower() in
+      ("true", "1")),
+
+     # Use AITER triton unified attention for V1 attention
+     "VLLM_USE_AITER_UNIFIED_ATTENTION":
+     lambda:
+     (os.getenv("VLLM_USE_AITER_UNIFIED_ATTENTION", "False").lower() in
+      ("true", "1")),
+
+     # Force vllm to use a specific flash-attention version (2 or 3), only valid
+     # when using the flash-attention backend.
+     "VLLM_FLASH_ATTN_VERSION":
+     lambda: maybe_convert_int(os.environ.get("VLLM_FLASH_ATTN_VERSION", None)),
+
+     # Feature flag to enable/disable Inductor standalone compile.
+     # In torch <= 2.7 we ignore this flag; in torch >= 2.8 this is
+     # disabled by default.
+     "VLLM_USE_STANDALONE_COMPILE":
+     lambda: os.environ.get("VLLM_USE_STANDALONE_COMPILE", "0") == "1",
+
+     # Debug pattern matching inside custom passes.
+     # Should be set to the fx.Node name (e.g. 'getitem_34' or 'scaled_mm_3').
+     "VLLM_PATTERN_MATCH_DEBUG":
+     lambda: os.environ.get("VLLM_PATTERN_MATCH_DEBUG", None),
+
+     # local rank of the process in the distributed setting, used to determine
+     # the GPU device id
+     "LOCAL_RANK":
+     lambda: int(os.environ.get("LOCAL_RANK", "0")),
+
+     # used to control the visible devices in the distributed setting
+     "CUDA_VISIBLE_DEVICES":
+     lambda: os.environ.get("CUDA_VISIBLE_DEVICES", None),
+
+     # timeout for each iteration in the engine
+     "VLLM_ENGINE_ITERATION_TIMEOUT_S":
+     lambda: int(os.environ.get("VLLM_ENGINE_ITERATION_TIMEOUT_S", "60")),
+
+     # API key for vLLM API server
+     "VLLM_API_KEY":
+     lambda: os.environ.get("VLLM_API_KEY", None),
+
+     # Whether to log responses from API Server for debugging
+     "VLLM_DEBUG_LOG_API_SERVER_RESPONSE":
+     lambda: os.environ.get("VLLM_DEBUG_LOG_API_SERVER_RESPONSE", "False"
+                            ).lower() == "true",
+
+     # S3 access information, used for tensorizer to load model from S3
+     "S3_ACCESS_KEY_ID":
+     lambda: os.environ.get("S3_ACCESS_KEY_ID", None),
+     "S3_SECRET_ACCESS_KEY":
+     lambda: os.environ.get("S3_SECRET_ACCESS_KEY", None),
+     "S3_ENDPOINT_URL":
+     lambda: os.environ.get("S3_ENDPOINT_URL", None),
+
+     # Usage stats collection
+     "VLLM_USAGE_STATS_SERVER":
+     lambda: os.environ.get("VLLM_USAGE_STATS_SERVER", "https://stats.vllm.ai"),
+     "VLLM_NO_USAGE_STATS":
+     lambda: os.environ.get("VLLM_NO_USAGE_STATS", "0") == "1",
+     "VLLM_DISABLE_FLASHINFER_PREFILL":
+     lambda: os.environ.get("VLLM_DISABLE_FLASHINFER_PREFILL", "0") == "1",
+     "VLLM_DO_NOT_TRACK":
+     lambda: (os.environ.get("VLLM_DO_NOT_TRACK", None) or os.environ.get(
+         "DO_NOT_TRACK", None) or "0") == "1",
+     "VLLM_USAGE_SOURCE":
+     lambda: os.environ.get("VLLM_USAGE_SOURCE", "production"),
+
+     # Logging configuration
+     # If set to 0, vllm will not configure logging
+     # If set to 1, vllm will configure logging using the default configuration
+     # or the configuration file specified by VLLM_LOGGING_CONFIG_PATH
+     "VLLM_CONFIGURE_LOGGING":
+     lambda: int(os.getenv("VLLM_CONFIGURE_LOGGING", "1")),
+     "VLLM_LOGGING_CONFIG_PATH":
+     lambda: os.getenv("VLLM_LOGGING_CONFIG_PATH"),
+
+     # this is used for configuring the default logging level
+     "VLLM_LOGGING_LEVEL":
+     lambda: os.getenv("VLLM_LOGGING_LEVEL", "INFO").upper(),
+
+     # this is used for configuring the default logging stream
+     "VLLM_LOGGING_STREAM":
+     lambda: os.getenv("VLLM_LOGGING_STREAM", "ext://sys.stdout"),
+
+     # if set, VLLM_LOGGING_PREFIX will be prepended to all log messages
+     "VLLM_LOGGING_PREFIX":
+     lambda: os.getenv("VLLM_LOGGING_PREFIX", ""),
+
+     # if set, vllm will call logits processors in a thread pool with this many
+     # threads. This is useful when using custom logits processors that either
+     # (a) launch additional CUDA kernels or (b) do significant CPU-bound work
+     # while not holding the python GIL, or both.
+     "VLLM_LOGITS_PROCESSOR_THREADS":
+     lambda: int(os.getenv("VLLM_LOGITS_PROCESSOR_THREADS", "0"))
+     if "VLLM_LOGITS_PROCESSOR_THREADS" in os.environ else None,
+
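This "unset means None" idiom recurs throughout the file (e.g. VLLM_USE_FLASHINFER_SAMPLER, VLLM_CPU_KVCACHE_SPACE): the lambda only parses the variable when it is present, so callers can distinguish "not configured" from an explicit value. A sketch, assuming it runs in this module's context:

    import os

    read = environment_variables["VLLM_LOGITS_PROCESSOR_THREADS"]

    os.environ.pop("VLLM_LOGITS_PROCESSOR_THREADS", None)
    assert read() is None          # unset -> None, not 0

    os.environ["VLLM_LOGITS_PROCESSOR_THREADS"] = "16"
    assert read() == 16            # set -> parsed as int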
+     # If set, vllm will log stats at this interval in seconds
+     # If not set, vllm will log stats every 10 seconds.
+     "VLLM_LOG_STATS_INTERVAL":
+     lambda: val if (val := float(os.getenv("VLLM_LOG_STATS_INTERVAL", "10.")))
+     > 0. else 10.,
+
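The walrus expression doubles as a guard: zero and negative values silently fall back to the 10-second default rather than disabling stats logging. For instance:

    import os

    interval = environment_variables["VLLM_LOG_STATS_INTERVAL"]

    os.environ["VLLM_LOG_STATS_INTERVAL"] = "2.5"
    assert interval() == 2.5

    os.environ["VLLM_LOG_STATS_INTERVAL"] = "-1"
    assert interval() == 10.0      # non-positive values are clamped to the default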
+     # Trace function calls
+     # If set to 1, vllm will trace function calls
+     # Useful for debugging
+     "VLLM_TRACE_FUNCTION":
+     lambda: int(os.getenv("VLLM_TRACE_FUNCTION", "0")),
+
+     # Backend for attention computation
+     # Example options:
+     # - "TORCH_SDPA": use torch.nn.MultiheadAttention
+     # - "FLASH_ATTN": use FlashAttention
+     # - "XFORMERS": use XFormers
+     # - "FLASHINFER": use flashinfer
+     # - "FLASHMLA": use FlashMLA
+     # - "FLASH_ATTN_MLA": use FlashAttention for MLA
+     # - "FLASHINFER_MLA": use FlashInfer for MLA
+     # - "CUTLASS_MLA": use CUTLASS for MLA
+     # All possible options loaded dynamically from _Backend enum
+     "VLLM_ATTENTION_BACKEND":
+     env_with_choices("VLLM_ATTENTION_BACKEND", None,
+                      lambda: list(__import__('vllm.platforms.interface', \
+                          fromlist=['_Backend'])._Backend.__members__.keys())),
+
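The choices argument is a callable here so that `vllm.platforms.interface` is imported only when the variable is actually validated, which avoids paying the import (and any import-cycle risk) at module load time. The `__import__` incantation is equivalent to this more familiar form, a sketch assuming the `_Backend` enum keeps its current location:

    import importlib

    def _backend_names() -> list[str]:
        # Resolved lazily by env_with_choices, only when validation runs.
        interface = importlib.import_module("vllm.platforms.interface")
        return list(interface._Backend.__members__.keys())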
+     # If set, vllm will use flashinfer sampler
+     "VLLM_USE_FLASHINFER_SAMPLER":
+     lambda: bool(int(os.environ["VLLM_USE_FLASHINFER_SAMPLER"]))
+     if "VLLM_USE_FLASHINFER_SAMPLER" in os.environ else None,
+
+     # Pipeline stage partition strategy
+     "VLLM_PP_LAYER_PARTITION":
+     lambda: os.getenv("VLLM_PP_LAYER_PARTITION", None),
+
+     # (CPU backend only) CPU key-value cache space.
+     # default is None and will be set to 4 GB
+     "VLLM_CPU_KVCACHE_SPACE":
+     lambda: int(os.getenv("VLLM_CPU_KVCACHE_SPACE", "0"))
+     if "VLLM_CPU_KVCACHE_SPACE" in os.environ else None,
+
+     # (CPU backend only) CPU core ids bound by OpenMP threads, e.g., "0-31",
+     # "0,1,2", "0-31,33". CPU cores of different ranks are separated by '|'.
+     "VLLM_CPU_OMP_THREADS_BIND":
+     lambda: os.getenv("VLLM_CPU_OMP_THREADS_BIND", "auto"),
+
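So a two-rank CPU deployment pinning 16 cores per rank would look like the following sketch (an illustrative setting, not a recommendation):

    import os

    # Rank 0 is pinned to cores 0-15, rank 1 to cores 16-31; '|' separates ranks.
    os.environ["VLLM_CPU_OMP_THREADS_BIND"] = "0-15|16-31"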
+     # (CPU backend only) Number of CPU cores to reserve; these cores will
+     # not be used by the OMP threads of any rank.
+     "VLLM_CPU_NUM_OF_RESERVED_CPU":
+     lambda: int(os.getenv("VLLM_CPU_NUM_OF_RESERVED_CPU", "0"))
+     if "VLLM_CPU_NUM_OF_RESERVED_CPU" in os.environ else None,
+
+     # (CPU backend only) whether to use prepack for MoE layer. This will be
+     # passed to ipex.llm.modules.GatedMLPMOE. On unsupported CPUs, you might
+     # need to set this to "0" (False).
+     "VLLM_CPU_MOE_PREPACK":
+     lambda: bool(int(os.getenv("VLLM_CPU_MOE_PREPACK", "1"))),
+
+     # (CPU backend only) whether to use SGL kernels, optimized for small batch.
+     "VLLM_CPU_SGL_KERNEL":
+     lambda: bool(int(os.getenv("VLLM_CPU_SGL_KERNEL", "0"))),
+
+     # If the env var is set, then all workers will execute as separate
+     # processes from the engine, and we use the same mechanism to trigger
+     # execution on all workers.
+     # Run vLLM with VLLM_USE_RAY_SPMD_WORKER=1 to enable it.
+     "VLLM_USE_RAY_SPMD_WORKER":
+     lambda: bool(int(os.getenv("VLLM_USE_RAY_SPMD_WORKER", "0"))),
+
+     # If the env var is set, it uses Ray's Compiled Graph
+     # (previously known as ADAG) API, which optimizes the
+     # control plane overhead.
+     # Run vLLM with VLLM_USE_RAY_COMPILED_DAG=1 to enable it.
+     # Note that this variable is set to 1 in V1 by default
+     # when ray distributed executor is used.
+     "VLLM_USE_RAY_COMPILED_DAG":
+     lambda: bool(int(os.getenv("VLLM_USE_RAY_COMPILED_DAG", "0"))),
+
+     # If the env var is set, Ray Compiled Graph uses the specified
+     # channel type to communicate between workers belonging to
+     # different pipeline-parallel stages.
+     # Available options:
+     # - "auto": use the default channel type
+     # - "nccl": use NCCL for communication
+     # - "shm": use shared memory and gRPC for communication
+     # This flag is ignored if VLLM_USE_RAY_COMPILED_DAG is not set.
+     "VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE":
+     env_with_choices("VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE", "auto",
+                      ["auto", "nccl", "shm"]),
+
+     # If the env var is set, it enables GPU communication overlap
+     # (experimental feature) in Ray's Compiled Graph. This flag is ignored if
+     # VLLM_USE_RAY_COMPILED_DAG is not set.
+     "VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM":
+     lambda: bool(int(os.getenv("VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM", "0"))
+                  ),
+
+     # If the env var is set, it uses a Ray Communicator wrapping
+     # vLLM's pipeline parallelism communicator to interact with Ray's
+     # Compiled Graph. Otherwise, it uses Ray's NCCL communicator.
+     # This flag is ignored if VLLM_USE_RAY_COMPILED_DAG is not set.
+     "VLLM_USE_RAY_WRAPPED_PP_COMM":
+     lambda: bool(int(os.getenv("VLLM_USE_RAY_WRAPPED_PP_COMM", "1"))),
+
+     # Use dedicated multiprocess context for workers.
+     # Both spawn and fork work
+     "VLLM_WORKER_MULTIPROC_METHOD":
+     env_with_choices("VLLM_WORKER_MULTIPROC_METHOD", "fork",
+                      ["spawn", "fork"]),
+
+     # Path to the cache for storing downloaded assets
+     "VLLM_ASSETS_CACHE":
+     lambda: os.path.expanduser(
+         os.getenv(
+             "VLLM_ASSETS_CACHE",
+             os.path.join(get_default_cache_root(), "vllm", "assets"),
+         )),
+
+     # If the env var is set, vllm will clean up model files under
+     # the path $VLLM_ASSETS_CACHE/model_streamer/$model_name
+     "VLLM_ASSETS_CACHE_MODEL_CLEAN":
+     lambda: bool(int(os.getenv("VLLM_ASSETS_CACHE_MODEL_CLEAN", "0"))),
+
+     # Timeout for fetching images when serving multimodal models
+     # Default is 5 seconds
+     "VLLM_IMAGE_FETCH_TIMEOUT":
+     lambda: int(os.getenv("VLLM_IMAGE_FETCH_TIMEOUT", "5")),
+
+     # Timeout for fetching videos when serving multimodal models
+     # Default is 30 seconds
+     "VLLM_VIDEO_FETCH_TIMEOUT":
+     lambda: int(os.getenv("VLLM_VIDEO_FETCH_TIMEOUT", "30")),
+
+     # Timeout for fetching audio when serving multimodal models
+     # Default is 10 seconds
+     "VLLM_AUDIO_FETCH_TIMEOUT":
+     lambda: int(os.getenv("VLLM_AUDIO_FETCH_TIMEOUT", "10")),
+
+     # Whether to allow HTTP redirects when fetching from media URLs.
+     # Defaults to True
+     "VLLM_MEDIA_URL_ALLOW_REDIRECTS":
+     lambda: bool(int(os.getenv("VLLM_MEDIA_URL_ALLOW_REDIRECTS", "1"))),
+
+     # Max number of workers for the thread pool handling
+     # media bytes loading. Set to 1 to disable parallel processing.
+     # Default is 8
+     "VLLM_MEDIA_LOADING_THREAD_COUNT":
+     lambda: int(os.getenv("VLLM_MEDIA_LOADING_THREAD_COUNT", "8")),
+
+     # Maximum filesize in MB for a single audio file when processing
+     # speech-to-text requests. Files larger than this will be rejected.
+     # Default is 25 MB
+     "VLLM_MAX_AUDIO_CLIP_FILESIZE_MB":
+     lambda: int(os.getenv("VLLM_MAX_AUDIO_CLIP_FILESIZE_MB", "25")),
+
+     # Backend for Video IO
+     # - "opencv": Default backend that uses the OpenCV stream-buffered backend.
+     #
+     # Custom backend implementations can be registered
+     # via `@VIDEO_LOADER_REGISTRY.register("my_custom_video_loader")` and
+     # imported at runtime.
+     # If a non-existing backend is used, an AssertionError will be thrown.
+     "VLLM_VIDEO_LOADER_BACKEND":
+     lambda: os.getenv("VLLM_VIDEO_LOADER_BACKEND", "opencv"),
+
+     # [DEPRECATED] Cache size (in GiB per process) for multimodal input cache
+     # Default is 4 GiB per API process + 4 GiB per engine core process
+     "VLLM_MM_INPUT_CACHE_GIB":
+     lambda: int(os.getenv("VLLM_MM_INPUT_CACHE_GIB", "4")),
+
+     # Path to the XLA persistent cache directory.
+     # Only used for XLA devices such as TPUs.
+     "VLLM_XLA_CACHE_PATH":
+     lambda: os.path.expanduser(
+         os.getenv(
+             "VLLM_XLA_CACHE_PATH",
+             os.path.join(get_default_cache_root(), "vllm", "xla_cache"),
+         )),
+
+     # If set, assert on XLA recompilation after each execution step.
+     "VLLM_XLA_CHECK_RECOMPILATION":
+     lambda: bool(int(os.getenv("VLLM_XLA_CHECK_RECOMPILATION", "0"))),
+
+     # Enable SPMD mode for TPU backend.
+     "VLLM_XLA_USE_SPMD":
+     lambda: bool(int(os.getenv("VLLM_XLA_USE_SPMD", "0"))),
+     "VLLM_FUSED_MOE_CHUNK_SIZE":
+     lambda: int(os.getenv("VLLM_FUSED_MOE_CHUNK_SIZE", "32768")),
+     # Control whether to use fused MoE activation chunking. Current chunking
+     # logic is incompatible with torch.compile and causes IMA (illegal memory
+     # access). See issue
+     # https://github.com/vllm-project/vllm/issues/19631.
+     "VLLM_ENABLE_FUSED_MOE_ACTIVATION_CHUNKING":
+     lambda: bool(
+         int(os.getenv("VLLM_ENABLE_FUSED_MOE_ACTIVATION_CHUNKING", "1"))),
+
+     # If set, the OpenAI API server will stay alive even after the underlying
+     # AsyncLLMEngine errors and stops serving requests
+     "VLLM_KEEP_ALIVE_ON_ENGINE_DEATH":
+     lambda: bool(os.getenv("VLLM_KEEP_ALIVE_ON_ENGINE_DEATH", 0)),
+
+     # If the env var VLLM_ALLOW_LONG_MAX_MODEL_LEN is set, it allows
+     # the user to specify a max sequence length greater than
+     # the max length derived from the model's config.json.
+     # To enable this, set VLLM_ALLOW_LONG_MAX_MODEL_LEN=1.
+     "VLLM_ALLOW_LONG_MAX_MODEL_LEN":
+     lambda:
+     (os.environ.get("VLLM_ALLOW_LONG_MAX_MODEL_LEN", "0").strip().lower() in
+      ("1", "true")),
+
+     # If set, forces FP8 Marlin to be used for FP8 quantization regardless
+     # of the hardware support for FP8 compute.
+     "VLLM_TEST_FORCE_FP8_MARLIN":
+     lambda:
+     (os.environ.get("VLLM_TEST_FORCE_FP8_MARLIN", "0").strip().lower() in
+      ("1", "true")),
+     "VLLM_TEST_FORCE_LOAD_FORMAT":
+     lambda: os.getenv("VLLM_TEST_FORCE_LOAD_FORMAT", "dummy"),
+
+     # Time in ms for the zmq client to wait for a response from the backend
+     # server for simple data operations
+     "VLLM_RPC_TIMEOUT":
+     lambda: int(os.getenv("VLLM_RPC_TIMEOUT", "10000")),
+
+     # Timeout in seconds for keeping HTTP connections alive in API server
+     "VLLM_HTTP_TIMEOUT_KEEP_ALIVE":
+     lambda: int(os.environ.get("VLLM_HTTP_TIMEOUT_KEEP_ALIVE", "5")),
+
+     # a list of plugin names to load, separated by commas.
+     # if this is not set, it means all plugins will be loaded
+     # if this is set to an empty string, no plugins will be loaded
+     "VLLM_PLUGINS":
+     lambda: None if "VLLM_PLUGINS" not in os.environ else os.environ[
+         "VLLM_PLUGINS"].split(","),
+
+     # a local directory to look in for unrecognized LoRA adapters.
+     # only works if plugins are enabled and
+     # VLLM_ALLOW_RUNTIME_LORA_UPDATING is enabled.
+     "VLLM_LORA_RESOLVER_CACHE_DIR":
+     lambda: os.getenv("VLLM_LORA_RESOLVER_CACHE_DIR", None),
+
+     # Enables torch profiler if set.
+     # Both AsyncLLM's CPU traces as well as workers'
+     # traces (CPU & GPU) will be saved under this directory.
+     # Note that it must be an absolute path.
+     "VLLM_TORCH_PROFILER_DIR":
+     lambda: (None if os.getenv("VLLM_TORCH_PROFILER_DIR", None) is None else os
+              .path.abspath(os.path.expanduser(os.getenv(
+                  "VLLM_TORCH_PROFILER_DIR", ".")))),
+
+     # Enable torch profiler to record shapes if set
+     # VLLM_TORCH_PROFILER_RECORD_SHAPES=1. If not set, torch profiler will
+     # not record shapes.
+     "VLLM_TORCH_PROFILER_RECORD_SHAPES":
+     lambda: bool(os.getenv("VLLM_TORCH_PROFILER_RECORD_SHAPES", "0") != "0"),
+
+     # Enable torch profiler to profile memory if set
+     # VLLM_TORCH_PROFILER_WITH_PROFILE_MEMORY=1. If not set, torch profiler
+     # will not profile memory.
+     "VLLM_TORCH_PROFILER_WITH_PROFILE_MEMORY":
+     lambda: bool(
+         os.getenv("VLLM_TORCH_PROFILER_WITH_PROFILE_MEMORY", "0") != "0"),
+
+     # Enable torch profiler to profile stack if set
+     # VLLM_TORCH_PROFILER_WITH_STACK=1. If not set, torch profiler WILL
+     # profile stack by default.
+     "VLLM_TORCH_PROFILER_WITH_STACK":
+     lambda: bool(os.getenv("VLLM_TORCH_PROFILER_WITH_STACK", "1") != "0"),
+
+     # Enable torch profiler to profile flops if set
+     # VLLM_TORCH_PROFILER_WITH_FLOPS=1. If not set, torch profiler will
+     # not profile flops.
+     "VLLM_TORCH_PROFILER_WITH_FLOPS":
+     lambda: bool(os.getenv("VLLM_TORCH_PROFILER_WITH_FLOPS", "0") != "0"),
+
+     # If set, vLLM will use Triton implementations of AWQ.
+     "VLLM_USE_TRITON_AWQ":
+     lambda: bool(int(os.getenv("VLLM_USE_TRITON_AWQ", "0"))),
+
+     # If set, allow loading or unloading lora adapters at runtime.
+     "VLLM_ALLOW_RUNTIME_LORA_UPDATING":
+     lambda:
+     (os.environ.get("VLLM_ALLOW_RUNTIME_LORA_UPDATING", "0").strip().lower() in
+      ("1", "true")),
+
+     # We assume drivers can report p2p status correctly.
+     # If the program hangs when using custom allreduce,
+     # potentially caused by a bug in the driver (535 series),
+     # it might be helpful to set VLLM_SKIP_P2P_CHECK=0
+     # so that vLLM can verify if p2p is actually working.
+     # See https://github.com/vllm-project/vllm/blob/a9b15c606fea67a072416ea0ea115261a2756058/vllm/distributed/device_communicators/custom_all_reduce_utils.py#L101-L108 for details. # noqa
+     "VLLM_SKIP_P2P_CHECK":
+     lambda: os.getenv("VLLM_SKIP_P2P_CHECK", "1") == "1",
+
+     # List of quantization kernels that should be disabled, used for testing
+     # and performance comparisons. Currently only affects MPLinearKernel
+     # selection
+     # (kernels: MacheteLinearKernel, MarlinLinearKernel, ExllamaLinearKernel)
+     "VLLM_DISABLED_KERNELS":
+     lambda: [] if "VLLM_DISABLED_KERNELS" not in os.environ else os.environ[
+         "VLLM_DISABLED_KERNELS"].split(","),
+
+     # Swaps the all reduce backend that we use to coordinate the DP padding
+     # information from NCCL to gloo.
+     "VLLM_DISABLE_NCCL_FOR_DP_SYNCHRONIZATION":
+     lambda:
+     (os.getenv("VLLM_DISABLE_NCCL_FOR_DP_SYNCHRONIZATION", "False").lower() in
+      ("true", "1")),
+
+     # If set, use the V1 code path.
+     "VLLM_USE_V1":
+     lambda: bool(int(os.getenv("VLLM_USE_V1", "1"))),
+
+     # Disable aiter ops unless specifically enabled.
+     # Acts as a parent switch to enable the rest of the other operations.
+     "VLLM_ROCM_USE_AITER":
+     lambda: (os.getenv("VLLM_ROCM_USE_AITER", "False").lower() in
+              ("true", "1")),
+
+     # Whether to use aiter paged attention.
+     # By default is disabled.
+     "VLLM_ROCM_USE_AITER_PAGED_ATTN":
+     lambda: (os.getenv("VLLM_ROCM_USE_AITER_PAGED_ATTN", "False").lower() in
+              ("true", "1")),
+
+     # use aiter linear op if aiter ops are enabled
+     # The related ops are:
+     # - scaled_mm (per-tensor / rowwise)
+     "VLLM_ROCM_USE_AITER_LINEAR":
+     lambda: (os.getenv("VLLM_ROCM_USE_AITER_LINEAR", "True").lower() in
+              ("true", "1")),
+
+     # Whether to use aiter moe ops.
+     # By default is enabled.
+     "VLLM_ROCM_USE_AITER_MOE":
+     lambda: (os.getenv("VLLM_ROCM_USE_AITER_MOE", "True").lower() in
+              ("true", "1")),
+
+     # use aiter rms norm op if aiter ops are enabled.
+     "VLLM_ROCM_USE_AITER_RMSNORM":
+     lambda: (os.getenv("VLLM_ROCM_USE_AITER_RMSNORM", "True").lower() in
+              ("true", "1")),
+
+     # Whether to use aiter mla ops.
+     # By default is enabled.
+     "VLLM_ROCM_USE_AITER_MLA":
+     lambda: (os.getenv("VLLM_ROCM_USE_AITER_MLA", "True").lower() in
+              ("true", "1")),
+
+     # Whether to use aiter mha ops.
+     # By default is enabled.
+     "VLLM_ROCM_USE_AITER_MHA":
+     lambda: (os.getenv("VLLM_ROCM_USE_AITER_MHA", "True").lower() in
+              ("true", "1")),
+
+     # Whether to use aiter fp4 gemm asm.
+     # By default is disabled.
+     "VLLM_ROCM_USE_AITER_FP4_ASM_GEMM":
+     lambda: (os.getenv("VLLM_ROCM_USE_AITER_FP4_ASM_GEMM", "False").lower() in
+              ("true", "1")),
+
+     # Whether to use aiter rope.
+     # By default is disabled.
+     "VLLM_ROCM_USE_TRITON_ROPE":
+     lambda: (os.getenv("VLLM_ROCM_USE_TRITON_ROPE", "False").lower() in
+              ("true", "1")),
+
+     # Whether to use aiter triton fp8 bmm kernel
+     # By default is enabled.
+     "VLLM_ROCM_USE_AITER_FP8BMM":
+     lambda: (os.getenv("VLLM_ROCM_USE_AITER_FP8BMM", "True").lower() in
+              ("true", "1")),
+
+     # use rocm skinny gemms
+     "VLLM_ROCM_USE_SKINNY_GEMM":
+     lambda: (os.getenv("VLLM_ROCM_USE_SKINNY_GEMM", "True").lower() in
+              ("true", "1")),
+
+     # Pad the fp8 weights to 256 bytes for ROCm
+     "VLLM_ROCM_FP8_PADDING":
+     lambda: bool(int(os.getenv("VLLM_ROCM_FP8_PADDING", "1"))),
+
+     # Pad the weights for the moe kernel
+     "VLLM_ROCM_MOE_PADDING":
+     lambda: bool(int(os.getenv("VLLM_ROCM_MOE_PADDING", "1"))),
+
+     # custom paged attention kernel for MI3* cards
+     "VLLM_ROCM_CUSTOM_PAGED_ATTN":
+     lambda: (os.getenv("VLLM_ROCM_CUSTOM_PAGED_ATTN", "True").lower() in
+              ("true", "1")),
+
+     # Custom quick allreduce kernel for MI3* cards
+     # Choice of quantization level: FP, INT8, INT6, INT4 or NONE
+     # Recommended for large models to speed up allreduce
+     "VLLM_ROCM_QUICK_REDUCE_QUANTIZATION":
+     env_with_choices("VLLM_ROCM_QUICK_REDUCE_QUANTIZATION", "NONE",
+                      ["FP", "INT8", "INT6", "INT4", "NONE"]),
+
+     # Custom quick allreduce kernel for MI3* cards
+     # Due to the lack of the bfloat16 asm instruction, bfloat16
+     # kernels are slower than fp16.
+     # If the environment variable is set to 1, the input is converted to fp16
+     "VLLM_ROCM_QUICK_REDUCE_CAST_BF16_TO_FP16":
+     lambda:
+     (os.getenv("VLLM_ROCM_QUICK_REDUCE_CAST_BF16_TO_FP16", "True").lower() in
+      ("true", "1")),
+
+     # Custom quick allreduce kernel for MI3* cards.
+     # Controls the maximum allowed data size (in MB) for custom quick
+     # allreduce communication.
+     # Default: 2048 MB.
+     # Data exceeding this size will use either custom allreduce or RCCL
+     # communication.
+     "VLLM_ROCM_QUICK_REDUCE_MAX_SIZE_BYTES_MB":
+     lambda: maybe_convert_int(
+         os.environ.get("VLLM_ROCM_QUICK_REDUCE_MAX_SIZE_BYTES_MB", None)),
+
+     # Divisor for dynamic query scale factor calculation for FP8 KV Cache
+     "Q_SCALE_CONSTANT":
+     lambda: int(os.getenv("Q_SCALE_CONSTANT", "200")),
+     # Divisor for dynamic key scale factor calculation for FP8 KV Cache
+     "K_SCALE_CONSTANT":
+     lambda: int(os.getenv("K_SCALE_CONSTANT", "200")),
+     # Divisor for dynamic value scale factor calculation for FP8 KV Cache
+     "V_SCALE_CONSTANT":
+     lambda: int(os.getenv("V_SCALE_CONSTANT", "100")),
+
+     # If set, enable multiprocessing in LLM for the V1 code path.
+     "VLLM_ENABLE_V1_MULTIPROCESSING":
+     lambda: bool(int(os.getenv("VLLM_ENABLE_V1_MULTIPROCESSING", "1"))),
+     "VLLM_LOG_BATCHSIZE_INTERVAL":
+     lambda: float(os.getenv("VLLM_LOG_BATCHSIZE_INTERVAL", "-1")),
+     "VLLM_DISABLE_COMPILE_CACHE":
+     lambda: bool(int(os.getenv("VLLM_DISABLE_COMPILE_CACHE", "0"))),
+
+     # If set, vllm will run in development mode, which will enable
+     # some additional endpoints for developing and debugging,
+     # e.g. `/reset_prefix_cache`
+     "VLLM_SERVER_DEV_MODE":
+     lambda: bool(int(os.getenv("VLLM_SERVER_DEV_MODE", "0"))),
+
+     # Controls the maximum number of requests to handle in a
+     # single asyncio task when processing per-token outputs in the
+     # V1 AsyncLLM interface. It is applicable when handling a high
+     # concurrency of streaming requests.
+     # Setting this too high can result in a higher variance of
+     # inter-message latencies. Setting it too low can negatively impact
+     # TTFT and overall throughput.
+     "VLLM_V1_OUTPUT_PROC_CHUNK_SIZE":
+     lambda: int(os.getenv("VLLM_V1_OUTPUT_PROC_CHUNK_SIZE", "128")),
+
+     # If set, vLLM will disable the MLA attention optimizations.
+     "VLLM_MLA_DISABLE":
+     lambda: bool(int(os.getenv("VLLM_MLA_DISABLE", "0"))),
+
+     # If set, vLLM will pick up the provided Flash Attention MLA
+     # max number of splits for cuda graph decode
+     "VLLM_FLASH_ATTN_MAX_NUM_SPLITS_FOR_CUDA_GRAPH":
+     lambda: int(os.getenv("VLLM_FLASH_ATTN_MAX_NUM_SPLITS_FOR_CUDA_GRAPH",
+                           "32")),
+
+     # Number of GPUs per worker in Ray, if it is set to be a fraction,
+     # it allows ray to schedule multiple actors on a single GPU,
+     # so that users can colocate other actors on the same GPUs as vLLM.
+     "VLLM_RAY_PER_WORKER_GPUS":
+     lambda: float(os.getenv("VLLM_RAY_PER_WORKER_GPUS", "1.0")),
+
+     # Bundle indices for Ray, if it is set, it can control precisely
+     # which indices are used for the Ray bundle, for every worker.
+     # Format: comma-separated list of integers, e.g. "0,1,2,3"
+     "VLLM_RAY_BUNDLE_INDICES":
+     lambda: os.getenv("VLLM_RAY_BUNDLE_INDICES", ""),
+
+     # On some systems, find_loaded_library() may not work, so we allow users
+     # to specify the path through the environment variable VLLM_CUDART_SO_PATH.
+     "VLLM_CUDART_SO_PATH":
+     lambda: os.getenv("VLLM_CUDART_SO_PATH", None),
+
+     # Rank of the process in the data parallel setting
+     "VLLM_DP_RANK":
+     lambda: int(os.getenv("VLLM_DP_RANK", "0")),
+
+     # Local rank of the process in the data parallel setting.
+     # Defaults to VLLM_DP_RANK when not set.
+     "VLLM_DP_RANK_LOCAL":
+     lambda: int(
+         os.getenv("VLLM_DP_RANK_LOCAL", sys.modules[__name__].VLLM_DP_RANK)),
+
+     # World size of the data parallel setting
+     "VLLM_DP_SIZE":
+     lambda: int(os.getenv("VLLM_DP_SIZE", "1")),
+
+     # IP address of the master node in the data parallel setting
+     "VLLM_DP_MASTER_IP":
+     lambda: os.getenv("VLLM_DP_MASTER_IP", "127.0.0.1"),
+
+     # Port of the master node in the data parallel setting
+     "VLLM_DP_MASTER_PORT":
+     lambda: int(os.getenv("VLLM_DP_MASTER_PORT", "0")),
+
+     # In the context of executing MoE models with Data-Parallel, Expert-Parallel
+     # and Batched All-to-All dispatch/combine kernels, VLLM_MOE_DP_CHUNK_SIZE
+     # dictates the quantum of tokens that can be dispatched from a DP
+     # rank. All DP ranks process the activations in VLLM_MOE_DP_CHUNK_SIZE
+     # units.
+     "VLLM_MOE_DP_CHUNK_SIZE":
+     lambda: int(os.getenv("VLLM_MOE_DP_CHUNK_SIZE", "256")),
+
+     # Randomize inputs during dummy runs when using Data Parallel
+     "VLLM_RANDOMIZE_DP_DUMMY_INPUTS":
+     lambda: os.environ.get("VLLM_RANDOMIZE_DP_DUMMY_INPUTS", "0") == "1",
+
+     # Whether to use S3 path for model loading in CI via RunAI Streamer
+     "VLLM_CI_USE_S3":
+     lambda: os.environ.get("VLLM_CI_USE_S3", "0") == "1",
+
+     # Use model_redirect to redirect the model name to a local folder.
+     # `model_redirect` can be a json file mapping the model between
+     # repo_id and local folder:
+     # {"meta-llama/Llama-3.2-1B": "/tmp/Llama-3.2-1B"}
+     # or a space separated values table file:
+     # meta-llama/Llama-3.2-1B /tmp/Llama-3.2-1B
+     "VLLM_MODEL_REDIRECT_PATH":
+     lambda: os.environ.get("VLLM_MODEL_REDIRECT_PATH", None),
+
+     # Whether to use atomicAdd reduce in gptq/awq marlin kernel.
+     "VLLM_MARLIN_USE_ATOMIC_ADD":
+     lambda: os.environ.get("VLLM_MARLIN_USE_ATOMIC_ADD", "0") == "1",
+
+     # Whether to use marlin kernel in mxfp4 quantization method
+     "VLLM_MXFP4_USE_MARLIN":
+     lambda: maybe_convert_bool(os.environ.get("VLLM_MXFP4_USE_MARLIN", None)),
+
+     # Whether to turn on the outlines cache for V0
+     # This cache is unbounded and on disk, so it's not safe to use in
+     # an environment with potentially malicious users.
+     "VLLM_V0_USE_OUTLINES_CACHE":
+     lambda: os.environ.get("VLLM_V0_USE_OUTLINES_CACHE", "0") == "1",
+
+     # Whether to turn on the outlines cache for V1
+     # This cache is unbounded and on disk, so it's not safe to use in
+     # an environment with potentially malicious users.
+     "VLLM_V1_USE_OUTLINES_CACHE":
+     lambda: os.environ.get("VLLM_V1_USE_OUTLINES_CACHE", "0") == "1",
+
+     # Gap between padding buckets for the forward pass. E.g., if the gap is
+     # 8, we will run the forward pass with buckets [16, 24, 32, ...].
+     "VLLM_TPU_BUCKET_PADDING_GAP":
+     lambda: int(os.environ["VLLM_TPU_BUCKET_PADDING_GAP"])
+     if "VLLM_TPU_BUCKET_PADDING_GAP" in os.environ else 0,
+     "VLLM_TPU_MOST_MODEL_LEN":
+     lambda: maybe_convert_int(os.environ.get("VLLM_TPU_MOST_MODEL_LEN", None)),
+
+     # Whether using Pathways
+     "VLLM_TPU_USING_PATHWAYS":
+     lambda: bool("proxy" in os.getenv("JAX_PLATFORMS", "").lower()),
+
+     # Allow use of DeepGemm kernels for fused moe ops.
+     "VLLM_USE_DEEP_GEMM":
+     lambda: bool(int(os.getenv("VLLM_USE_DEEP_GEMM", "1"))),
+
+     # Whether to use E8M0 scaling when DeepGEMM is used on Blackwell GPUs.
+     "VLLM_USE_DEEP_GEMM_E8M0":
+     lambda: bool(int(os.getenv("VLLM_USE_DEEP_GEMM_E8M0", "1"))),
+     # TODO(wentao): unify the two E8M0 flags after verifying the correctness.
+     # Whether to use E8M0 scaling when DeepGEMM is used on Hopper GPUs.
+     "VLLM_USE_DEEP_GEMM_E8M0_HOPPER":
+     lambda: bool(int(os.getenv("VLLM_USE_DEEP_GEMM_E8M0_HOPPER", "0"))),
+     # DeepGemm JITs the kernels on-demand. The warmup attempts to make DeepGemm
+     # JIT all the required kernels before model execution so there is no
+     # JIT'ing in the hot-path. However, this warmup increases the engine
+     # startup time by a couple of minutes.
+     # Set `VLLM_SKIP_DEEP_GEMM_WARMUP` to disable the warmup.
+     "VLLM_SKIP_DEEP_GEMM_WARMUP":
+     lambda: bool(int(os.getenv("VLLM_SKIP_DEEP_GEMM_WARMUP", "0"))),
+
+     # Whether to use fused grouped_topk for MoE expert selection.
+     "VLLM_USE_FUSED_MOE_GROUPED_TOPK":
+     lambda: bool(int(os.getenv("VLLM_USE_FUSED_MOE_GROUPED_TOPK", "1"))),
+
+     # Allow use of FlashInfer MoE kernels for fused moe ops.
+     "VLLM_USE_FLASHINFER_MOE_FP16":
+     lambda: bool(int(os.getenv("VLLM_USE_FLASHINFER_MOE_FP16", "0"))),
+
+     # Allow use of FlashInfer MoE kernels for fused moe ops.
+     "VLLM_USE_FLASHINFER_MOE_FP8":
+     lambda: bool(int(os.getenv("VLLM_USE_FLASHINFER_MOE_FP8", "0"))),
+
+     # Allow use of FlashInfer CUTLASS kernels for fused moe ops.
+     "VLLM_USE_FLASHINFER_MOE_FP4":
+     lambda: bool(int(os.getenv("VLLM_USE_FLASHINFER_MOE_FP4", "0"))),
+
+     # If set to 1, use the FlashInfer
+     # MXFP8 (activation) x MXFP4 (weight) MoE backend.
+     "VLLM_USE_FLASHINFER_MOE_MXFP4_MXFP8":
+     lambda: bool(int(os.getenv("VLLM_USE_FLASHINFER_MOE_MXFP4_MXFP8", "0"))),
+
+     # If set to 1, use the FlashInfer CUTLASS backend for
+     # MXFP8 (activation) x MXFP4 (weight) MoE.
+     # This is separate from the TRTLLMGEN path controlled by
+     # VLLM_USE_FLASHINFER_MOE_MXFP4_MXFP8.
+     "VLLM_USE_FLASHINFER_MOE_MXFP4_MXFP8_CUTLASS":
+     lambda: bool(int(
+         os.getenv("VLLM_USE_FLASHINFER_MOE_MXFP4_MXFP8_CUTLASS", "0")
+     )),
+
+     # If set to 1, use the FlashInfer
+     # BF16 (activation) x MXFP4 (weight) MoE backend.
+     "VLLM_USE_FLASHINFER_MOE_MXFP4_BF16":
+     lambda: bool(int(os.getenv("VLLM_USE_FLASHINFER_MOE_MXFP4_BF16", "0"))),
+
+     # Control the cache size used by the xgrammar compiler. The default
+     # of 512 MB should be enough for roughly 1000 JSON schemas.
+     # It can be changed with this variable if needed for some reason.
+     "VLLM_XGRAMMAR_CACHE_MB":
+     lambda: int(os.getenv("VLLM_XGRAMMAR_CACHE_MB", "512")),
+
+     # Control the threshold for msgspec to use 'zero copy' for
+     # serialization/deserialization of tensors. Tensors below
+     # this limit will be encoded into the msgpack buffer, and
+     # tensors above will instead be sent via a separate message.
+     # While the sending side still actually copies the tensor
+     # in all cases, on the receiving side, tensors above this
+     # limit will actually be zero-copy decoded.
+     "VLLM_MSGPACK_ZERO_COPY_THRESHOLD":
+     lambda: int(os.getenv("VLLM_MSGPACK_ZERO_COPY_THRESHOLD", "256")),
+
+     # If set, allow insecure serialization using pickle.
+     # This is useful for environments where it is deemed safe to use the
+     # insecure method and it is needed for some reason.
+     "VLLM_ALLOW_INSECURE_SERIALIZATION":
+     lambda: bool(int(os.getenv("VLLM_ALLOW_INSECURE_SERIALIZATION", "0"))),
+
+     # IP address used for NIXL handshake between remote agents.
+     "VLLM_NIXL_SIDE_CHANNEL_HOST":
+     lambda: os.getenv("VLLM_NIXL_SIDE_CHANNEL_HOST", "localhost"),
+
+     # Port used for NIXL handshake between remote agents.
+     "VLLM_NIXL_SIDE_CHANNEL_PORT":
+     lambda: int(os.getenv("VLLM_NIXL_SIDE_CHANNEL_PORT", "5557")),
+
+     # all2all backend for vllm's expert parallel communication
+     # Available options:
+     # - "naive": naive all2all implementation using broadcasts
+     # - "allgather_reducescatter": all2all implementation based on allgather
+     #   and reducescatter
+     # - "pplx": use pplx kernels
+     # - "deepep_high_throughput": use deepep high-throughput kernels
+     # - "deepep_low_latency": use deepep low-latency kernels
+     # - "flashinfer_all2allv": use flashinfer alltoallv kernels for mnnvl
+     "VLLM_ALL2ALL_BACKEND":
+     env_with_choices("VLLM_ALL2ALL_BACKEND", "allgather_reducescatter",
+                      ["naive", "pplx",
+                       "deepep_high_throughput",
+                       "deepep_low_latency",
+                       "allgather_reducescatter",
+                       "flashinfer_all2allv"]),
+
+     # Flashinfer MoE backend for vLLM's fused Mixture-of-Experts support.
+     # Both options require compute capability 10.0 or above.
+     # Available options:
+     # - "throughput": [default]
+     #   Uses CUTLASS kernels optimized for high-throughput batch inference.
+     # - "latency":
+     #   Uses TensorRT-LLM kernels optimized for low-latency inference.
+     "VLLM_FLASHINFER_MOE_BACKEND":
+     env_with_choices("VLLM_FLASHINFER_MOE_BACKEND", "throughput",
+                      ["throughput", "latency"]),
+
+     # Control the maximum number of tokens per expert supported by the
+     # NVFP4 MoE CUTLASS Kernel. This value is used to create a buffer for
+     # the activation blockscale tensor used in NVFP4 quantization.
+     # This is used to prevent the kernel from running out of memory.
+     "VLLM_MAX_TOKENS_PER_EXPERT_FP4_MOE":
+     lambda: int(os.getenv("VLLM_MAX_TOKENS_PER_EXPERT_FP4_MOE", "163840")),
+
+     # Specifies the thresholds of the communicated tensor sizes under which
+     # vllm should use flashinfer fused allreduce. The variable should be a
+     # JSON with the following format:
+     # { <world size>: <max size in mb> }
+     # Unspecified world sizes will fall back to
+     # { 2: 64, 4: 1, <everything else>: 0.5 }
+     "VLLM_FLASHINFER_ALLREDUCE_FUSION_THRESHOLDS_MB":
+     lambda: json.loads(os.getenv(
+         "VLLM_FLASHINFER_ALLREDUCE_FUSION_THRESHOLDS_MB", "{}")),
+
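Because the value goes straight through `json.loads`, the world-size keys must be written as JSON strings; a hedged example overriding the thresholds for 2- and 4-GPU setups (the numbers are illustrative):

    import os

    os.environ["VLLM_FLASHINFER_ALLREDUCE_FUSION_THRESHOLDS_MB"] = \
        '{"2": 128, "4": 2}'
    thresholds = environment_variables[
        "VLLM_FLASHINFER_ALLREDUCE_FUSION_THRESHOLDS_MB"]()
    assert thresholds == {"2": 128, "4": 2}  # keys stay strings after json.loads

A malformed value (e.g. single quotes or bare keys) would raise `json.JSONDecodeError` at read time.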
1276
+     # MoE routing strategy selector.
+     # See `RoutingSimulator.get_available_strategies()` for available
+     # strategies.
+     # Custom routing strategies can be registered with
+     # `RoutingSimulator.register_strategy()`.
+     # Note: custom strategies may not produce correct model outputs
+     "VLLM_MOE_ROUTING_SIMULATION_STRATEGY":
+     lambda: os.environ.get("VLLM_MOE_ROUTING_SIMULATION_STRATEGY", "").lower(),
+
1285
+     # Regex timeout for use by the vLLM tool parsing plugins.
+     "VLLM_TOOL_PARSE_REGEX_TIMEOUT_SECONDS":
+     lambda: int(os.getenv("VLLM_TOOL_PARSE_REGEX_TIMEOUT_SECONDS", "1")),
+
+     # Reduce CPU usage when vLLM is idle. Enabling this will incur a
+     # small latency penalty when a request eventually comes.
+     "VLLM_SLEEP_WHEN_IDLE":
+     lambda: bool(int(os.getenv("VLLM_SLEEP_WHEN_IDLE", "0"))),
+
1294
+     # Control the max chunk bytes (in MB) for the rpc message queue.
+     # Objects larger than this threshold will be broadcast to worker
+     # processes via zmq.
+     "VLLM_MQ_MAX_CHUNK_BYTES_MB":
+     lambda: int(os.getenv("VLLM_MQ_MAX_CHUNK_BYTES_MB", "16")),
+
+     # Timeout in seconds for execute_model RPC calls in multiprocessing
+     # executor (only applies when TP > 1).
+     "VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS":
+     lambda: int(os.getenv("VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS", "300")),
+
+     # KV Cache layout used throughout vllm.
+     # Some common values are:
+     # - NHD
+     # - HND
+     # Where N=num_blocks, H=num_heads and D=head_size. The default value
+     # will leave the layout choice to the backend. Mind that backends may
+     # only implement and support a subset of all possible layouts.
+     "VLLM_KV_CACHE_LAYOUT":
+     env_with_choices("VLLM_KV_CACHE_LAYOUT", None, ["NHD", "HND"]),
+
+     # Enable checking whether the generated logits contain NaNs,
+     # indicating corrupted output. Useful for debugging low level bugs
+     # or bad hardware but it may add compute overhead.
+     "VLLM_COMPUTE_NANS_IN_LOGITS":
+     lambda: bool(int(os.getenv("VLLM_COMPUTE_NANS_IN_LOGITS", "0"))),
+
+     # Controls whether or not emulations are used for NVFP4
+     # generations on machines with compute capability < 10.0 for
+     # compressed-tensors models
+     "VLLM_USE_NVFP4_CT_EMULATIONS":
+     lambda: bool(int(os.getenv("VLLM_USE_NVFP4_CT_EMULATIONS", "0"))),
+
+     # Time (in seconds) after which the KV cache on the producer side is
+     # automatically cleared if no READ notification is received from the
+     # consumer. This is only applicable when using NixlConnector in a
+     # disaggregated decode-prefill setup.
+     "VLLM_NIXL_ABORT_REQUEST_TIMEOUT":
+     lambda: int(os.getenv("VLLM_NIXL_ABORT_REQUEST_TIMEOUT", "120")),
+
+     # Controls whether or not to use cudnn prefill
+     "VLLM_USE_CUDNN_PREFILL":
+     lambda: bool(int(os.getenv("VLLM_USE_CUDNN_PREFILL", "0"))),
+
+     # If set to 1/True, use the TRTLLM attention backend in flashinfer.
+     # If set to 0/False, use the default attention backend in flashinfer.
+     # If not set, auto-detect the attention backend in flashinfer.
+     "VLLM_USE_TRTLLM_ATTENTION":
+     lambda: (None if "VLLM_USE_TRTLLM_ATTENTION" not in os.environ else
+              os.environ["VLLM_USE_TRTLLM_ATTENTION"].lower() in ("1", "true")),
+
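+     # Unlike the integer-string boolean flags above, this one is
+     # tri-state: unset -> None (auto-detect), "1"/"true" -> True,
+     # anything else -> False. Equivalently (sketch only):
+     #
+     #     def _tristate(name):
+     #         if name not in os.environ:
+     #             return None
+     #         return os.environ[name].lower() in ("1", "true")
+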
1345
+     # If set to 1, when we use fp8 kv, we do not quantize Q to fp8
+     "VLLM_FLASHINFER_DISABLE_Q_QUANTIZATION":
+     lambda: bool(int(os.getenv("VLLM_FLASHINFER_DISABLE_Q_QUANTIZATION", "0"))),
+
+     # If set, cubin files have been pre-downloaded and flashinfer will
+     # read them directly.
+     "VLLM_HAS_FLASHINFER_CUBIN":
+     lambda: os.getenv("VLLM_HAS_FLASHINFER_CUBIN", False),
+
+     # If set to 1, force the use of TRTLLM FP4 GEMM backend in flashinfer.
+     # Otherwise, uses the first available of: flashinfer cutlass GEMM,
+     # vllm cutlass GEMM, marlin GEMM.
+     "VLLM_USE_TRTLLM_FP4_GEMM":
+     lambda: bool(int(os.getenv("VLLM_USE_TRTLLM_FP4_GEMM", "0"))),
+
1360
+     # Controls garbage collection during CUDA graph capture.
+     # If set to 0 (default), enables GC freezing to speed up capture time.
+     # If set to 1, allows GC to run during capture.
+     "VLLM_ENABLE_CUDAGRAPH_GC":
+     lambda: bool(int(os.getenv("VLLM_ENABLE_CUDAGRAPH_GC", "0"))),
+
+     # Disable padding to CUDA graph capture batch sizes.
+     # TODO(wentao): https://github.com/vllm-project/vllm/issues/23378
+     # After the issue is fixed, we can remove this flag.
+     "VLLM_DISABLE_PAD_FOR_CUDAGRAPH":
+     lambda: bool(int(os.getenv("VLLM_DISABLE_PAD_FOR_CUDAGRAPH", "0"))),
+
+     # Used to force-set the loopback IP.
+     "VLLM_LOOPBACK_IP":
+     lambda: os.getenv("VLLM_LOOPBACK_IP", ""),
+
+     # Used to set the process name prefix for vLLM processes.
+     # This is useful for debugging and monitoring purposes.
+     # The default value is "VLLM".
+     "VLLM_PROCESS_NAME_PREFIX":
+     lambda: os.getenv("VLLM_PROCESS_NAME_PREFIX", "VLLM"),
+
+     # Allow chunked local attention with the hybrid KV cache manager.
+     # Currently, using the hybrid KV cache manager with chunked local
+     # attention in the Llama4 models (the only models currently using
+     # chunked local attn) causes a latency regression. For this reason,
+     # we disable it by default. This flag allows users to enable it if
+     # they want to (to save on kv-cache memory usage and enable longer
+     # contexts).
+     # TODO(lucas): Remove this flag once the latency regression is resolved.
+     "VLLM_ALLOW_CHUNKED_LOCAL_ATTN_WITH_HYBRID_KV_CACHE":
+     lambda: bool(int(os.getenv(
+         "VLLM_ALLOW_CHUNKED_LOCAL_ATTN_WITH_HYBRID_KV_CACHE", "0"))),
+
1393
+     # Enables support for the "store" option in the OpenAI Responses API.
+     # When set to 1, vLLM's OpenAI server will retain the input and output
+     # messages for those requests in memory. By default, this is disabled (0),
+     # and the "store" option is ignored.
+     # NOTE/WARNING:
+     # 1. Messages are kept in memory only (not persisted to disk) and will be
+     #    lost when the vLLM server shuts down.
+     # 2. Enabling this option will cause a memory leak, as stored messages are
+     #    never removed from memory until the server terminates.
+     "VLLM_ENABLE_RESPONSES_API_STORE":
+     lambda: bool(int(os.getenv("VLLM_ENABLE_RESPONSES_API_STORE", "0"))),
+
+     # If set, use the fp8 mfma in rocm paged attention.
+     "VLLM_ROCM_FP8_MFMA_PAGE_ATTN":
+     lambda: bool(int(os.getenv("VLLM_ROCM_FP8_MFMA_PAGE_ATTN", "0"))),
+
+     # Whether to use pytorch symmetric memory for allreduce
+     "VLLM_ALLREDUCE_USE_SYMM_MEM":
+     lambda: bool(int(os.getenv("VLLM_ALLREDUCE_USE_SYMM_MEM", "1"))),
+
1413
+     # Allows vLLM to find tuned configs under a customized folder
+     "VLLM_TUNED_CONFIG_FOLDER":
+     lambda: os.getenv("VLLM_TUNED_CONFIG_FOLDER", None),
+
+     # Allows harmony instructions to be injected into system messages
+     "VLLM_GPT_OSS_HARMONY_SYSTEM_INSTRUCTIONS":
+     lambda: bool(
+         int(os.getenv("VLLM_GPT_OSS_HARMONY_SYSTEM_INSTRUCTIONS", "0"))),
+
+     # Add optional custom scopes for profiling; disable to avoid overheads
+     "VLLM_CUSTOM_SCOPES_FOR_PROFILING":
+     lambda: bool(int(os.getenv("VLLM_CUSTOM_SCOPES_FOR_PROFILING", "0"))),
+
+     # Add optional nvtx scopes for profiling; disable to avoid overheads
+     "VLLM_NVTX_SCOPES_FOR_PROFILING":
+     lambda: bool(int(os.getenv("VLLM_NVTX_SCOPES_FOR_PROFILING", "0"))),
+
1430
+     # Represent block hashes in KV cache events as 64-bit integers instead of
+     # raw bytes. Defaults to True for backward compatibility.
+     "VLLM_KV_EVENTS_USE_INT_BLOCK_HASHES":
+     lambda: bool(int(os.getenv("VLLM_KV_EVENTS_USE_INT_BLOCK_HASHES", "1"))),
+
+     # Name of the shared memory buffer used for object storage.
+     # Only effective when mm_config.mm_processor_cache_type == "shm".
+     "VLLM_OBJECT_STORAGE_SHM_BUFFER_NAME":
+     lambda: os.getenv("VLLM_OBJECT_STORAGE_SHM_BUFFER_NAME",
+                       "VLLM_OBJECT_STORAGE_SHM_BUFFER"),
+
+     # The size in MB of the buffers (NVL and RDMA) used by DeepEP
+     "VLLM_DEEPEP_BUFFER_SIZE_MB":
+     lambda: int(os.getenv("VLLM_DEEPEP_BUFFER_SIZE_MB", "1024")),
+
+     # The number of SMs to allocate for communication kernels when running
+     # DBO; the rest of the SMs on the device will be allocated to compute.
+     "VLLM_DBO_COMM_SMS":
+     lambda: int(os.getenv("VLLM_DBO_COMM_SMS", "20")),
+
+     # Valid values are container, code_interpreter, web_search_preview,
+     # e.g. GPT_OSS_SYSTEM_TOOL_MCP_LABELS=container,code_interpreter
+     "GPT_OSS_SYSTEM_TOOL_MCP_LABELS":
+     env_list_with_choices("GPT_OSS_SYSTEM_TOOL_MCP_LABELS", [],
+                           ["container",
+                            "code_interpreter",
+                            "web_search_preview"]),
+
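+     # Analogous to the `env_with_choices` sketch earlier: a minimal
+     # approximation (not the actual implementation) of the list variant
+     # used above would split on commas and validate each element:
+     #
+     #     def env_list_with_choices(name, default, choices):
+     #         def _get():
+     #             raw = os.getenv(name)
+     #             if raw is None:
+     #                 return default
+     #             values = [v.strip() for v in raw.split(",") if v.strip()]
+     #             for v in values:
+     #                 if v not in choices:
+     #                     raise ValueError(f"{name}: invalid value {v!r}")
+     #             return values
+     #         return _get
+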
1458
+     # Enable max_autotune & coordinate_descent_tuning in inductor_config
+     # to compile static shapes passed from compile_sizes in compilation_config.
+     # If set to 1, enable max_autotune; by default, this is enabled (1).
+     "VLLM_ENABLE_INDUCTOR_MAX_AUTOTUNE":
+     lambda: bool(int(os.getenv("VLLM_ENABLE_INDUCTOR_MAX_AUTOTUNE", "1"))),
+     # If set to 1, enable coordinate_descent_tuning;
+     # by default, this is enabled (1).
+     "VLLM_ENABLE_INDUCTOR_COORDINATE_DESCENT_TUNING":
+     lambda: bool(int(os.getenv("VLLM_ENABLE_INDUCTOR_COORDINATE_DESCENT_TUNING",
+                                "1"))),
+
+     # Flag to enable NCCL symmetric memory allocation and registration
+     "VLLM_USE_NCCL_SYMM_MEM":
+     lambda: bool(int(os.getenv("VLLM_USE_NCCL_SYMM_MEM", "0"))),
+
1473
+     # NCCL header path
+     "VLLM_NCCL_INCLUDE_PATH":
+     lambda: os.environ.get("VLLM_NCCL_INCLUDE_PATH", None),
+
+     # Flag to enable FBGemm kernels on model execution
+     "VLLM_USE_FBGEMM": lambda: bool(int(os.getenv("VLLM_USE_FBGEMM", "0"))),
+ }
+
+ # --8<-- [end:env-vars-definition]
+
+
1483
+ def __getattr__(name: str):
+     # lazy evaluation of environment variables
+     if name in environment_variables:
+         return environment_variables[name]()
+     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
+
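+ # Because the names in `environment_variables` are never bound as real
+ # module attributes, this module-level __getattr__ runs on every lookup,
+ # so values are re-read from os.environ each time. For example:
+ #
+ #     import vllm.envs as envs
+ #     envs.VLLM_SLEEP_WHEN_IDLE   # evaluates the lambda on every access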
1489
+
+ def __dir__():
+     return list(environment_variables.keys())
+
1493
+
+ def is_set(name: str):
+     """Check if an environment variable is explicitly set."""
+     if name in environment_variables:
+         return name in os.environ
+     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
+
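+ # Unlike attribute access, which always yields a value (possibly just the
+ # default), is_set() reports whether the user actually exported the
+ # variable, e.g.:
+ #
+ #     envs.is_set("VLLM_USE_V1")   # False unless exported
+ #     envs.VLLM_USE_V1             # still returns the default value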
1500
+
+ def set_vllm_use_v1(use_v1: bool):
+     if is_set("VLLM_USE_V1"):
+         raise ValueError(
+             "Should not call set_vllm_use_v1() if VLLM_USE_V1 is set "
+             "explicitly by the user. Please raise this as a Github "
+             "Issue and explicitly set VLLM_USE_V1=0 or 1.")
+     os.environ["VLLM_USE_V1"] = "1" if use_v1 else "0"
+
1509
+
+ def compute_hash() -> str:
+     """
+     WARNING: Whenever a new key is added to these environment
+     variables, ensure that it is included in the factors list if
+     it affects the computation graph. For example, different values
+     of VLLM_PP_LAYER_PARTITION will generate different computation
+     graphs, so it is included in the factors list. The env vars that
+     affect the choice of different kernels or attention backends should
+     also be included in the factors list.
+     """
+
+     # The values of envs may affect the computation graph.
+     # TODO(DefTruth): hash all environment variables?
+     # for key in environment_variables:
+     #     factorize(key)
+     environment_variables_to_hash = [
+         "VLLM_PP_LAYER_PARTITION",
+         "VLLM_MLA_DISABLE",
+         "VLLM_FLASH_ATTN_MAX_NUM_SPLITS_FOR_CUDA_GRAPH",
+         "VLLM_USE_TRITON_FLASH_ATTN",
+         "VLLM_USE_TRITON_AWQ",
+         "VLLM_DP_RANK",
+         "VLLM_DP_SIZE",
+         "VLLM_USE_STANDALONE_COMPILE",
+         "VLLM_FUSED_MOE_CHUNK_SIZE",
+         "VLLM_FLASHINFER_MOE_BACKEND",
+         "VLLM_V1_USE_PREFILL_DECODE_ATTENTION",
+         "VLLM_USE_AITER_UNIFIED_ATTENTION",
+         "VLLM_ATTENTION_BACKEND",
+         "VLLM_USE_FLASHINFER_SAMPLER",
+         "VLLM_DISABLED_KERNELS",
+         "VLLM_USE_DEEP_GEMM",
+         "VLLM_USE_DEEP_GEMM_E8M0",
+         "VLLM_USE_DEEP_GEMM_E8M0_HOPPER",
+         "VLLM_USE_TRTLLM_FP4_GEMM",
+         "VLLM_USE_FUSED_MOE_GROUPED_TOPK",
+         "VLLM_USE_FLASHINFER_MOE_FP16",
+         "VLLM_USE_FLASHINFER_MOE_FP8",
+         "VLLM_USE_FLASHINFER_MOE_FP4",
+         "VLLM_USE_FLASHINFER_MOE_MXFP4_MXFP8",
+         "VLLM_USE_FLASHINFER_MOE_MXFP4_MXFP8_CUTLASS",
+         "VLLM_USE_FLASHINFER_MOE_MXFP4_BF16",
+         "VLLM_USE_CUDNN_PREFILL",
+         "VLLM_USE_TRTLLM_ATTENTION",
+         "VLLM_FLASHINFER_DISABLE_Q_QUANTIZATION",
+         "VLLM_ROCM_USE_AITER",
+         "VLLM_ROCM_USE_AITER_PAGED_ATTN",
+         "VLLM_ROCM_USE_AITER_LINEAR",
+         "VLLM_ROCM_USE_AITER_MOE",
+         "VLLM_ROCM_USE_AITER_RMSNORM",
+         "VLLM_ROCM_USE_AITER_MLA",
+         "VLLM_ROCM_USE_AITER_MHA",
+         "VLLM_ROCM_USE_AITER_FP4_ASM_GEMM",
+         "VLLM_ROCM_USE_TRITON_ROPE",
+         "VLLM_ROCM_USE_AITER_FP8BMM",
+         "VLLM_ROCM_USE_SKINNY_GEMM",
+         "VLLM_ROCM_FP8_PADDING",
+         "VLLM_ROCM_MOE_PADDING",
+         "VLLM_ROCM_CUSTOM_PAGED_ATTN",
+         "VLLM_ROCM_QUICK_REDUCE_QUANTIZATION",
+         "VLLM_ROCM_QUICK_REDUCE_CAST_BF16_TO_FP16",
+         "VLLM_ROCM_QUICK_REDUCE_MAX_SIZE_BYTES_MB",
+         "VLLM_ROCM_FP8_MFMA_PAGE_ATTN",
+         "VLLM_ENABLE_INDUCTOR_MAX_AUTOTUNE",
+         "VLLM_ENABLE_INDUCTOR_COORDINATE_DESCENT_TUNING",
+         "VLLM_USE_FBGEMM",
+     ]
+     for key in environment_variables_to_hash:
+         # if this goes out of sync with environment_variables,
+         # it's not a user error, it's a bug
+         assert key in environment_variables, \
+             "Please update environment_variables_to_hash in envs.py"
+
+     factors = [
+         environment_variables[key]() for key in environment_variables_to_hash
+     ]
+
+     hash_str = hashlib.md5(str(factors).encode(),
+                            usedforsecurity=False).hexdigest()
+
+     return hash_str
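+
+ # Illustrative behavior (values are hypothetical): changing any hashed
+ # env var changes the digest, presumably so that anything keyed on
+ # compute_hash() is invalidated when graph-affecting settings change:
+ #
+ #     os.environ["VLLM_PP_LAYER_PARTITION"] = "4,4"
+ #     h1 = compute_hash()
+ #     os.environ["VLLM_PP_LAYER_PARTITION"] = "2,6"
+ #     h2 = compute_hash()
+ #     assert h1 != h2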