vllm-cpu-avx512bf16 0.14.0 (cp313-cp313-manylinux_2_28_x86_64.whl)

This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
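For reference, the wheel tag in the title encodes the build target per PEP 427: cp313-cp313 means a CPython 3.13 interpreter and ABI, and manylinux_2_28_x86_64 means glibc 2.28 or newer on x86-64. A minimal sketch of extracting these fields with the `packaging` library, assuming the canonical filename for this wheel is `vllm_cpu_avx512bf16-0.14.0-cp313-cp313-manylinux_2_28_x86_64.whl` and that `packaging` is installed:

    # A minimal sketch: parse the wheel filename into its PEP 427 fields.
    # Assumes the canonical filename for this package (an assumption; the
    # title above shows it in garbled form) and `pip install packaging`.
    from packaging.utils import parse_wheel_filename

    name, version, build, tags = parse_wheel_filename(
        "vllm_cpu_avx512bf16-0.14.0-cp313-cp313-manylinux_2_28_x86_64.whl"
    )
    print(name, version)  # vllm-cpu-avx512bf16 0.14.0
    for tag in tags:
        # Prints: cp313 cp313 manylinux_2_28_x86_64
        print(tag.interpreter, tag.abi, tag.platform)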
Files changed (1712)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +225 -0
  3. vllm/_aiter_ops.py +1511 -0
  4. vllm/_bc_linter.py +54 -0
  5. vllm/_custom_ops.py +3206 -0
  6. vllm/_ipex_ops.py +445 -0
  7. vllm/_version.py +34 -0
  8. vllm/assets/__init__.py +0 -0
  9. vllm/assets/audio.py +43 -0
  10. vllm/assets/base.py +40 -0
  11. vllm/assets/image.py +62 -0
  12. vllm/assets/video.py +149 -0
  13. vllm/attention/__init__.py +0 -0
  14. vllm/attention/layer.py +913 -0
  15. vllm/attention/utils/__init__.py +0 -0
  16. vllm/attention/utils/kv_sharing_utils.py +33 -0
  17. vllm/attention/utils/kv_transfer_utils.py +60 -0
  18. vllm/beam_search.py +88 -0
  19. vllm/benchmarks/__init__.py +0 -0
  20. vllm/benchmarks/datasets.py +3277 -0
  21. vllm/benchmarks/latency.py +172 -0
  22. vllm/benchmarks/lib/__init__.py +3 -0
  23. vllm/benchmarks/lib/endpoint_request_func.py +777 -0
  24. vllm/benchmarks/lib/ready_checker.py +72 -0
  25. vllm/benchmarks/lib/utils.py +79 -0
  26. vllm/benchmarks/mm_processor.py +363 -0
  27. vllm/benchmarks/serve.py +1761 -0
  28. vllm/benchmarks/startup.py +321 -0
  29. vllm/benchmarks/sweep/__init__.py +0 -0
  30. vllm/benchmarks/sweep/cli.py +41 -0
  31. vllm/benchmarks/sweep/param_sweep.py +159 -0
  32. vllm/benchmarks/sweep/plot.py +675 -0
  33. vllm/benchmarks/sweep/plot_pareto.py +393 -0
  34. vllm/benchmarks/sweep/serve.py +450 -0
  35. vllm/benchmarks/sweep/serve_sla.py +459 -0
  36. vllm/benchmarks/sweep/server.py +114 -0
  37. vllm/benchmarks/sweep/sla_sweep.py +138 -0
  38. vllm/benchmarks/sweep/utils.py +4 -0
  39. vllm/benchmarks/throughput.py +946 -0
  40. vllm/collect_env.py +857 -0
  41. vllm/compilation/__init__.py +0 -0
  42. vllm/compilation/activation_quant_fusion.py +214 -0
  43. vllm/compilation/backends.py +840 -0
  44. vllm/compilation/base_static_graph.py +57 -0
  45. vllm/compilation/caching.py +196 -0
  46. vllm/compilation/collective_fusion.py +1224 -0
  47. vllm/compilation/compiler_interface.py +639 -0
  48. vllm/compilation/counter.py +50 -0
  49. vllm/compilation/cuda_graph.py +309 -0
  50. vllm/compilation/decorators.py +662 -0
  51. vllm/compilation/fix_functionalization.py +266 -0
  52. vllm/compilation/fusion.py +570 -0
  53. vllm/compilation/fusion_attn.py +363 -0
  54. vllm/compilation/fx_utils.py +92 -0
  55. vllm/compilation/inductor_pass.py +145 -0
  56. vllm/compilation/matcher_utils.py +454 -0
  57. vllm/compilation/monitor.py +62 -0
  58. vllm/compilation/noop_elimination.py +130 -0
  59. vllm/compilation/partition_rules.py +75 -0
  60. vllm/compilation/pass_manager.py +164 -0
  61. vllm/compilation/piecewise_backend.py +191 -0
  62. vllm/compilation/post_cleanup.py +21 -0
  63. vllm/compilation/qk_norm_rope_fusion.py +244 -0
  64. vllm/compilation/rocm_aiter_fusion.py +401 -0
  65. vllm/compilation/sequence_parallelism.py +368 -0
  66. vllm/compilation/torch25_custom_graph_pass.py +44 -0
  67. vllm/compilation/vllm_inductor_pass.py +180 -0
  68. vllm/compilation/wrapper.py +329 -0
  69. vllm/config/__init__.py +112 -0
  70. vllm/config/attention.py +114 -0
  71. vllm/config/cache.py +233 -0
  72. vllm/config/compilation.py +1149 -0
  73. vllm/config/device.py +75 -0
  74. vllm/config/ec_transfer.py +110 -0
  75. vllm/config/kv_events.py +56 -0
  76. vllm/config/kv_transfer.py +119 -0
  77. vllm/config/load.py +124 -0
  78. vllm/config/lora.py +102 -0
  79. vllm/config/model.py +2026 -0
  80. vllm/config/model_arch.py +57 -0
  81. vllm/config/multimodal.py +247 -0
  82. vllm/config/observability.py +157 -0
  83. vllm/config/parallel.py +703 -0
  84. vllm/config/pooler.py +188 -0
  85. vllm/config/profiler.py +199 -0
  86. vllm/config/scheduler.py +298 -0
  87. vllm/config/speculative.py +656 -0
  88. vllm/config/speech_to_text.py +39 -0
  89. vllm/config/structured_outputs.py +78 -0
  90. vllm/config/utils.py +374 -0
  91. vllm/config/vllm.py +1487 -0
  92. vllm/connections.py +189 -0
  93. vllm/device_allocator/__init__.py +0 -0
  94. vllm/device_allocator/cumem.py +301 -0
  95. vllm/distributed/__init__.py +6 -0
  96. vllm/distributed/communication_op.py +43 -0
  97. vllm/distributed/device_communicators/__init__.py +0 -0
  98. vllm/distributed/device_communicators/all2all.py +509 -0
  99. vllm/distributed/device_communicators/all_reduce_utils.py +344 -0
  100. vllm/distributed/device_communicators/base_device_communicator.py +303 -0
  101. vllm/distributed/device_communicators/cpu_communicator.py +209 -0
  102. vllm/distributed/device_communicators/cuda_communicator.py +346 -0
  103. vllm/distributed/device_communicators/cuda_wrapper.py +190 -0
  104. vllm/distributed/device_communicators/custom_all_reduce.py +326 -0
  105. vllm/distributed/device_communicators/mnnvl_compat.py +27 -0
  106. vllm/distributed/device_communicators/pynccl.py +386 -0
  107. vllm/distributed/device_communicators/pynccl_allocator.py +191 -0
  108. vllm/distributed/device_communicators/pynccl_wrapper.py +567 -0
  109. vllm/distributed/device_communicators/quick_all_reduce.py +290 -0
  110. vllm/distributed/device_communicators/ray_communicator.py +259 -0
  111. vllm/distributed/device_communicators/shm_broadcast.py +778 -0
  112. vllm/distributed/device_communicators/shm_object_storage.py +697 -0
  113. vllm/distributed/device_communicators/symm_mem.py +156 -0
  114. vllm/distributed/device_communicators/xpu_communicator.py +98 -0
  115. vllm/distributed/ec_transfer/__init__.py +14 -0
  116. vllm/distributed/ec_transfer/ec_connector/__init__.py +0 -0
  117. vllm/distributed/ec_transfer/ec_connector/base.py +247 -0
  118. vllm/distributed/ec_transfer/ec_connector/example_connector.py +201 -0
  119. vllm/distributed/ec_transfer/ec_connector/factory.py +85 -0
  120. vllm/distributed/ec_transfer/ec_transfer_state.py +42 -0
  121. vllm/distributed/eplb/__init__.py +3 -0
  122. vllm/distributed/eplb/async_worker.py +115 -0
  123. vllm/distributed/eplb/eplb_state.py +1192 -0
  124. vllm/distributed/eplb/policy/__init__.py +19 -0
  125. vllm/distributed/eplb/policy/abstract.py +43 -0
  126. vllm/distributed/eplb/policy/default.py +376 -0
  127. vllm/distributed/eplb/rebalance_execute.py +699 -0
  128. vllm/distributed/kv_events.py +505 -0
  129. vllm/distributed/kv_transfer/README.md +29 -0
  130. vllm/distributed/kv_transfer/__init__.py +20 -0
  131. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  132. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  133. vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
  134. vllm/distributed/kv_transfer/kv_connector/factory.py +203 -0
  135. vllm/distributed/kv_transfer/kv_connector/utils.py +459 -0
  136. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +19 -0
  137. vllm/distributed/kv_transfer/kv_connector/v1/base.py +607 -0
  138. vllm/distributed/kv_transfer/kv_connector/v1/decode_bench_connector.py +419 -0
  139. vllm/distributed/kv_transfer/kv_connector/v1/example_connector.py +450 -0
  140. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +344 -0
  141. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/__init__.py +18 -0
  142. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/multi_process_adapter.py +395 -0
  143. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/utils.py +211 -0
  144. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/vllm_v1_adapter.py +1431 -0
  145. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_mp_connector.py +941 -0
  146. vllm/distributed/kv_transfer/kv_connector/v1/metrics.py +186 -0
  147. vllm/distributed/kv_transfer/kv_connector/v1/mooncake_connector.py +916 -0
  148. vllm/distributed/kv_transfer/kv_connector/v1/moriio/__init__.py +0 -0
  149. vllm/distributed/kv_transfer/kv_connector/v1/moriio/moriio_common.py +321 -0
  150. vllm/distributed/kv_transfer/kv_connector/v1/moriio/moriio_connector.py +1515 -0
  151. vllm/distributed/kv_transfer/kv_connector/v1/moriio/moriio_engine.py +609 -0
  152. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +477 -0
  153. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +2688 -0
  154. vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py +557 -0
  155. vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
  156. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +531 -0
  157. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +632 -0
  158. vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +273 -0
  159. vllm/distributed/kv_transfer/kv_transfer_state.py +78 -0
  160. vllm/distributed/parallel_state.py +1809 -0
  161. vllm/distributed/utils.py +545 -0
  162. vllm/engine/__init__.py +0 -0
  163. vllm/engine/arg_utils.py +2137 -0
  164. vllm/engine/async_llm_engine.py +6 -0
  165. vllm/engine/llm_engine.py +6 -0
  166. vllm/engine/protocol.py +194 -0
  167. vllm/entrypoints/__init__.py +0 -0
  168. vllm/entrypoints/anthropic/__init__.py +0 -0
  169. vllm/entrypoints/anthropic/protocol.py +162 -0
  170. vllm/entrypoints/anthropic/serving_messages.py +468 -0
  171. vllm/entrypoints/api_server.py +186 -0
  172. vllm/entrypoints/chat_utils.py +1912 -0
  173. vllm/entrypoints/cli/__init__.py +19 -0
  174. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  175. vllm/entrypoints/cli/benchmark/base.py +25 -0
  176. vllm/entrypoints/cli/benchmark/latency.py +21 -0
  177. vllm/entrypoints/cli/benchmark/main.py +57 -0
  178. vllm/entrypoints/cli/benchmark/mm_processor.py +21 -0
  179. vllm/entrypoints/cli/benchmark/serve.py +21 -0
  180. vllm/entrypoints/cli/benchmark/startup.py +21 -0
  181. vllm/entrypoints/cli/benchmark/sweep.py +21 -0
  182. vllm/entrypoints/cli/benchmark/throughput.py +21 -0
  183. vllm/entrypoints/cli/collect_env.py +38 -0
  184. vllm/entrypoints/cli/main.py +79 -0
  185. vllm/entrypoints/cli/openai.py +260 -0
  186. vllm/entrypoints/cli/run_batch.py +68 -0
  187. vllm/entrypoints/cli/serve.py +253 -0
  188. vllm/entrypoints/cli/types.py +29 -0
  189. vllm/entrypoints/constants.py +12 -0
  190. vllm/entrypoints/context.py +898 -0
  191. vllm/entrypoints/grpc_server.py +531 -0
  192. vllm/entrypoints/launcher.py +175 -0
  193. vllm/entrypoints/llm.py +1807 -0
  194. vllm/entrypoints/logger.py +86 -0
  195. vllm/entrypoints/openai/__init__.py +0 -0
  196. vllm/entrypoints/openai/api_server.py +1390 -0
  197. vllm/entrypoints/openai/cli_args.py +320 -0
  198. vllm/entrypoints/openai/orca_metrics.py +120 -0
  199. vllm/entrypoints/openai/parser/__init__.py +0 -0
  200. vllm/entrypoints/openai/parser/harmony_utils.py +820 -0
  201. vllm/entrypoints/openai/parser/responses_parser.py +176 -0
  202. vllm/entrypoints/openai/protocol.py +2566 -0
  203. vllm/entrypoints/openai/run_batch.py +635 -0
  204. vllm/entrypoints/openai/serving_chat.py +1897 -0
  205. vllm/entrypoints/openai/serving_chat_stream_harmony.py +101 -0
  206. vllm/entrypoints/openai/serving_completion.py +740 -0
  207. vllm/entrypoints/openai/serving_engine.py +1612 -0
  208. vllm/entrypoints/openai/serving_models.py +309 -0
  209. vllm/entrypoints/openai/serving_responses.py +2552 -0
  210. vllm/entrypoints/openai/serving_transcription.py +168 -0
  211. vllm/entrypoints/openai/speech_to_text.py +711 -0
  212. vllm/entrypoints/openai/utils.py +49 -0
  213. vllm/entrypoints/pooling/__init__.py +16 -0
  214. vllm/entrypoints/pooling/classify/__init__.py +0 -0
  215. vllm/entrypoints/pooling/classify/api_router.py +48 -0
  216. vllm/entrypoints/pooling/classify/protocol.py +181 -0
  217. vllm/entrypoints/pooling/classify/serving.py +233 -0
  218. vllm/entrypoints/pooling/embed/__init__.py +0 -0
  219. vllm/entrypoints/pooling/embed/api_router.py +65 -0
  220. vllm/entrypoints/pooling/embed/conftest.py +28 -0
  221. vllm/entrypoints/pooling/embed/protocol.py +217 -0
  222. vllm/entrypoints/pooling/embed/serving.py +684 -0
  223. vllm/entrypoints/pooling/pooling/__init__.py +0 -0
  224. vllm/entrypoints/pooling/pooling/api_router.py +62 -0
  225. vllm/entrypoints/pooling/pooling/protocol.py +146 -0
  226. vllm/entrypoints/pooling/pooling/serving.py +354 -0
  227. vllm/entrypoints/pooling/score/__init__.py +0 -0
  228. vllm/entrypoints/pooling/score/api_router.py +147 -0
  229. vllm/entrypoints/pooling/score/protocol.py +146 -0
  230. vllm/entrypoints/pooling/score/serving.py +511 -0
  231. vllm/entrypoints/renderer.py +411 -0
  232. vllm/entrypoints/responses_utils.py +218 -0
  233. vllm/entrypoints/sagemaker/__init__.py +4 -0
  234. vllm/entrypoints/sagemaker/routes.py +118 -0
  235. vllm/entrypoints/score_utils.py +271 -0
  236. vllm/entrypoints/serve/__init__.py +94 -0
  237. vllm/entrypoints/serve/cache/__init__.py +0 -0
  238. vllm/entrypoints/serve/cache/api_router.py +61 -0
  239. vllm/entrypoints/serve/disagg/__init__.py +0 -0
  240. vllm/entrypoints/serve/disagg/api_router.py +109 -0
  241. vllm/entrypoints/serve/disagg/protocol.py +90 -0
  242. vllm/entrypoints/serve/disagg/serving.py +285 -0
  243. vllm/entrypoints/serve/elastic_ep/__init__.py +0 -0
  244. vllm/entrypoints/serve/elastic_ep/api_router.py +96 -0
  245. vllm/entrypoints/serve/elastic_ep/middleware.py +49 -0
  246. vllm/entrypoints/serve/instrumentator/__init__.py +0 -0
  247. vllm/entrypoints/serve/instrumentator/health.py +33 -0
  248. vllm/entrypoints/serve/instrumentator/metrics.py +45 -0
  249. vllm/entrypoints/serve/instrumentator/offline_docs.py +50 -0
  250. vllm/entrypoints/serve/instrumentator/server_info.py +56 -0
  251. vllm/entrypoints/serve/instrumentator/static/swagger-ui-bundle.js +2 -0
  252. vllm/entrypoints/serve/instrumentator/static/swagger-ui.css +3 -0
  253. vllm/entrypoints/serve/lora/__init__.py +0 -0
  254. vllm/entrypoints/serve/lora/api_router.py +70 -0
  255. vllm/entrypoints/serve/profile/__init__.py +0 -0
  256. vllm/entrypoints/serve/profile/api_router.py +46 -0
  257. vllm/entrypoints/serve/rlhf/__init__.py +0 -0
  258. vllm/entrypoints/serve/rlhf/api_router.py +102 -0
  259. vllm/entrypoints/serve/rpc/__init__.py +0 -0
  260. vllm/entrypoints/serve/rpc/api_router.py +61 -0
  261. vllm/entrypoints/serve/sleep/__init__.py +0 -0
  262. vllm/entrypoints/serve/sleep/api_router.py +56 -0
  263. vllm/entrypoints/serve/tokenize/__init__.py +0 -0
  264. vllm/entrypoints/serve/tokenize/api_router.py +112 -0
  265. vllm/entrypoints/serve/tokenize/serving.py +204 -0
  266. vllm/entrypoints/ssl.py +78 -0
  267. vllm/entrypoints/tool.py +187 -0
  268. vllm/entrypoints/tool_server.py +234 -0
  269. vllm/entrypoints/utils.py +336 -0
  270. vllm/env_override.py +402 -0
  271. vllm/envs.py +1791 -0
  272. vllm/exceptions.py +36 -0
  273. vllm/forward_context.py +375 -0
  274. vllm/grpc/__init__.py +17 -0
  275. vllm/grpc/compile_protos.py +94 -0
  276. vllm/grpc/vllm_engine.proto +195 -0
  277. vllm/grpc/vllm_engine_pb2.py +77 -0
  278. vllm/grpc/vllm_engine_pb2.pyi +213 -0
  279. vllm/grpc/vllm_engine_pb2_grpc.py +330 -0
  280. vllm/inputs/__init__.py +44 -0
  281. vllm/inputs/data.py +359 -0
  282. vllm/inputs/parse.py +147 -0
  283. vllm/inputs/preprocess.py +716 -0
  284. vllm/logger.py +303 -0
  285. vllm/logging_utils/__init__.py +13 -0
  286. vllm/logging_utils/dump_input.py +83 -0
  287. vllm/logging_utils/formatter.py +127 -0
  288. vllm/logging_utils/lazy.py +20 -0
  289. vllm/logging_utils/log_time.py +34 -0
  290. vllm/logits_process.py +121 -0
  291. vllm/logprobs.py +206 -0
  292. vllm/lora/__init__.py +0 -0
  293. vllm/lora/layers/__init__.py +43 -0
  294. vllm/lora/layers/base.py +66 -0
  295. vllm/lora/layers/base_linear.py +172 -0
  296. vllm/lora/layers/column_parallel_linear.py +577 -0
  297. vllm/lora/layers/fused_moe.py +739 -0
  298. vllm/lora/layers/logits_processor.py +203 -0
  299. vllm/lora/layers/replicated_linear.py +70 -0
  300. vllm/lora/layers/row_parallel_linear.py +176 -0
  301. vllm/lora/layers/utils.py +115 -0
  302. vllm/lora/layers/vocal_parallel_embedding.py +140 -0
  303. vllm/lora/lora_model.py +221 -0
  304. vllm/lora/lora_weights.py +227 -0
  305. vllm/lora/model_manager.py +858 -0
  306. vllm/lora/ops/__init__.py +0 -0
  307. vllm/lora/ops/ipex_ops/__init__.py +6 -0
  308. vllm/lora/ops/ipex_ops/lora_ops.py +57 -0
  309. vllm/lora/ops/torch_ops/__init__.py +20 -0
  310. vllm/lora/ops/torch_ops/lora_ops.py +128 -0
  311. vllm/lora/ops/triton_ops/README_TUNING.md +60 -0
  312. vllm/lora/ops/triton_ops/__init__.py +21 -0
  313. vllm/lora/ops/triton_ops/fused_moe_lora_op.py +677 -0
  314. vllm/lora/ops/triton_ops/kernel_utils.py +340 -0
  315. vllm/lora/ops/triton_ops/lora_expand_op.py +310 -0
  316. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +154 -0
  317. vllm/lora/ops/triton_ops/lora_shrink_op.py +287 -0
  318. vllm/lora/ops/triton_ops/utils.py +313 -0
  319. vllm/lora/peft_helper.py +128 -0
  320. vllm/lora/punica_wrapper/__init__.py +10 -0
  321. vllm/lora/punica_wrapper/punica_base.py +493 -0
  322. vllm/lora/punica_wrapper/punica_cpu.py +351 -0
  323. vllm/lora/punica_wrapper/punica_gpu.py +413 -0
  324. vllm/lora/punica_wrapper/punica_selector.py +21 -0
  325. vllm/lora/punica_wrapper/punica_xpu.py +276 -0
  326. vllm/lora/punica_wrapper/utils.py +150 -0
  327. vllm/lora/request.py +60 -0
  328. vllm/lora/resolver.py +88 -0
  329. vllm/lora/utils.py +281 -0
  330. vllm/lora/worker_manager.py +278 -0
  331. vllm/model_executor/__init__.py +9 -0
  332. vllm/model_executor/custom_op.py +203 -0
  333. vllm/model_executor/layers/__init__.py +0 -0
  334. vllm/model_executor/layers/activation.py +628 -0
  335. vllm/model_executor/layers/attention/__init__.py +0 -0
  336. vllm/model_executor/layers/attention/chunked_local_attention.py +130 -0
  337. vllm/model_executor/layers/attention/cross_attention.py +182 -0
  338. vllm/model_executor/layers/attention/encoder_only_attention.py +103 -0
  339. vllm/model_executor/layers/attention/mm_encoder_attention.py +234 -0
  340. vllm/model_executor/layers/attention/static_sink_attention.py +254 -0
  341. vllm/model_executor/layers/attention_layer_base.py +34 -0
  342. vllm/model_executor/layers/batch_invariant.py +1063 -0
  343. vllm/model_executor/layers/conv.py +262 -0
  344. vllm/model_executor/layers/fla/__init__.py +8 -0
  345. vllm/model_executor/layers/fla/ops/__init__.py +17 -0
  346. vllm/model_executor/layers/fla/ops/chunk.py +240 -0
  347. vllm/model_executor/layers/fla/ops/chunk_delta_h.py +344 -0
  348. vllm/model_executor/layers/fla/ops/chunk_o.py +183 -0
  349. vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +154 -0
  350. vllm/model_executor/layers/fla/ops/cumsum.py +280 -0
  351. vllm/model_executor/layers/fla/ops/fused_recurrent.py +390 -0
  352. vllm/model_executor/layers/fla/ops/index.py +41 -0
  353. vllm/model_executor/layers/fla/ops/kda.py +1351 -0
  354. vllm/model_executor/layers/fla/ops/l2norm.py +146 -0
  355. vllm/model_executor/layers/fla/ops/layernorm_guard.py +396 -0
  356. vllm/model_executor/layers/fla/ops/op.py +60 -0
  357. vllm/model_executor/layers/fla/ops/solve_tril.py +556 -0
  358. vllm/model_executor/layers/fla/ops/utils.py +194 -0
  359. vllm/model_executor/layers/fla/ops/wy_fast.py +158 -0
  360. vllm/model_executor/layers/fused_moe/__init__.py +120 -0
  361. vllm/model_executor/layers/fused_moe/all2all_utils.py +173 -0
  362. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +411 -0
  363. vllm/model_executor/layers/fused_moe/config.py +1111 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H100,dtype=fp8_w8a8.json +123 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200.json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_L40S.json +147 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_RTX_PRO_6000_Blackwell_Workstation_Edition,dtype=fp8_w8a8.json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI308X.json +213 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200.json +147 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_L40S.json +147 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=129,N=704,device_name=NVIDIA_RTX_PRO_6000_Blackwell_Workstation_Edition,dtype=fp8_w8a8.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200.json +146 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200.json +146 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  454. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  455. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
  456. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  457. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  458. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI300X.json +201 -0
  459. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
  460. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  461. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_B300_SXM6_AC,dtype=fp8_w8a8.json +147 -0
  462. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  463. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +147 -0
  464. vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
  465. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  466. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
  467. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI355_OAM,dtype=fp8_w8a8.json +164 -0
  468. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=NVIDIA_B300_SXM6_AC,dtype=fp8_w8a8.json +147 -0
  469. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  470. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  471. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  472. vllm/model_executor/layers/fused_moe/configs/E=160,N=768,device_name=NVIDIA_B300_SXM6_AC,dtype=fp8_w8a8.json +147 -0
  473. vllm/model_executor/layers/fused_moe/configs/E=20,N=1536,device_name=NVIDIA_RTX_PRO_6000_Blackwell_Server_Edition,dtype=fp8_w8a8.json +147 -0
  474. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  475. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  476. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  477. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  478. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  479. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  480. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  481. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  482. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  483. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  484. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  485. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  486. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  487. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  488. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  489. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  490. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  491. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  492. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  493. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  494. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  495. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  496. vllm/model_executor/layers/fused_moe/configs/E=256,N=384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  497. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  498. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  499. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  500. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  501. vllm/model_executor/layers/fused_moe/configs/E=32,N=1408,device_name=NVIDIA_B200.json +147 -0
  502. vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  503. vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  504. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  505. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  506. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  507. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  508. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  509. vllm/model_executor/layers/fused_moe/configs/E=40,N=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  510. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  511. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  512. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  513. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
  514. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  515. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
  516. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +147 -0
  517. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  518. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
  519. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
  520. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  521. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
  522. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  523. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  524. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  525. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
  526. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
  527. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  528. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
  529. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  530. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  531. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
  532. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
  533. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
  534. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
  535. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
  536. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
  537. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  538. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  539. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  540. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  541. vllm/model_executor/layers/fused_moe/configs/E=62,N=128,device_name=AMD_Instinct_MI300X.json +200 -0
  542. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=AMD_Instinct_MI300X.json +200 -0
  543. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  544. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=AMD_Instinct_MI300X.json +200 -0
  545. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  546. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  547. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  548. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  549. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  550. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  551. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  552. vllm/model_executor/layers/fused_moe/configs/E=64,N=1408,device_name=NVIDIA_B200.json +147 -0
  553. vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  554. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  555. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  556. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  557. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  558. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
  559. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  560. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  561. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  562. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  563. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  564. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
  565. vllm/model_executor/layers/fused_moe/configs/E=64,N=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  566. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  567. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  568. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  569. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  570. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  571. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  572. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  573. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H100_PCIe,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  574. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  575. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
  576. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  577. vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
  578. vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
  579. vllm/model_executor/layers/fused_moe/configs/E=72,N=192,device_name=AMD_Instinct_MI300X.json +200 -0
  580. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=AMD_Instinct_MI300X.json +200 -0
  581. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  582. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=AMD_Instinct_MI300X.json +200 -0
  583. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  584. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  585. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  586. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  587. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  588. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  589. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  590. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  591. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  592. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  593. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  594. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  595. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  596. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  597. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  598. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  599. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  600. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  601. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  602. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  603. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  604. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  605. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  606. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  607. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  608. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  609. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  610. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  611. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
  612. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  613. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  614. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  615. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  616. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  617. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  618. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  619. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  620. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  621. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  622. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  623. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  624. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  625. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  626. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  627. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  628. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  629. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  630. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  631. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  632. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  633. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  634. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  635. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  636. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  637. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  638. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  639. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  640. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  641. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  642. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  643. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  644. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  645. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  646. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  647. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  648. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  649. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  650. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  651. vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +444 -0
  652. vllm/model_executor/layers/fused_moe/cutlass_moe.py +1086 -0
  653. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +364 -0
  654. vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +427 -0
  655. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +420 -0
  656. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +436 -0
  657. vllm/model_executor/layers/fused_moe/fallback.py +127 -0
  658. vllm/model_executor/layers/fused_moe/flashinfer_cutedsl_moe.py +338 -0
  659. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +310 -0
  660. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +371 -0
  661. vllm/model_executor/layers/fused_moe/flashinfer_trtllm_moe.py +192 -0
  662. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1018 -0
  663. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +824 -0
  664. vllm/model_executor/layers/fused_moe/fused_moe.py +2638 -0
  665. vllm/model_executor/layers/fused_moe/fused_moe_method_base.py +119 -0
  666. vllm/model_executor/layers/fused_moe/fused_moe_modular_method.py +117 -0
  667. vllm/model_executor/layers/fused_moe/fused_moe_router.py +40 -0
  668. vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +531 -0
  669. vllm/model_executor/layers/fused_moe/layer.py +2169 -0
  670. vllm/model_executor/layers/fused_moe/modular_kernel.py +1251 -0
  671. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +192 -0
  672. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +229 -0
  673. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  674. vllm/model_executor/layers/fused_moe/oracle/__init__.py +2 -0
  675. vllm/model_executor/layers/fused_moe/oracle/fp8.py +358 -0
  676. vllm/model_executor/layers/fused_moe/oracle/nvfp4.py +280 -0
  677. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +362 -0
  678. vllm/model_executor/layers/fused_moe/prepare_finalize.py +87 -0
  679. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +347 -0
  680. vllm/model_executor/layers/fused_moe/routed_experts_capturer.py +324 -0
  681. vllm/model_executor/layers/fused_moe/routing_simulator.py +310 -0
  682. vllm/model_executor/layers/fused_moe/shared_fused_moe.py +96 -0
  683. vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +171 -0
  684. vllm/model_executor/layers/fused_moe/triton_cutlass_moe.py +78 -0
  685. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +75 -0
  686. vllm/model_executor/layers/fused_moe/trtllm_moe.py +144 -0
  687. vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py +403 -0
  688. vllm/model_executor/layers/fused_moe/utils.py +382 -0
  689. vllm/model_executor/layers/fused_moe/zero_expert_fused_moe.py +189 -0
  690. vllm/model_executor/layers/kda.py +442 -0
  691. vllm/model_executor/layers/layernorm.py +451 -0
  692. vllm/model_executor/layers/lightning_attn.py +735 -0
  693. vllm/model_executor/layers/linear.py +1478 -0
  694. vllm/model_executor/layers/logits_processor.py +109 -0
  695. vllm/model_executor/layers/mamba/__init__.py +0 -0
  696. vllm/model_executor/layers/mamba/abstract.py +68 -0
  697. vllm/model_executor/layers/mamba/linear_attn.py +410 -0
  698. vllm/model_executor/layers/mamba/mamba_mixer.py +541 -0
  699. vllm/model_executor/layers/mamba/mamba_mixer2.py +936 -0
  700. vllm/model_executor/layers/mamba/mamba_utils.py +225 -0
  701. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  702. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +1240 -0
  703. vllm/model_executor/layers/mamba/ops/layernorm_gated.py +172 -0
  704. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +586 -0
  705. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +211 -0
  706. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +456 -0
  707. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +700 -0
  708. vllm/model_executor/layers/mamba/ops/ssd_combined.py +230 -0
  709. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +157 -0
  710. vllm/model_executor/layers/mamba/short_conv.py +254 -0
  711. vllm/model_executor/layers/mla.py +179 -0
  712. vllm/model_executor/layers/pooler/__init__.py +5 -0
  713. vllm/model_executor/layers/pooler/abstract.py +39 -0
  714. vllm/model_executor/layers/pooler/activations.py +162 -0
  715. vllm/model_executor/layers/pooler/common.py +32 -0
  716. vllm/model_executor/layers/pooler/seqwise/__init__.py +45 -0
  717. vllm/model_executor/layers/pooler/seqwise/heads.py +151 -0
  718. vllm/model_executor/layers/pooler/seqwise/methods.py +93 -0
  719. vllm/model_executor/layers/pooler/seqwise/poolers.py +127 -0
  720. vllm/model_executor/layers/pooler/special.py +128 -0
  721. vllm/model_executor/layers/pooler/tokwise/__init__.py +39 -0
  722. vllm/model_executor/layers/pooler/tokwise/heads.py +133 -0
  723. vllm/model_executor/layers/pooler/tokwise/methods.py +122 -0
  724. vllm/model_executor/layers/pooler/tokwise/poolers.py +127 -0
  725. vllm/model_executor/layers/quantization/__init__.py +195 -0
  726. vllm/model_executor/layers/quantization/auto_round.py +454 -0
  727. vllm/model_executor/layers/quantization/awq.py +277 -0
  728. vllm/model_executor/layers/quantization/awq_marlin.py +795 -0
  729. vllm/model_executor/layers/quantization/awq_triton.py +337 -0
  730. vllm/model_executor/layers/quantization/base_config.py +170 -0
  731. vllm/model_executor/layers/quantization/bitblas.py +502 -0
  732. vllm/model_executor/layers/quantization/bitsandbytes.py +631 -0
  733. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +3 -0
  734. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +982 -0
  735. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +2368 -0
  736. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +37 -0
  737. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +392 -0
  738. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  739. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +176 -0
  740. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_mxfp4.py +106 -0
  741. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +124 -0
  742. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +218 -0
  743. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +176 -0
  744. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +153 -0
  745. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +138 -0
  746. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +203 -0
  747. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +125 -0
  748. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +230 -0
  749. vllm/model_executor/layers/quantization/compressed_tensors/transform/__init__.py +0 -0
  750. vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +260 -0
  751. vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +173 -0
  752. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/__init__.py +0 -0
  753. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +64 -0
  754. vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
  755. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +224 -0
  756. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  757. vllm/model_executor/layers/quantization/cpu_wna16.py +299 -0
  758. vllm/model_executor/layers/quantization/deepspeedfp.py +218 -0
  759. vllm/model_executor/layers/quantization/experts_int8.py +209 -0
  760. vllm/model_executor/layers/quantization/fbgemm_fp8.py +195 -0
  761. vllm/model_executor/layers/quantization/fp8.py +1224 -0
  762. vllm/model_executor/layers/quantization/fp_quant.py +420 -0
  763. vllm/model_executor/layers/quantization/gguf.py +682 -0
  764. vllm/model_executor/layers/quantization/gptq.py +393 -0
  765. vllm/model_executor/layers/quantization/gptq_bitblas.py +482 -0
  766. vllm/model_executor/layers/quantization/gptq_marlin.py +934 -0
  767. vllm/model_executor/layers/quantization/gptq_marlin_24.py +320 -0
  768. vllm/model_executor/layers/quantization/hqq_marlin.py +372 -0
  769. vllm/model_executor/layers/quantization/inc.py +65 -0
  770. vllm/model_executor/layers/quantization/input_quant_fp8.py +212 -0
  771. vllm/model_executor/layers/quantization/ipex_quant.py +403 -0
  772. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  773. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +94 -0
  774. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +113 -0
  775. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
  776. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +323 -0
  777. vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +98 -0
  778. vllm/model_executor/layers/quantization/kernels/mixed_precision/cpu.py +126 -0
  779. vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +130 -0
  780. vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +111 -0
  781. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +168 -0
  782. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +159 -0
  783. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +200 -0
  784. vllm/model_executor/layers/quantization/kernels/mixed_precision/xpu.py +97 -0
  785. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +76 -0
  786. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +77 -0
  787. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +128 -0
  788. vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +220 -0
  789. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +147 -0
  790. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +88 -0
  791. vllm/model_executor/layers/quantization/kv_cache.py +153 -0
  792. vllm/model_executor/layers/quantization/modelopt.py +1665 -0
  793. vllm/model_executor/layers/quantization/moe_wna16.py +518 -0
  794. vllm/model_executor/layers/quantization/mxfp4.py +1145 -0
  795. vllm/model_executor/layers/quantization/petit.py +319 -0
  796. vllm/model_executor/layers/quantization/ptpc_fp8.py +140 -0
  797. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  798. vllm/model_executor/layers/quantization/quark/quark.py +570 -0
  799. vllm/model_executor/layers/quantization/quark/quark_moe.py +797 -0
  800. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  801. vllm/model_executor/layers/quantization/quark/schemes/quark_ocp_mx.py +343 -0
  802. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  803. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +179 -0
  804. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +139 -0
  805. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  806. vllm/model_executor/layers/quantization/qutlass_utils.py +185 -0
  807. vllm/model_executor/layers/quantization/rtn.py +626 -0
  808. vllm/model_executor/layers/quantization/schema.py +90 -0
  809. vllm/model_executor/layers/quantization/torchao.py +380 -0
  810. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  811. vllm/model_executor/layers/quantization/utils/allspark_utils.py +67 -0
  812. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +229 -0
  813. vllm/model_executor/layers/quantization/utils/configs/N=10240,K=5120,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  814. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  815. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  816. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  817. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  818. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  819. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  820. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  821. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  822. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  823. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  824. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  825. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  826. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  827. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  828. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  829. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  830. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  831. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  832. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  833. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  834. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  835. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  836. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  837. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  838. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  839. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  840. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  841. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  842. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  843. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  844. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  845. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  846. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  847. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  848. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  849. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  850. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  851. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  852. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  853. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  854. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  855. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  856. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  857. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  858. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  859. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  860. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  861. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  862. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  863. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  864. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  865. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  866. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  867. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  868. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  869. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  870. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  871. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  872. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  873. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  874. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  875. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  876. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  877. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  878. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  879. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  880. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  881. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  882. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  883. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  884. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  885. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  886. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  887. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  888. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  889. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  890. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  891. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  892. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  893. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  894. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  895. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  896. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  897. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  898. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  899. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  900. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  901. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  902. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  903. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  904. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  905. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  906. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  907. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  908. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  909. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  910. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  911. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  912. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  913. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  914. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  915. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  916. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  917. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  918. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  919. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  920. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  921. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  922. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  923. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  924. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  925. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  926. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  927. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  928. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  929. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  930. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  931. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  932. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  933. vllm/model_executor/layers/quantization/utils/configs/N=5120,K=25600,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  934. vllm/model_executor/layers/quantization/utils/configs/N=5120,K=8192,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  935. vllm/model_executor/layers/quantization/utils/configs/N=51200,K=5120,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  936. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  937. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  938. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  939. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  940. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  941. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  942. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  943. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  944. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  945. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  946. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  947. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  948. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  949. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  950. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  951. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  952. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  953. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  954. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  955. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  956. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  957. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  958. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  959. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  960. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  961. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  962. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  963. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  964. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  965. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  966. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  967. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  968. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  969. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  970. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  971. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  972. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  973. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  974. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  975. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  976. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  977. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  978. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  979. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  980. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  981. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  982. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  983. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  984. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  985. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  986. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  987. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  988. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  989. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  990. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  991. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  992. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  993. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  994. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  995. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  996. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  997. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  998. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  999. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1000. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1001. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1002. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1003. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  1004. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1005. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  1006. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1007. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1008. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1009. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1010. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1011. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  1012. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1013. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  1014. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1015. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1016. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1017. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1018. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  1019. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1020. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  1021. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1022. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1023. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1024. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1025. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1026. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1027. vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
  1028. vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +514 -0
  1029. vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +370 -0
  1030. vllm/model_executor/layers/quantization/utils/fp8_utils.py +1658 -0
  1031. vllm/model_executor/layers/quantization/utils/gptq_utils.py +158 -0
  1032. vllm/model_executor/layers/quantization/utils/int8_utils.py +477 -0
  1033. vllm/model_executor/layers/quantization/utils/layer_utils.py +41 -0
  1034. vllm/model_executor/layers/quantization/utils/machete_utils.py +56 -0
  1035. vllm/model_executor/layers/quantization/utils/marlin_utils.py +720 -0
  1036. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +565 -0
  1037. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +378 -0
  1038. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +219 -0
  1039. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +467 -0
  1040. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +189 -0
  1041. vllm/model_executor/layers/quantization/utils/mxfp6_utils.py +142 -0
  1042. vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +24 -0
  1043. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +142 -0
  1044. vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +67 -0
  1045. vllm/model_executor/layers/quantization/utils/ocp_mx_utils.py +51 -0
  1046. vllm/model_executor/layers/quantization/utils/petit_utils.py +124 -0
  1047. vllm/model_executor/layers/quantization/utils/quant_utils.py +767 -0
  1048. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +519 -0
  1049. vllm/model_executor/layers/resampler.py +283 -0
  1050. vllm/model_executor/layers/rotary_embedding/__init__.py +291 -0
  1051. vllm/model_executor/layers/rotary_embedding/base.py +282 -0
  1052. vllm/model_executor/layers/rotary_embedding/common.py +289 -0
  1053. vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +184 -0
  1054. vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +218 -0
  1055. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +43 -0
  1056. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +68 -0
  1057. vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +82 -0
  1058. vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
  1059. vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
  1060. vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +83 -0
  1061. vllm/model_executor/layers/rotary_embedding/mrope.py +412 -0
  1062. vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +47 -0
  1063. vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +159 -0
  1064. vllm/model_executor/layers/rotary_embedding/xdrope.py +160 -0
  1065. vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +84 -0
  1066. vllm/model_executor/layers/utils.py +251 -0
  1067. vllm/model_executor/layers/vocab_parallel_embedding.py +564 -0
  1068. vllm/model_executor/model_loader/__init__.py +150 -0
  1069. vllm/model_executor/model_loader/base_loader.py +71 -0
  1070. vllm/model_executor/model_loader/bitsandbytes_loader.py +821 -0
  1071. vllm/model_executor/model_loader/default_loader.py +304 -0
  1072. vllm/model_executor/model_loader/dummy_loader.py +28 -0
  1073. vllm/model_executor/model_loader/gguf_loader.py +371 -0
  1074. vllm/model_executor/model_loader/online_quantization.py +275 -0
  1075. vllm/model_executor/model_loader/runai_streamer_loader.py +115 -0
  1076. vllm/model_executor/model_loader/sharded_state_loader.py +214 -0
  1077. vllm/model_executor/model_loader/tensorizer.py +793 -0
  1078. vllm/model_executor/model_loader/tensorizer_loader.py +151 -0
  1079. vllm/model_executor/model_loader/utils.py +299 -0
  1080. vllm/model_executor/model_loader/weight_utils.py +1183 -0
  1081. vllm/model_executor/models/__init__.py +44 -0
  1082. vllm/model_executor/models/adapters.py +592 -0
  1083. vllm/model_executor/models/afmoe.py +697 -0
  1084. vllm/model_executor/models/aimv2.py +248 -0
  1085. vllm/model_executor/models/apertus.py +567 -0
  1086. vllm/model_executor/models/arcee.py +428 -0
  1087. vllm/model_executor/models/arctic.py +633 -0
  1088. vllm/model_executor/models/aria.py +663 -0
  1089. vllm/model_executor/models/audioflamingo3.py +639 -0
  1090. vllm/model_executor/models/aya_vision.py +448 -0
  1091. vllm/model_executor/models/bagel.py +591 -0
  1092. vllm/model_executor/models/baichuan.py +493 -0
  1093. vllm/model_executor/models/bailing_moe.py +643 -0
  1094. vllm/model_executor/models/bamba.py +511 -0
  1095. vllm/model_executor/models/bee.py +157 -0
  1096. vllm/model_executor/models/bert.py +911 -0
  1097. vllm/model_executor/models/bert_with_rope.py +729 -0
  1098. vllm/model_executor/models/blip.py +350 -0
  1099. vllm/model_executor/models/blip2.py +736 -0
  1100. vllm/model_executor/models/bloom.py +390 -0
  1101. vllm/model_executor/models/chameleon.py +1095 -0
  1102. vllm/model_executor/models/chatglm.py +502 -0
  1103. vllm/model_executor/models/clip.py +1045 -0
  1104. vllm/model_executor/models/cohere2_vision.py +470 -0
  1105. vllm/model_executor/models/commandr.py +469 -0
  1106. vllm/model_executor/models/config.py +571 -0
  1107. vllm/model_executor/models/dbrx.py +484 -0
  1108. vllm/model_executor/models/deepencoder.py +679 -0
  1109. vllm/model_executor/models/deepseek_eagle.py +253 -0
  1110. vllm/model_executor/models/deepseek_mtp.py +447 -0
  1111. vllm/model_executor/models/deepseek_ocr.py +601 -0
  1112. vllm/model_executor/models/deepseek_v2.py +1727 -0
  1113. vllm/model_executor/models/deepseek_vl2.py +642 -0
  1114. vllm/model_executor/models/dots1.py +566 -0
  1115. vllm/model_executor/models/dots_ocr.py +830 -0
  1116. vllm/model_executor/models/ernie45.py +53 -0
  1117. vllm/model_executor/models/ernie45_moe.py +755 -0
  1118. vllm/model_executor/models/ernie45_vl.py +1702 -0
  1119. vllm/model_executor/models/ernie45_vl_moe.py +801 -0
  1120. vllm/model_executor/models/ernie_mtp.py +278 -0
  1121. vllm/model_executor/models/exaone.py +524 -0
  1122. vllm/model_executor/models/exaone4.py +518 -0
  1123. vllm/model_executor/models/exaone_moe.py +579 -0
  1124. vllm/model_executor/models/exaone_moe_mtp.py +255 -0
  1125. vllm/model_executor/models/fairseq2_llama.py +154 -0
  1126. vllm/model_executor/models/falcon.py +543 -0
  1127. vllm/model_executor/models/falcon_h1.py +675 -0
  1128. vllm/model_executor/models/flex_olmo.py +155 -0
  1129. vllm/model_executor/models/fuyu.py +371 -0
  1130. vllm/model_executor/models/gemma.py +425 -0
  1131. vllm/model_executor/models/gemma2.py +435 -0
  1132. vllm/model_executor/models/gemma3.py +520 -0
  1133. vllm/model_executor/models/gemma3_mm.py +664 -0
  1134. vllm/model_executor/models/gemma3n.py +1166 -0
  1135. vllm/model_executor/models/gemma3n_audio_utils.py +57 -0
  1136. vllm/model_executor/models/gemma3n_mm.py +820 -0
  1137. vllm/model_executor/models/glm.py +24 -0
  1138. vllm/model_executor/models/glm4.py +295 -0
  1139. vllm/model_executor/models/glm4_1v.py +1823 -0
  1140. vllm/model_executor/models/glm4_moe.py +725 -0
  1141. vllm/model_executor/models/glm4_moe_mtp.py +365 -0
  1142. vllm/model_executor/models/glm4v.py +783 -0
  1143. vllm/model_executor/models/glmasr.py +1154 -0
  1144. vllm/model_executor/models/glmasr_utils.py +188 -0
  1145. vllm/model_executor/models/gpt2.py +385 -0
  1146. vllm/model_executor/models/gpt_bigcode.py +339 -0
  1147. vllm/model_executor/models/gpt_j.py +346 -0
  1148. vllm/model_executor/models/gpt_neox.py +340 -0
  1149. vllm/model_executor/models/gpt_oss.py +745 -0
  1150. vllm/model_executor/models/granite.py +475 -0
  1151. vllm/model_executor/models/granite_speech.py +919 -0
  1152. vllm/model_executor/models/granitemoe.py +561 -0
  1153. vllm/model_executor/models/granitemoehybrid.py +703 -0
  1154. vllm/model_executor/models/granitemoeshared.py +328 -0
  1155. vllm/model_executor/models/gritlm.py +242 -0
  1156. vllm/model_executor/models/grok1.py +803 -0
  1157. vllm/model_executor/models/h2ovl.py +554 -0
  1158. vllm/model_executor/models/hunyuan_v1.py +1042 -0
  1159. vllm/model_executor/models/hunyuan_vision.py +1034 -0
  1160. vllm/model_executor/models/hyperclovax_vision.py +1163 -0
  1161. vllm/model_executor/models/idefics2_vision_model.py +427 -0
  1162. vllm/model_executor/models/idefics3.py +734 -0
  1163. vllm/model_executor/models/interfaces.py +1180 -0
  1164. vllm/model_executor/models/interfaces_base.py +252 -0
  1165. vllm/model_executor/models/intern_vit.py +454 -0
  1166. vllm/model_executor/models/internlm2.py +451 -0
  1167. vllm/model_executor/models/internlm2_ve.py +139 -0
  1168. vllm/model_executor/models/interns1.py +828 -0
  1169. vllm/model_executor/models/interns1_vit.py +433 -0
  1170. vllm/model_executor/models/internvl.py +1436 -0
  1171. vllm/model_executor/models/iquest_loopcoder.py +595 -0
  1172. vllm/model_executor/models/isaac.py +1503 -0
  1173. vllm/model_executor/models/jais.py +397 -0
  1174. vllm/model_executor/models/jais2.py +508 -0
  1175. vllm/model_executor/models/jamba.py +599 -0
  1176. vllm/model_executor/models/jina_vl.py +145 -0
  1177. vllm/model_executor/models/kanana_v.py +756 -0
  1178. vllm/model_executor/models/keye.py +1709 -0
  1179. vllm/model_executor/models/keye_vl1_5.py +726 -0
  1180. vllm/model_executor/models/kimi_linear.py +659 -0
  1181. vllm/model_executor/models/kimi_vl.py +577 -0
  1182. vllm/model_executor/models/lfm2.py +515 -0
  1183. vllm/model_executor/models/lfm2_moe.py +746 -0
  1184. vllm/model_executor/models/lfm2_vl.py +732 -0
  1185. vllm/model_executor/models/lightonocr.py +197 -0
  1186. vllm/model_executor/models/llama.py +724 -0
  1187. vllm/model_executor/models/llama4.py +860 -0
  1188. vllm/model_executor/models/llama4_eagle.py +225 -0
  1189. vllm/model_executor/models/llama_eagle.py +213 -0
  1190. vllm/model_executor/models/llama_eagle3.py +375 -0
  1191. vllm/model_executor/models/llava.py +879 -0
  1192. vllm/model_executor/models/llava_next.py +583 -0
  1193. vllm/model_executor/models/llava_next_video.py +467 -0
  1194. vllm/model_executor/models/llava_onevision.py +922 -0
  1195. vllm/model_executor/models/longcat_flash.py +767 -0
  1196. vllm/model_executor/models/longcat_flash_mtp.py +348 -0
  1197. vllm/model_executor/models/mamba.py +276 -0
  1198. vllm/model_executor/models/mamba2.py +288 -0
  1199. vllm/model_executor/models/medusa.py +179 -0
  1200. vllm/model_executor/models/midashenglm.py +826 -0
  1201. vllm/model_executor/models/mimo.py +188 -0
  1202. vllm/model_executor/models/mimo_mtp.py +294 -0
  1203. vllm/model_executor/models/mimo_v2_flash.py +718 -0
  1204. vllm/model_executor/models/minicpm.py +660 -0
  1205. vllm/model_executor/models/minicpm3.py +233 -0
  1206. vllm/model_executor/models/minicpm_eagle.py +386 -0
  1207. vllm/model_executor/models/minicpmo.py +768 -0
  1208. vllm/model_executor/models/minicpmv.py +1742 -0
  1209. vllm/model_executor/models/minimax_m2.py +552 -0
  1210. vllm/model_executor/models/minimax_text_01.py +1008 -0
  1211. vllm/model_executor/models/minimax_vl_01.py +395 -0
  1212. vllm/model_executor/models/mistral3.py +638 -0
  1213. vllm/model_executor/models/mistral_large_3.py +63 -0
  1214. vllm/model_executor/models/mistral_large_3_eagle.py +137 -0
  1215. vllm/model_executor/models/mixtral.py +599 -0
  1216. vllm/model_executor/models/mllama4.py +1170 -0
  1217. vllm/model_executor/models/mlp_speculator.py +235 -0
  1218. vllm/model_executor/models/modernbert.py +458 -0
  1219. vllm/model_executor/models/module_mapping.py +74 -0
  1220. vllm/model_executor/models/molmo.py +1592 -0
  1221. vllm/model_executor/models/moonvit.py +601 -0
  1222. vllm/model_executor/models/mpt.py +335 -0
  1223. vllm/model_executor/models/nano_nemotron_vl.py +1725 -0
  1224. vllm/model_executor/models/nemotron.py +499 -0
  1225. vllm/model_executor/models/nemotron_h.py +902 -0
  1226. vllm/model_executor/models/nemotron_nas.py +474 -0
  1227. vllm/model_executor/models/nemotron_parse.py +958 -0
  1228. vllm/model_executor/models/nemotron_vl.py +651 -0
  1229. vllm/model_executor/models/nvlm_d.py +216 -0
  1230. vllm/model_executor/models/olmo.py +412 -0
  1231. vllm/model_executor/models/olmo2.py +454 -0
  1232. vllm/model_executor/models/olmoe.py +498 -0
  1233. vllm/model_executor/models/opencua.py +262 -0
  1234. vllm/model_executor/models/openpangu.py +1378 -0
  1235. vllm/model_executor/models/openpangu_mtp.py +265 -0
  1236. vllm/model_executor/models/opt.py +426 -0
  1237. vllm/model_executor/models/orion.py +365 -0
  1238. vllm/model_executor/models/ouro.py +507 -0
  1239. vllm/model_executor/models/ovis.py +557 -0
  1240. vllm/model_executor/models/ovis2_5.py +661 -0
  1241. vllm/model_executor/models/paddleocr_vl.py +1261 -0
  1242. vllm/model_executor/models/paligemma.py +429 -0
  1243. vllm/model_executor/models/persimmon.py +373 -0
  1244. vllm/model_executor/models/phi.py +363 -0
  1245. vllm/model_executor/models/phi3.py +18 -0
  1246. vllm/model_executor/models/phi3v.py +729 -0
  1247. vllm/model_executor/models/phi4mm.py +1250 -0
  1248. vllm/model_executor/models/phi4mm_audio.py +1296 -0
  1249. vllm/model_executor/models/phi4mm_utils.py +1907 -0
  1250. vllm/model_executor/models/phimoe.py +671 -0
  1251. vllm/model_executor/models/pixtral.py +1437 -0
  1252. vllm/model_executor/models/plamo2.py +993 -0
  1253. vllm/model_executor/models/plamo3.py +437 -0
  1254. vllm/model_executor/models/qwen.py +377 -0
  1255. vllm/model_executor/models/qwen2.py +600 -0
  1256. vllm/model_executor/models/qwen2_5_omni_thinker.py +1200 -0
  1257. vllm/model_executor/models/qwen2_5_vl.py +1598 -0
  1258. vllm/model_executor/models/qwen2_audio.py +478 -0
  1259. vllm/model_executor/models/qwen2_moe.py +604 -0
  1260. vllm/model_executor/models/qwen2_rm.py +120 -0
  1261. vllm/model_executor/models/qwen2_vl.py +1588 -0
  1262. vllm/model_executor/models/qwen3.py +331 -0
  1263. vllm/model_executor/models/qwen3_moe.py +752 -0
  1264. vllm/model_executor/models/qwen3_next.py +1410 -0
  1265. vllm/model_executor/models/qwen3_next_mtp.py +293 -0
  1266. vllm/model_executor/models/qwen3_omni_moe_thinker.py +1814 -0
  1267. vllm/model_executor/models/qwen3_vl.py +2120 -0
  1268. vllm/model_executor/models/qwen3_vl_moe.py +474 -0
  1269. vllm/model_executor/models/qwen_vl.py +821 -0
  1270. vllm/model_executor/models/radio.py +573 -0
  1271. vllm/model_executor/models/registry.py +1218 -0
  1272. vllm/model_executor/models/roberta.py +239 -0
  1273. vllm/model_executor/models/rvl.py +107 -0
  1274. vllm/model_executor/models/seed_oss.py +492 -0
  1275. vllm/model_executor/models/siglip.py +1259 -0
  1276. vllm/model_executor/models/siglip2.py +495 -0
  1277. vllm/model_executor/models/siglip2navit.py +660 -0
  1278. vllm/model_executor/models/skyworkr1v.py +951 -0
  1279. vllm/model_executor/models/smolvlm.py +38 -0
  1280. vllm/model_executor/models/solar.py +484 -0
  1281. vllm/model_executor/models/stablelm.py +354 -0
  1282. vllm/model_executor/models/starcoder2.py +365 -0
  1283. vllm/model_executor/models/step3_text.py +554 -0
  1284. vllm/model_executor/models/step3_vl.py +1147 -0
  1285. vllm/model_executor/models/swin.py +500 -0
  1286. vllm/model_executor/models/tarsier.py +624 -0
  1287. vllm/model_executor/models/telechat2.py +153 -0
  1288. vllm/model_executor/models/teleflm.py +78 -0
  1289. vllm/model_executor/models/terratorch.py +318 -0
  1290. vllm/model_executor/models/transformers/__init__.py +127 -0
  1291. vllm/model_executor/models/transformers/base.py +523 -0
  1292. vllm/model_executor/models/transformers/causal.py +65 -0
  1293. vllm/model_executor/models/transformers/legacy.py +90 -0
  1294. vllm/model_executor/models/transformers/moe.py +329 -0
  1295. vllm/model_executor/models/transformers/multimodal.py +441 -0
  1296. vllm/model_executor/models/transformers/pooling.py +102 -0
  1297. vllm/model_executor/models/transformers/utils.py +253 -0
  1298. vllm/model_executor/models/ultravox.py +786 -0
  1299. vllm/model_executor/models/utils.py +832 -0
  1300. vllm/model_executor/models/vision.py +546 -0
  1301. vllm/model_executor/models/voxtral.py +867 -0
  1302. vllm/model_executor/models/voxtral_streaming.py +304 -0
  1303. vllm/model_executor/models/whisper.py +993 -0
  1304. vllm/model_executor/models/whisper_utils.py +299 -0
  1305. vllm/model_executor/models/zamba2.py +986 -0
  1306. vllm/model_executor/parameter.py +642 -0
  1307. vllm/model_executor/utils.py +113 -0
  1308. vllm/model_executor/warmup/__init__.py +0 -0
  1309. vllm/model_executor/warmup/deep_gemm_warmup.py +371 -0
  1310. vllm/model_executor/warmup/kernel_warmup.py +97 -0
  1311. vllm/model_inspection.py +136 -0
  1312. vllm/multimodal/__init__.py +38 -0
  1313. vllm/multimodal/audio.py +287 -0
  1314. vllm/multimodal/base.py +60 -0
  1315. vllm/multimodal/cache.py +829 -0
  1316. vllm/multimodal/evs.py +294 -0
  1317. vllm/multimodal/hasher.py +123 -0
  1318. vllm/multimodal/image.py +155 -0
  1319. vllm/multimodal/inputs.py +1027 -0
  1320. vllm/multimodal/parse.py +674 -0
  1321. vllm/multimodal/processing.py +2469 -0
  1322. vllm/multimodal/profiling.py +351 -0
  1323. vllm/multimodal/registry.py +375 -0
  1324. vllm/multimodal/utils.py +550 -0
  1325. vllm/multimodal/video.py +512 -0
  1326. vllm/outputs.py +347 -0
  1327. vllm/platforms/__init__.py +277 -0
  1328. vllm/platforms/cpu.py +423 -0
  1329. vllm/platforms/cuda.py +618 -0
  1330. vllm/platforms/interface.py +707 -0
  1331. vllm/platforms/rocm.py +586 -0
  1332. vllm/platforms/tpu.py +20 -0
  1333. vllm/platforms/xpu.py +262 -0
  1334. vllm/plugins/__init__.py +81 -0
  1335. vllm/plugins/io_processors/__init__.py +68 -0
  1336. vllm/plugins/io_processors/interface.py +77 -0
  1337. vllm/plugins/lora_resolvers/__init__.py +0 -0
  1338. vllm/plugins/lora_resolvers/filesystem_resolver.py +52 -0
  1339. vllm/pooling_params.py +229 -0
  1340. vllm/profiler/__init__.py +0 -0
  1341. vllm/profiler/layerwise_profile.py +392 -0
  1342. vllm/profiler/utils.py +151 -0
  1343. vllm/profiler/wrapper.py +241 -0
  1344. vllm/py.typed +2 -0
  1345. vllm/ray/__init__.py +0 -0
  1346. vllm/ray/lazy_utils.py +30 -0
  1347. vllm/ray/ray_env.py +79 -0
  1348. vllm/reasoning/__init__.py +96 -0
  1349. vllm/reasoning/abs_reasoning_parsers.py +318 -0
  1350. vllm/reasoning/basic_parsers.py +175 -0
  1351. vllm/reasoning/deepseek_r1_reasoning_parser.py +67 -0
  1352. vllm/reasoning/deepseek_v3_reasoning_parser.py +69 -0
  1353. vllm/reasoning/ernie45_reasoning_parser.py +165 -0
  1354. vllm/reasoning/glm4_moe_reasoning_parser.py +13 -0
  1355. vllm/reasoning/gptoss_reasoning_parser.py +173 -0
  1356. vllm/reasoning/granite_reasoning_parser.py +363 -0
  1357. vllm/reasoning/holo2_reasoning_parser.py +89 -0
  1358. vllm/reasoning/hunyuan_a13b_reasoning_parser.py +237 -0
  1359. vllm/reasoning/identity_reasoning_parser.py +63 -0
  1360. vllm/reasoning/minimax_m2_reasoning_parser.py +110 -0
  1361. vllm/reasoning/mistral_reasoning_parser.py +154 -0
  1362. vllm/reasoning/olmo3_reasoning_parser.py +302 -0
  1363. vllm/reasoning/qwen3_reasoning_parser.py +67 -0
  1364. vllm/reasoning/seedoss_reasoning_parser.py +27 -0
  1365. vllm/reasoning/step3_reasoning_parser.py +113 -0
  1366. vllm/sampling_params.py +629 -0
  1367. vllm/scalar_type.py +355 -0
  1368. vllm/scripts.py +17 -0
  1369. vllm/sequence.py +64 -0
  1370. vllm/tasks.py +13 -0
  1371. vllm/third_party/__init__.py +0 -0
  1372. vllm/third_party/pynvml.py +6140 -0
  1373. vllm/tokenizers/__init__.py +18 -0
  1374. vllm/tokenizers/deepseek_v32.py +187 -0
  1375. vllm/tokenizers/deepseek_v32_encoding.py +463 -0
  1376. vllm/tokenizers/detokenizer_utils.py +198 -0
  1377. vllm/tokenizers/grok2.py +443 -0
  1378. vllm/tokenizers/hf.py +119 -0
  1379. vllm/tokenizers/mistral.py +543 -0
  1380. vllm/tokenizers/protocol.py +123 -0
  1381. vllm/tokenizers/registry.py +238 -0
  1382. vllm/tool_parsers/__init__.py +158 -0
  1383. vllm/tool_parsers/abstract_tool_parser.py +274 -0
  1384. vllm/tool_parsers/deepseekv31_tool_parser.py +388 -0
  1385. vllm/tool_parsers/deepseekv32_tool_parser.py +591 -0
  1386. vllm/tool_parsers/deepseekv3_tool_parser.py +390 -0
  1387. vllm/tool_parsers/ernie45_tool_parser.py +210 -0
  1388. vllm/tool_parsers/functiongemma_tool_parser.py +321 -0
  1389. vllm/tool_parsers/gigachat3_tool_parser.py +190 -0
  1390. vllm/tool_parsers/glm47_moe_tool_parser.py +23 -0
  1391. vllm/tool_parsers/glm4_moe_tool_parser.py +215 -0
  1392. vllm/tool_parsers/granite_20b_fc_tool_parser.py +273 -0
  1393. vllm/tool_parsers/granite_tool_parser.py +253 -0
  1394. vllm/tool_parsers/hermes_tool_parser.py +495 -0
  1395. vllm/tool_parsers/hunyuan_a13b_tool_parser.py +420 -0
  1396. vllm/tool_parsers/internlm2_tool_parser.py +227 -0
  1397. vllm/tool_parsers/jamba_tool_parser.py +323 -0
  1398. vllm/tool_parsers/kimi_k2_tool_parser.py +598 -0
  1399. vllm/tool_parsers/llama4_pythonic_tool_parser.py +341 -0
  1400. vllm/tool_parsers/llama_tool_parser.py +324 -0
  1401. vllm/tool_parsers/longcat_tool_parser.py +37 -0
  1402. vllm/tool_parsers/minimax_m2_tool_parser.py +776 -0
  1403. vllm/tool_parsers/minimax_tool_parser.py +849 -0
  1404. vllm/tool_parsers/mistral_tool_parser.py +612 -0
  1405. vllm/tool_parsers/olmo3_tool_parser.py +366 -0
  1406. vllm/tool_parsers/openai_tool_parser.py +111 -0
  1407. vllm/tool_parsers/phi4mini_tool_parser.py +120 -0
  1408. vllm/tool_parsers/pythonic_tool_parser.py +332 -0
  1409. vllm/tool_parsers/qwen3coder_tool_parser.py +781 -0
  1410. vllm/tool_parsers/qwen3xml_tool_parser.py +1316 -0
  1411. vllm/tool_parsers/seed_oss_tool_parser.py +744 -0
  1412. vllm/tool_parsers/step3_tool_parser.py +303 -0
  1413. vllm/tool_parsers/utils.py +229 -0
  1414. vllm/tool_parsers/xlam_tool_parser.py +556 -0
  1415. vllm/tracing.py +135 -0
  1416. vllm/transformers_utils/__init__.py +26 -0
  1417. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1418. vllm/transformers_utils/chat_templates/registry.py +73 -0
  1419. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1420. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1421. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1422. vllm/transformers_utils/chat_templates/template_deepseek_ocr.jinja +14 -0
  1423. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1424. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1425. vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
  1426. vllm/transformers_utils/config.py +1169 -0
  1427. vllm/transformers_utils/config_parser_base.py +20 -0
  1428. vllm/transformers_utils/configs/__init__.py +106 -0
  1429. vllm/transformers_utils/configs/afmoe.py +87 -0
  1430. vllm/transformers_utils/configs/arctic.py +216 -0
  1431. vllm/transformers_utils/configs/bagel.py +53 -0
  1432. vllm/transformers_utils/configs/chatglm.py +75 -0
  1433. vllm/transformers_utils/configs/deepseek_vl2.py +126 -0
  1434. vllm/transformers_utils/configs/dotsocr.py +71 -0
  1435. vllm/transformers_utils/configs/eagle.py +90 -0
  1436. vllm/transformers_utils/configs/falcon.py +89 -0
  1437. vllm/transformers_utils/configs/flex_olmo.py +82 -0
  1438. vllm/transformers_utils/configs/hunyuan_vl.py +322 -0
  1439. vllm/transformers_utils/configs/isaac.py +100 -0
  1440. vllm/transformers_utils/configs/jais.py +243 -0
  1441. vllm/transformers_utils/configs/kimi_linear.py +148 -0
  1442. vllm/transformers_utils/configs/kimi_vl.py +38 -0
  1443. vllm/transformers_utils/configs/lfm2_moe.py +163 -0
  1444. vllm/transformers_utils/configs/medusa.py +65 -0
  1445. vllm/transformers_utils/configs/midashenglm.py +103 -0
  1446. vllm/transformers_utils/configs/mistral.py +263 -0
  1447. vllm/transformers_utils/configs/mlp_speculator.py +69 -0
  1448. vllm/transformers_utils/configs/moonvit.py +33 -0
  1449. vllm/transformers_utils/configs/nemotron.py +220 -0
  1450. vllm/transformers_utils/configs/nemotron_h.py +284 -0
  1451. vllm/transformers_utils/configs/olmo3.py +83 -0
  1452. vllm/transformers_utils/configs/ovis.py +182 -0
  1453. vllm/transformers_utils/configs/qwen3_next.py +277 -0
  1454. vllm/transformers_utils/configs/radio.py +98 -0
  1455. vllm/transformers_utils/configs/speculators/__init__.py +2 -0
  1456. vllm/transformers_utils/configs/speculators/algos.py +38 -0
  1457. vllm/transformers_utils/configs/speculators/base.py +114 -0
  1458. vllm/transformers_utils/configs/step3_vl.py +178 -0
  1459. vllm/transformers_utils/configs/tarsier2.py +24 -0
  1460. vllm/transformers_utils/configs/ultravox.py +120 -0
  1461. vllm/transformers_utils/dynamic_module.py +70 -0
  1462. vllm/transformers_utils/gguf_utils.py +280 -0
  1463. vllm/transformers_utils/model_arch_config_convertor.py +402 -0
  1464. vllm/transformers_utils/processor.py +424 -0
  1465. vllm/transformers_utils/processors/__init__.py +25 -0
  1466. vllm/transformers_utils/processors/bagel.py +78 -0
  1467. vllm/transformers_utils/processors/deepseek_ocr.py +438 -0
  1468. vllm/transformers_utils/processors/deepseek_vl2.py +406 -0
  1469. vllm/transformers_utils/processors/hunyuan_vl.py +233 -0
  1470. vllm/transformers_utils/processors/hunyuan_vl_image.py +477 -0
  1471. vllm/transformers_utils/processors/ovis.py +453 -0
  1472. vllm/transformers_utils/processors/ovis2_5.py +468 -0
  1473. vllm/transformers_utils/repo_utils.py +287 -0
  1474. vllm/transformers_utils/runai_utils.py +102 -0
  1475. vllm/transformers_utils/s3_utils.py +95 -0
  1476. vllm/transformers_utils/tokenizer.py +19 -0
  1477. vllm/transformers_utils/utils.py +112 -0
  1478. vllm/triton_utils/__init__.py +20 -0
  1479. vllm/triton_utils/importing.py +103 -0
  1480. vllm/usage/__init__.py +0 -0
  1481. vllm/usage/usage_lib.py +278 -0
  1482. vllm/utils/__init__.py +36 -0
  1483. vllm/utils/argparse_utils.py +491 -0
  1484. vllm/utils/async_utils.py +310 -0
  1485. vllm/utils/cache.py +214 -0
  1486. vllm/utils/collection_utils.py +112 -0
  1487. vllm/utils/counter.py +45 -0
  1488. vllm/utils/deep_gemm.py +424 -0
  1489. vllm/utils/flashinfer.py +602 -0
  1490. vllm/utils/func_utils.py +236 -0
  1491. vllm/utils/gc_utils.py +151 -0
  1492. vllm/utils/hashing.py +117 -0
  1493. vllm/utils/import_utils.py +438 -0
  1494. vllm/utils/jsontree.py +158 -0
  1495. vllm/utils/math_utils.py +32 -0
  1496. vllm/utils/mem_constants.py +13 -0
  1497. vllm/utils/mem_utils.py +285 -0
  1498. vllm/utils/nccl.py +64 -0
  1499. vllm/utils/network_utils.py +331 -0
  1500. vllm/utils/nvtx_pytorch_hooks.py +286 -0
  1501. vllm/utils/platform_utils.py +59 -0
  1502. vllm/utils/profiling.py +56 -0
  1503. vllm/utils/registry.py +51 -0
  1504. vllm/utils/serial_utils.py +214 -0
  1505. vllm/utils/system_utils.py +296 -0
  1506. vllm/utils/tensor_schema.py +255 -0
  1507. vllm/utils/torch_utils.py +781 -0
  1508. vllm/v1/__init__.py +0 -0
  1509. vllm/v1/attention/__init__.py +0 -0
  1510. vllm/v1/attention/backend.py +736 -0
  1511. vllm/v1/attention/backends/__init__.py +0 -0
  1512. vllm/v1/attention/backends/cpu_attn.py +501 -0
  1513. vllm/v1/attention/backends/fa_utils.py +126 -0
  1514. vllm/v1/attention/backends/flash_attn.py +1092 -0
  1515. vllm/v1/attention/backends/flash_attn_diffkv.py +277 -0
  1516. vllm/v1/attention/backends/flashinfer.py +1713 -0
  1517. vllm/v1/attention/backends/flex_attention.py +1024 -0
  1518. vllm/v1/attention/backends/gdn_attn.py +382 -0
  1519. vllm/v1/attention/backends/linear_attn.py +77 -0
  1520. vllm/v1/attention/backends/mamba1_attn.py +28 -0
  1521. vllm/v1/attention/backends/mamba2_attn.py +256 -0
  1522. vllm/v1/attention/backends/mamba_attn.py +313 -0
  1523. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1524. vllm/v1/attention/backends/mla/aiter_triton_mla.py +66 -0
  1525. vllm/v1/attention/backends/mla/common.py +2156 -0
  1526. vllm/v1/attention/backends/mla/cutlass_mla.py +278 -0
  1527. vllm/v1/attention/backends/mla/flashattn_mla.py +348 -0
  1528. vllm/v1/attention/backends/mla/flashinfer_mla.py +175 -0
  1529. vllm/v1/attention/backends/mla/flashmla.py +321 -0
  1530. vllm/v1/attention/backends/mla/flashmla_sparse.py +1021 -0
  1531. vllm/v1/attention/backends/mla/indexer.py +345 -0
  1532. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +284 -0
  1533. vllm/v1/attention/backends/mla/rocm_aiter_mla_sparse.py +321 -0
  1534. vllm/v1/attention/backends/mla/triton_mla.py +171 -0
  1535. vllm/v1/attention/backends/registry.py +258 -0
  1536. vllm/v1/attention/backends/rocm_aiter_fa.py +1000 -0
  1537. vllm/v1/attention/backends/rocm_aiter_unified_attn.py +206 -0
  1538. vllm/v1/attention/backends/rocm_attn.py +405 -0
  1539. vllm/v1/attention/backends/short_conv_attn.py +26 -0
  1540. vllm/v1/attention/backends/tree_attn.py +430 -0
  1541. vllm/v1/attention/backends/triton_attn.py +578 -0
  1542. vllm/v1/attention/backends/utils.py +978 -0
  1543. vllm/v1/attention/ops/__init__.py +0 -0
  1544. vllm/v1/attention/ops/chunked_prefill_paged_decode.py +459 -0
  1545. vllm/v1/attention/ops/common.py +469 -0
  1546. vllm/v1/attention/ops/flashmla.py +254 -0
  1547. vllm/v1/attention/ops/merge_attn_states.py +47 -0
  1548. vllm/v1/attention/ops/paged_attn.py +51 -0
  1549. vllm/v1/attention/ops/pallas_kv_cache_update.py +130 -0
  1550. vllm/v1/attention/ops/prefix_prefill.py +862 -0
  1551. vllm/v1/attention/ops/rocm_aiter_mla_sparse.py +210 -0
  1552. vllm/v1/attention/ops/triton_decode_attention.py +709 -0
  1553. vllm/v1/attention/ops/triton_merge_attn_states.py +116 -0
  1554. vllm/v1/attention/ops/triton_prefill_attention.py +272 -0
  1555. vllm/v1/attention/ops/triton_reshape_and_cache_flash.py +395 -0
  1556. vllm/v1/attention/ops/triton_unified_attention.py +1088 -0
  1557. vllm/v1/attention/ops/vit_attn_wrappers.py +185 -0
  1558. vllm/v1/attention/selector.py +145 -0
  1559. vllm/v1/core/__init__.py +0 -0
  1560. vllm/v1/core/block_pool.py +489 -0
  1561. vllm/v1/core/encoder_cache_manager.py +402 -0
  1562. vllm/v1/core/kv_cache_coordinator.py +560 -0
  1563. vllm/v1/core/kv_cache_manager.py +485 -0
  1564. vllm/v1/core/kv_cache_metrics.py +96 -0
  1565. vllm/v1/core/kv_cache_utils.py +1642 -0
  1566. vllm/v1/core/sched/__init__.py +0 -0
  1567. vllm/v1/core/sched/async_scheduler.py +66 -0
  1568. vllm/v1/core/sched/interface.py +205 -0
  1569. vllm/v1/core/sched/output.py +261 -0
  1570. vllm/v1/core/sched/request_queue.py +208 -0
  1571. vllm/v1/core/sched/scheduler.py +1936 -0
  1572. vllm/v1/core/sched/utils.py +64 -0
  1573. vllm/v1/core/single_type_kv_cache_manager.py +926 -0
  1574. vllm/v1/cudagraph_dispatcher.py +183 -0
  1575. vllm/v1/engine/__init__.py +224 -0
  1576. vllm/v1/engine/async_llm.py +874 -0
  1577. vllm/v1/engine/coordinator.py +396 -0
  1578. vllm/v1/engine/core.py +1614 -0
  1579. vllm/v1/engine/core_client.py +1422 -0
  1580. vllm/v1/engine/detokenizer.py +351 -0
  1581. vllm/v1/engine/exceptions.py +18 -0
  1582. vllm/v1/engine/input_processor.py +713 -0
  1583. vllm/v1/engine/llm_engine.py +415 -0
  1584. vllm/v1/engine/logprobs.py +245 -0
  1585. vllm/v1/engine/output_processor.py +715 -0
  1586. vllm/v1/engine/parallel_sampling.py +150 -0
  1587. vllm/v1/engine/utils.py +1086 -0
  1588. vllm/v1/executor/__init__.py +6 -0
  1589. vllm/v1/executor/abstract.py +352 -0
  1590. vllm/v1/executor/multiproc_executor.py +888 -0
  1591. vllm/v1/executor/ray_distributed_executor.py +8 -0
  1592. vllm/v1/executor/ray_executor.py +623 -0
  1593. vllm/v1/executor/ray_utils.py +468 -0
  1594. vllm/v1/executor/uniproc_executor.py +186 -0
  1595. vllm/v1/kv_cache_interface.py +485 -0
  1596. vllm/v1/kv_offload/__init__.py +0 -0
  1597. vllm/v1/kv_offload/abstract.py +161 -0
  1598. vllm/v1/kv_offload/arc_manager.py +237 -0
  1599. vllm/v1/kv_offload/backend.py +97 -0
  1600. vllm/v1/kv_offload/backends/__init__.py +0 -0
  1601. vllm/v1/kv_offload/backends/cpu.py +62 -0
  1602. vllm/v1/kv_offload/cpu.py +109 -0
  1603. vllm/v1/kv_offload/factory.py +58 -0
  1604. vllm/v1/kv_offload/lru_manager.py +139 -0
  1605. vllm/v1/kv_offload/mediums.py +39 -0
  1606. vllm/v1/kv_offload/spec.py +70 -0
  1607. vllm/v1/kv_offload/worker/__init__.py +0 -0
  1608. vllm/v1/kv_offload/worker/cpu_gpu.py +287 -0
  1609. vllm/v1/kv_offload/worker/worker.py +163 -0
  1610. vllm/v1/metrics/__init__.py +0 -0
  1611. vllm/v1/metrics/loggers.py +1320 -0
  1612. vllm/v1/metrics/perf.py +1244 -0
  1613. vllm/v1/metrics/prometheus.py +82 -0
  1614. vllm/v1/metrics/ray_wrappers.py +194 -0
  1615. vllm/v1/metrics/reader.py +257 -0
  1616. vllm/v1/metrics/stats.py +440 -0
  1617. vllm/v1/outputs.py +242 -0
  1618. vllm/v1/pool/__init__.py +0 -0
  1619. vllm/v1/pool/metadata.py +124 -0
  1620. vllm/v1/request.py +281 -0
  1621. vllm/v1/sample/__init__.py +0 -0
  1622. vllm/v1/sample/logits_processor/__init__.py +352 -0
  1623. vllm/v1/sample/logits_processor/builtin.py +278 -0
  1624. vllm/v1/sample/logits_processor/interface.py +106 -0
  1625. vllm/v1/sample/logits_processor/state.py +165 -0
  1626. vllm/v1/sample/metadata.py +44 -0
  1627. vllm/v1/sample/ops/__init__.py +0 -0
  1628. vllm/v1/sample/ops/bad_words.py +57 -0
  1629. vllm/v1/sample/ops/logprobs.py +25 -0
  1630. vllm/v1/sample/ops/penalties.py +57 -0
  1631. vllm/v1/sample/ops/topk_topp_sampler.py +388 -0
  1632. vllm/v1/sample/rejection_sampler.py +822 -0
  1633. vllm/v1/sample/sampler.py +319 -0
  1634. vllm/v1/sample/tpu/__init__.py +0 -0
  1635. vllm/v1/sample/tpu/metadata.py +120 -0
  1636. vllm/v1/sample/tpu/sampler.py +215 -0
  1637. vllm/v1/serial_utils.py +514 -0
  1638. vllm/v1/spec_decode/__init__.py +0 -0
  1639. vllm/v1/spec_decode/eagle.py +1346 -0
  1640. vllm/v1/spec_decode/medusa.py +73 -0
  1641. vllm/v1/spec_decode/metadata.py +66 -0
  1642. vllm/v1/spec_decode/metrics.py +225 -0
  1643. vllm/v1/spec_decode/ngram_proposer.py +281 -0
  1644. vllm/v1/spec_decode/suffix_decoding.py +95 -0
  1645. vllm/v1/spec_decode/utils.py +109 -0
  1646. vllm/v1/structured_output/__init__.py +337 -0
  1647. vllm/v1/structured_output/backend_guidance.py +291 -0
  1648. vllm/v1/structured_output/backend_lm_format_enforcer.py +177 -0
  1649. vllm/v1/structured_output/backend_outlines.py +324 -0
  1650. vllm/v1/structured_output/backend_types.py +136 -0
  1651. vllm/v1/structured_output/backend_xgrammar.py +378 -0
  1652. vllm/v1/structured_output/request.py +91 -0
  1653. vllm/v1/structured_output/utils.py +457 -0
  1654. vllm/v1/utils.py +466 -0
  1655. vllm/v1/worker/__init__.py +0 -0
  1656. vllm/v1/worker/block_table.py +343 -0
  1657. vllm/v1/worker/cp_utils.py +42 -0
  1658. vllm/v1/worker/cpu_model_runner.py +122 -0
  1659. vllm/v1/worker/cpu_worker.py +192 -0
  1660. vllm/v1/worker/dp_utils.py +240 -0
  1661. vllm/v1/worker/ec_connector_model_runner_mixin.py +85 -0
  1662. vllm/v1/worker/gpu/README.md +4 -0
  1663. vllm/v1/worker/gpu/__init__.py +0 -0
  1664. vllm/v1/worker/gpu/async_utils.py +98 -0
  1665. vllm/v1/worker/gpu/attn_utils.py +183 -0
  1666. vllm/v1/worker/gpu/block_table.py +222 -0
  1667. vllm/v1/worker/gpu/buffer_utils.py +224 -0
  1668. vllm/v1/worker/gpu/cudagraph_utils.py +264 -0
  1669. vllm/v1/worker/gpu/dp_utils.py +31 -0
  1670. vllm/v1/worker/gpu/input_batch.py +526 -0
  1671. vllm/v1/worker/gpu/metrics/__init__.py +0 -0
  1672. vllm/v1/worker/gpu/metrics/logits.py +42 -0
  1673. vllm/v1/worker/gpu/mm/__init__.py +0 -0
  1674. vllm/v1/worker/gpu/mm/mrope_utils.py +127 -0
  1675. vllm/v1/worker/gpu/model_runner.py +1005 -0
  1676. vllm/v1/worker/gpu/sample/__init__.py +0 -0
  1677. vllm/v1/worker/gpu/sample/gumbel.py +106 -0
  1678. vllm/v1/worker/gpu/sample/logit_bias.py +270 -0
  1679. vllm/v1/worker/gpu/sample/logprob.py +167 -0
  1680. vllm/v1/worker/gpu/sample/metadata.py +79 -0
  1681. vllm/v1/worker/gpu/sample/min_p.py +58 -0
  1682. vllm/v1/worker/gpu/sample/output.py +14 -0
  1683. vllm/v1/worker/gpu/sample/penalties.py +155 -0
  1684. vllm/v1/worker/gpu/sample/sampler.py +88 -0
  1685. vllm/v1/worker/gpu/spec_decode/__init__.py +18 -0
  1686. vllm/v1/worker/gpu/spec_decode/eagle.py +566 -0
  1687. vllm/v1/worker/gpu/spec_decode/eagle_cudagraph.py +115 -0
  1688. vllm/v1/worker/gpu/spec_decode/rejection_sample.py +71 -0
  1689. vllm/v1/worker/gpu/states.py +282 -0
  1690. vllm/v1/worker/gpu/structured_outputs.py +100 -0
  1691. vllm/v1/worker/gpu_input_batch.py +1030 -0
  1692. vllm/v1/worker/gpu_model_runner.py +5761 -0
  1693. vllm/v1/worker/gpu_ubatch_wrapper.py +475 -0
  1694. vllm/v1/worker/gpu_worker.py +968 -0
  1695. vllm/v1/worker/kv_connector_model_runner_mixin.py +300 -0
  1696. vllm/v1/worker/lora_model_runner_mixin.py +225 -0
  1697. vllm/v1/worker/tpu_input_batch.py +574 -0
  1698. vllm/v1/worker/tpu_worker.py +18 -0
  1699. vllm/v1/worker/ubatch_utils.py +112 -0
  1700. vllm/v1/worker/ubatching.py +242 -0
  1701. vllm/v1/worker/utils.py +400 -0
  1702. vllm/v1/worker/worker_base.py +372 -0
  1703. vllm/v1/worker/workspace.py +253 -0
  1704. vllm/v1/worker/xpu_model_runner.py +48 -0
  1705. vllm/v1/worker/xpu_worker.py +174 -0
  1706. vllm/version.py +39 -0
  1707. vllm/vllm_flash_attn/.gitkeep +0 -0
  1708. vllm_cpu_avx512bf16-0.14.0.dist-info/METADATA +348 -0
  1709. vllm_cpu_avx512bf16-0.14.0.dist-info/RECORD +1712 -0
  1710. vllm_cpu_avx512bf16-0.14.0.dist-info/WHEEL +5 -0
  1711. vllm_cpu_avx512bf16-0.14.0.dist-info/entry_points.txt +5 -0
  1712. vllm_cpu_avx512bf16-0.14.0.dist-info/top_level.txt +1 -0
vllm/config/model.py ADDED
@@ -0,0 +1,2026 @@
+# SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+
+import warnings
+from collections.abc import Callable
+from dataclasses import InitVar, field
+from functools import cached_property
+from typing import TYPE_CHECKING, Any, Literal, cast, get_args
+
+import torch
+from pydantic import ConfigDict, Field, field_validator, model_validator
+from pydantic.dataclasses import dataclass
+
+import vllm.envs as envs
+from vllm.config.model_arch import (
+    ModelArchitectureConfig,
+)
+from vllm.config.multimodal import MMCacheType, MMEncoderTPMode, MultiModalConfig
+from vllm.config.pooler import PoolerConfig
+from vllm.config.scheduler import RunnerType
+from vllm.config.utils import config, getattr_iter
+from vllm.logger import init_logger
+from vllm.platforms import current_platform
+from vllm.transformers_utils.config import (
+    ConfigFormat,
+    get_config,
+    get_hf_image_processor_config,
+    get_hf_text_config,
+    get_pooling_config,
+    get_sentence_transformer_tokenizer_config,
+    is_encoder_decoder,
+    is_rope_parameters_nested,
+    try_get_dense_modules,
+    try_get_generation_config,
+    try_get_tokenizer_config,
+    uses_mrope,
+    uses_xdrope_dim,
+)
+from vllm.transformers_utils.gguf_utils import (
+    is_gguf,
+    is_remote_gguf,
+    maybe_patch_hf_config_from_gguf,
+    split_remote_gguf,
+)
+from vllm.transformers_utils.model_arch_config_convertor import (
+    MODEL_ARCH_CONFIG_CONVERTORS,
+    ModelArchConfigConvertorBase,
+)
+from vllm.transformers_utils.runai_utils import ObjectStorageModel, is_runai_obj_uri
+from vllm.transformers_utils.utils import maybe_model_redirect
+from vllm.utils.import_utils import LazyLoader
+from vllm.v1.attention.backends.registry import AttentionBackendEnum
+
+if TYPE_CHECKING:
+    from transformers import PretrainedConfig
+
+    import vllm.model_executor.layers.quantization as me_quant
+    import vllm.model_executor.models as me_models
+    from vllm.config.load import LoadConfig
+    from vllm.config.parallel import ParallelConfig
+    from vllm.model_executor.layers.quantization import QuantizationMethods
+    from vllm.v1.sample.logits_processor import LogitsProcessor
+else:
+    PretrainedConfig = Any
+
+    me_quant = LazyLoader(
+        "model_executor", globals(), "vllm.model_executor.layers.quantization"
+    )
+    me_models = LazyLoader("model_executor", globals(), "vllm.model_executor.models")
+    LoadConfig = Any
+    ParallelConfig = Any
+    QuantizationMethods = Any
+    LogitsProcessor = Any
+
+logger = init_logger(__name__)
+
+RunnerOption = Literal["auto", RunnerType]
+ConvertType = Literal["none", "embed", "classify", "reward", "mm_encoder_only"]
+ConvertOption = Literal["auto", ConvertType]
+TokenizerMode = Literal["auto", "hf", "slow", "mistral", "deepseek_v32"]
+ModelDType = Literal["auto", "half", "float16", "bfloat16", "float", "float32"]
+LogprobsMode = Literal[
+    "raw_logits", "raw_logprobs", "processed_logits", "processed_logprobs"
+]
+HfOverrides = dict[str, Any] | Callable[[PretrainedConfig], PretrainedConfig]
+ModelImpl = Literal["auto", "vllm", "transformers", "terratorch"]
+LayerBlockType = Literal["attention", "linear_attention", "mamba"]
+
+_RUNNER_CONVERTS: dict[RunnerType, list[ConvertType]] = {
+    "generate": [],
+    "pooling": ["embed", "classify", "reward"],
+    "draft": [],
+}
+
+AttnTypeStr = Literal[
+    "decoder", "encoder", "encoder_only", "encoder_decoder", "attention_free", "hybrid"
+]
+
+
+@config
+@dataclass(config=ConfigDict(arbitrary_types_allowed=True))
+class ModelConfig:
+    """Configuration for the model."""
+
+    model: str = "Qwen/Qwen3-0.6B"
+    """Name or path of the Hugging Face model to use. It is also used as the
+    content for `model_name` tag in metrics output when `served_model_name` is
+    not specified."""
+    model_weights: str = ""
+    """Original model weights path. Used when the model is pulled from object
+    storage (e.g., RunAI) to preserve the original URI while `model` points to
+    the local directory."""
+    runner: RunnerOption = "auto"
+    """The type of model runner to use. Each vLLM instance only supports one
+    model runner, even if the same model can be used for multiple types."""
+    convert: ConvertOption = "auto"
+    """Convert the model using adapters defined in
+    [vllm.model_executor.models.adapters][]. The most common use case is to
+    adapt a text generation model to be used for pooling tasks."""
+    tokenizer: str = Field(default=None)
+    """Name or path of the Hugging Face tokenizer to use. If unspecified, model
+    name or path will be used."""
+    tokenizer_mode: TokenizerMode | str = "auto"
+    """Tokenizer mode:\n
+    - "auto" will use the tokenizer from `mistral_common` for Mistral models
+      if available, otherwise it will use the "hf" tokenizer.\n
+    - "hf" will use the fast tokenizer if available.\n
+    - "slow" will always use the slow tokenizer.\n
+    - "mistral" will always use the tokenizer from `mistral_common`.\n
+    - "deepseek_v32" will always use the tokenizer from `deepseek_v32`.\n
+    - Other custom values can be supported via plugins."""
+    trust_remote_code: bool = False
+    """Trust remote code (e.g., from HuggingFace) when downloading the model
+    and tokenizer."""
+    dtype: ModelDType | torch.dtype = "auto"
+    """Data type for model weights and activations:\n
+    - "auto" will use FP16 precision for FP32 and FP16 models, and BF16
+      precision for BF16 models.\n
+    - "half" for FP16. Recommended for AWQ quantization.\n
+    - "float16" is the same as "half".\n
+    - "bfloat16" for a balance between precision and range.\n
+    - "float" is shorthand for FP32 precision.\n
+    - "float32" for FP32 precision."""
+    seed: int = 0
+    """Random seed for reproducibility.
+
+    We must set the global seed because otherwise,
+    different tensor parallel workers would sample different tokens,
+    leading to inconsistent results."""
+    hf_config: PretrainedConfig = field(init=False)
+    """The Hugging Face config of the model."""
+    hf_text_config: PretrainedConfig = field(init=False)
+    """The Hugging Face config of the text model (same as hf_config for text models)."""
+    hf_config_path: str | None = None
+    """Name or path of the Hugging Face config to use. If unspecified, model
+    name or path will be used."""
+    allowed_local_media_path: str = ""
+    """Allow API requests to read local images or videos from directories
+    specified by the server file system. This is a security risk. Should only
+    be enabled in trusted environments."""
+    allowed_media_domains: list[str] | None = None
+    """If set, only media URLs that belong to these domains can be used for
+    multi-modal inputs."""
+    revision: str | None = None
+    """The specific model version to use. It can be a branch name, a tag name,
+    or a commit id. If unspecified, will use the default version."""
+    code_revision: str | None = None
+    """The specific revision to use for the model code on the Hugging Face Hub.
+    It can be a branch name, a tag name, or a commit id. If unspecified, will
+    use the default version."""
+    tokenizer_revision: str | None = None
+    """The specific revision to use for the tokenizer on the Hugging Face Hub.
+    It can be a branch name, a tag name, or a commit id. If unspecified, will
+    use the default version."""
+    max_model_len: int = Field(default=None, ge=-1)
+    """Model context length (prompt and output). If unspecified, will be
+    automatically derived from the model config.
+
+    When passing via `--max-model-len`, supports k/m/g/K/M/G in human-readable
+    format. Examples:\n
+    - 1k -> 1000\n
+    - 1K -> 1024\n
+    - 25.6k -> 25,600\n
+    - -1 or 'auto' -> Automatically choose the maximum model length that fits in
+      GPU memory. This will use the model's maximum context length if it fits,
+      otherwise it will find the largest length that can be accommodated."""
+    spec_target_max_model_len: int | None = None
+    """Specify the maximum length for spec decoding draft models."""
+    quantization: QuantizationMethods | str | None = None
+    """Method used to quantize the weights. If `None`, we first check the
+    `quantization_config` attribute in the model config file. If that is
+    `None`, we assume the model weights are not quantized and use `dtype` to
+    determine the data type of the weights."""
+    allow_deprecated_quantization: bool = False
+    """Whether to allow deprecated quantization methods."""
+    enforce_eager: bool = False
+    """Whether to always use eager-mode PyTorch. If True, we will disable CUDA
+    graph and always execute the model in eager mode. If False, we will use
+    CUDA graph and eager execution in hybrid for maximal performance and
+    flexibility."""
+    enable_return_routed_experts: bool = False
+    """Whether to return routed experts."""
+    max_logprobs: int = 20
+    """Maximum number of log probabilities to return when `logprobs` is
+    specified in `SamplingParams`. The default value comes from the default for
+    the OpenAI Chat Completions API. -1 means no cap, i.e. all (output_length *
+    vocab_size) logprobs are allowed to be returned and it may cause OOM."""
+    logprobs_mode: LogprobsMode = "raw_logprobs"
+    """Indicates the content returned in the logprobs and prompt_logprobs.
+    Supported modes:
+    1) raw_logprobs, 2) processed_logprobs, 3) raw_logits, 4) processed_logits.
+    Raw means the values before applying any logit processors, like bad words.
+    Processed means the values after applying all processors, including
+    temperature and top_k/top_p.
+    """
+    disable_sliding_window: bool = False
+    """Whether to disable sliding window. If True, we will disable the sliding
+    window functionality of the model, capping to sliding window size. If the
+    model does not support sliding window, this argument is ignored."""
+    disable_cascade_attn: bool = False
+    """Disable cascade attention for V1. While cascade attention does not
+    change the mathematical correctness, disabling it could be useful for
+    preventing potential numerical issues. Note that even if this is set to
+    False, cascade attention will only be used when the heuristic tells that
+    it's beneficial."""
+    skip_tokenizer_init: bool = False
+    """Skip initialization of tokenizer and detokenizer. Expects valid
+    `prompt_token_ids` and `None` for prompt from the input. The generated
+    output will contain token ids."""
+    enable_prompt_embeds: bool = False
+    """If `True`, enables passing text embeddings as inputs via the
+    `prompt_embeds` key.
+
+    WARNING: The vLLM engine may crash if embeddings of an incorrect shape are
+    passed. Only enable this flag for trusted users!"""
+    served_model_name: str | list[str] | None = None
+    """The model name(s) used in the API. If multiple names are provided, the
+    server will respond to any of the provided names. The model name in the
+    model field of a response will be the first name in this list. If not
+    specified, the model name will be the same as the `--model` argument. Note
+    that this name(s) will also be used in the `model_name` tag content of
+    Prometheus metrics; if multiple names are provided, the metrics tag will
+    take the first one."""
+    config_format: str | ConfigFormat = "auto"
+    """The format of the model config to load:\n
+    - "auto" will try to load the config in hf format if available after trying
+      to load in mistral format.\n
+    - "hf" will load the config in hf format.\n
+    - "mistral" will load the config in mistral format."""
+    hf_token: bool | str | None = None
+    """The token to use as HTTP bearer authorization for remote files. If
+    `True`, will use the token generated when running `huggingface-cli login`
+    (stored in `~/.huggingface`)."""
+    hf_overrides: HfOverrides = field(default_factory=dict)
+    """If a dictionary, contains arguments to be forwarded to the Hugging Face
+    config. If a callable, it is called to update the HuggingFace config."""
+    logits_processor_pattern: str | None = None
+    """Optional regex pattern specifying valid logits processor qualified names
+    that can be passed with the `logits_processors` extra completion argument.
+    Defaults to `None`, which allows no processors."""
+    generation_config: str = "auto"
+    """The folder path to the generation config. Defaults to `"auto"`, the
+    generation config will be loaded from model path. If set to `"vllm"`, no
+    generation config is loaded, vLLM defaults will be used. If set to a folder
+    path, the generation config will be loaded from the specified folder path.
+    If `max_new_tokens` is specified in generation config, then it sets a
+    server-wide limit on the number of output tokens for all requests."""
+    override_generation_config: dict[str, Any] = field(default_factory=dict)
+    """Overrides or sets generation config. e.g. `{"temperature": 0.5}`. If
+    used with `--generation-config auto`, the override parameters will be
+    merged with the default config from the model. If used with
+    `--generation-config vllm`, only the override parameters are used."""
+    enable_sleep_mode: bool = False
+    """Enable sleep mode for the engine (only cuda and
+    hip platforms are supported)."""
+    model_impl: str | ModelImpl = "auto"
+    """Which implementation of the model to use:\n
+    - "auto" will try to use the vLLM implementation, if it exists, and fall
+      back to the Transformers implementation if no vLLM implementation is
+      available.\n
+    - "vllm" will use the vLLM model implementation.\n
+    - "transformers" will use the Transformers model implementation.\n
+    - "terratorch" will use the TerraTorch model implementation.
+    """
+    override_attention_dtype: str | None = None
+    """Override dtype for attention."""
+    logits_processors: list[str | type[LogitsProcessor]] | None = None
+    """One or more logits processors' fully-qualified class names or class
+    definitions."""
+    io_processor_plugin: str | None = None
+    """IOProcessor plugin name to load at model startup."""
+
+    # Pooler config
+    pooler_config: PoolerConfig | None = None
+    """Pooler config which controls the behaviour of output pooling in pooling
+    models."""
+
+    # Multimodal config and init vars
+    multimodal_config: MultiModalConfig | None = None
+    """Configuration for multimodal model. If `None`, this will be inferred
+    from the architecture of `self.model`."""
+    limit_mm_per_prompt: InitVar[dict[str, int | dict[str, int]] | None] = None
+    enable_mm_embeds: InitVar[bool | None] = None
+    media_io_kwargs: InitVar[dict[str, dict[str, Any]] | None] = None
+    mm_processor_kwargs: InitVar[dict[str, Any] | None] = None
+    mm_processor_cache_gb: InitVar[float | None] = None
+    mm_processor_cache_type: InitVar[MMCacheType | None] = None
+    mm_shm_cache_max_object_size_mb: InitVar[int | None] = None
+    mm_encoder_tp_mode: InitVar[MMEncoderTPMode | None] = None
+    mm_encoder_attn_backend: InitVar[AttentionBackendEnum | str | None] = None
+    interleave_mm_strings: InitVar[bool | None] = None
+    skip_mm_profiling: InitVar[bool | None] = None
+    video_pruning_rate: InitVar[float | None] = None
+
+    def compute_hash(self) -> str:
+        """
+        WARNING: Whenever a new field is added to this config,
+        ensure that it is included in the factors list if
+        it affects the computation graph.
+
+        Provide a hash that uniquely identifies all the configs
+        that affect the structure of the computation
+        graph from input ids/embeddings to the final hidden states,
+        excluding anything before input ids/embeddings and after
+        the final hidden states.
+        """
+        ignored_factors = {
+            "runner",
+            "convert",
+            "tokenizer",
+            "tokenizer_mode",
+            "seed",
+            "hf_config_path",
+            "allowed_local_media_path",
+            "allowed_media_domains",
+            "tokenizer_revision",
+            "spec_target_max_model_len",
+            "enforce_eager",
+            "logprobs_mode",
+            "disable_cascade_attn",
+            "skip_tokenizer_init",
+            "served_model_name",
+            "config_format",
+            "hf_token",
+            "hf_overrides",
+            "logits_processor_pattern",
+            "override_attention_dtype",
+            "logits_processors",
+            "io_processor_plugin",
+            "pooler_config",
+            "multimodal_config",
+            "limit_mm_per_prompt",
+            "media_io_kwargs",
+            "mm_processor_kwargs",
+            "mm_processor_cache_gb",
+            "mm_processor_cache_type",
+            "mm_shm_cache_max_object_size_mb",
+            "mm_encoder_tp_mode",
+            "interleave_mm_strings",
+            "skip_mm_profiling",
+        }
+
+        from vllm.config.utils import get_hash_factors, hash_factors
+
+        factors = get_hash_factors(self, ignored_factors)
+        return hash_factors(factors)
+
+    def _update_nested(
+        self,
+        target: PretrainedConfig | dict[str, Any],
+        updates: dict[str, Any],
+    ) -> None:
+        """Recursively updates a config or dict with nested updates."""
+        for key, value in updates.items():
+            if isinstance(value, dict):
+                # Get the nested target
+                if isinstance(target, dict):
+                    nested_target = target.get(key)
+                else:
+                    nested_target = getattr(target, key, None)
+
+                # If nested target exists and can be updated recursively
+                if nested_target is not None and (
+                    isinstance(nested_target, dict)
+                    or hasattr(nested_target, "__dict__")
+                ):
+                    self._update_nested(nested_target, value)
+                    continue
+
+            # Set the value (base case)
+            if isinstance(target, dict):
+                target[key] = value
+            else:
+                setattr(target, key, value)
+
+    def _apply_dict_overrides(
+        self,
+        config: PretrainedConfig,
+        overrides: dict[str, Any],
+    ) -> None:
+        """Apply dict overrides, handling both nested configs and dict values."""
+        from transformers import PretrainedConfig
+
+        for key, value in overrides.items():
+            attr = getattr(config, key, None)
+            if attr is not None and isinstance(attr, PretrainedConfig):
+                # It's a nested config - recursively update it
+                self._update_nested(attr, value)
+            else:
+                # It's a dict-valued parameter - set it directly
+                setattr(config, key, value)
+
+    def __post_init__(
+        self,
+        # Multimodal config init vars
+        limit_mm_per_prompt: dict[str, int | dict[str, int]] | None,
+        enable_mm_embeds: bool | None,
+        media_io_kwargs: dict[str, dict[str, Any]] | None,
+        mm_processor_kwargs: dict[str, Any] | None,
+        mm_processor_cache_gb: float | None,
+        mm_processor_cache_type: MMCacheType | None,
+        mm_shm_cache_max_object_size_mb: int | None,
+        mm_encoder_tp_mode: MMEncoderTPMode | None,
+        mm_encoder_attn_backend: AttentionBackendEnum | str | None,
+        interleave_mm_strings: bool | None,
+        skip_mm_profiling: bool | None,
+        video_pruning_rate: float | None,
+    ) -> None:
+        # Set served_model_name before calling maybe_model_redirect(self.model)
+        self.served_model_name = get_served_model_name(
+            self.model, self.served_model_name
+        )
+        self.model = maybe_model_redirect(self.model)
+        # The tokenizer is consistent with the model by default.
+        if self.tokenizer is None:
+            self.tokenizer = self.model
+        if self.tokenizer_revision is None:
+            self.tokenizer_revision = self.revision
+        self.tokenizer = maybe_model_redirect(self.tokenizer)
+
+        if isinstance(self.hf_config_path, str):
+            self.hf_config_path = maybe_model_redirect(self.hf_config_path)
+
+        if callable(self.hf_overrides):
+            hf_overrides_kw = {}
+            hf_overrides_fn = self.hf_overrides
+            dict_overrides: dict[str, Any] = {}
+        else:
+            # Separate dict overrides from flat ones.
+            # We'll determine how to apply dict overrides after loading the config.
+            hf_overrides_kw = {}
+            dict_overrides = {}
+            for key, value in self.hf_overrides.items():
+                if isinstance(value, dict):
+                    dict_overrides[key] = value
+                else:
+                    hf_overrides_kw[key] = value
+            hf_overrides_fn = None
+
+        self.maybe_pull_model_tokenizer_for_runai(self.model, self.tokenizer)
+
+        from vllm.platforms import current_platform
+
+        if self.override_attention_dtype is not None and not current_platform.is_rocm():
+            warnings.warn(
+                "override-attention-dtype is set but not using ROCm platform",
+                stacklevel=2,
+            )
+
+        if self.enable_sleep_mode and not current_platform.is_sleep_mode_available():
+            raise ValueError("Sleep mode is not supported on current platform.")
+
+        hf_config = get_config(
+            self.hf_config_path or self.model,
+            self.trust_remote_code,
+            self.revision,
+            self.code_revision,
+            self.config_format,
+            hf_overrides_kw=hf_overrides_kw,
+            hf_overrides_fn=hf_overrides_fn,
+        )
+        hf_config = maybe_patch_hf_config_from_gguf(
+            self.model,
+            hf_config,
+        )
+
+        self.hf_config = hf_config
+        if dict_overrides:
+            self._apply_dict_overrides(hf_config, dict_overrides)
+        self.hf_text_config = get_hf_text_config(self.hf_config)
+        self.attention_chunk_size = getattr(
+            self.hf_text_config, "attention_chunk_size", None
+        )
+        self.encoder_config = self._get_encoder_config()
+        self.hf_image_processor_config = get_hf_image_processor_config(
+            self.model, hf_token=self.hf_token, revision=self.revision
+        )
+        self.model_arch_config = self.get_model_arch_config()
+
+        architectures = self.architectures
+        registry = self.registry
+        is_generative_model = registry.is_text_generation_model(architectures, self)
+        is_pooling_model = registry.is_pooling_model(architectures, self)
+
+        self.runner_type = self._get_runner_type(architectures, self.runner)
+        self.convert_type = self._get_convert_type(
+            architectures, self.runner_type, self.convert
+        )
+
+        if self.runner_type == "generate" and not is_generative_model:
+            generate_converts = _RUNNER_CONVERTS["generate"]
+            if self.convert_type not in generate_converts:
+                # Currently we don't have any converters for generative models
+                raise ValueError("This model does not support `--runner generate`.")
+        if self.runner_type == "pooling" and not is_pooling_model:
+            pooling_converts = _RUNNER_CONVERTS["pooling"]
+            if self.convert_type not in pooling_converts:
+                convert_option = "<" + "|".join(pooling_converts) + ">"
+                raise ValueError(
+                    "This model does not support `--runner pooling`. "
+                    f"You can pass `--convert {convert_option}` to adapt "
+                    "it into a pooling model."
+                )
+
+        # Note: Initialize these attributes early because transformers fallback
+        # may fail to load dynamic modules in child processes
+        model_info, arch = registry.inspect_model_cls(architectures, self)
+        self._model_info = model_info
+        self._architecture = arch
+        logger.info("Resolved architecture: %s", arch)
+
+        # Init pooler config if needed
+        if self.runner_type == "pooling":
+            if self.pooler_config is None:
+                self.pooler_config = PoolerConfig()
+
+            base_config = get_pooling_config(self.model, self.revision)
+            if base_config is not None:
+                # Only set values that are not overridden by the user
+                for k, v in base_config.items():
+                    if getattr(self.pooler_config, k) is None:
+                        setattr(self.pooler_config, k, v)
+
+            default_seq_pooling_type = self._model_info.default_seq_pooling_type
+            if self.pooler_config.seq_pooling_type is None:
+                self.pooler_config.seq_pooling_type = default_seq_pooling_type
+            default_tok_pooling_type = self._model_info.default_tok_pooling_type
+            if self.pooler_config.tok_pooling_type is None:
+                self.pooler_config.tok_pooling_type = default_tok_pooling_type
+
+        self.dtype: torch.dtype = _get_and_verify_dtype(
+            self.model,
+            self.hf_config,
+            self.dtype,
+            is_pooling_model=self.runner_type == "pooling",
+            revision=self.revision,
+        )
+
+        self.original_max_model_len = self.max_model_len
+        self.max_model_len = self.get_and_verify_max_len(self.max_model_len)
+
+        if self.is_encoder_decoder:
+            self.mm_processor_cache_gb = 0
+            logger.info("Encoder-decoder model detected, disabling mm processor cache.")
+
+        # Init multimodal config if needed
+        if self._model_info.supports_multimodal:
+            if (
+                mm_encoder_tp_mode == "data"
+                and not self._model_info.supports_multimodal_encoder_tp_data
+            ):
+                logger.warning_once(
+                    "This model does not support `--mm-encoder-tp-mode data`. "
+                    "Falling back to `--mm-encoder-tp-mode weights`."
+                )
+                mm_encoder_tp_mode = "weights"
+
+            mm_config_kwargs = dict(
+                limit_per_prompt=limit_mm_per_prompt,
+                enable_mm_embeds=enable_mm_embeds,
+                media_io_kwargs=media_io_kwargs,
+                mm_processor_kwargs=mm_processor_kwargs,
+                mm_processor_cache_gb=mm_processor_cache_gb,
+                mm_processor_cache_type=mm_processor_cache_type,
+                mm_shm_cache_max_object_size_mb=mm_shm_cache_max_object_size_mb,
+                mm_encoder_tp_mode=mm_encoder_tp_mode,
+                mm_encoder_attn_backend=mm_encoder_attn_backend,
+                interleave_mm_strings=interleave_mm_strings,
+                skip_mm_profiling=skip_mm_profiling,
+                video_pruning_rate=video_pruning_rate,
+            )
+
+            mm_config_kwargs = {
+                k: v for k, v in mm_config_kwargs.items() if v is not None
+            }
+
+            self.multimodal_config = MultiModalConfig(**mm_config_kwargs)
+
+        # Multimodal GGUF models must use original repo for mm processing
+        if is_gguf(self.tokenizer) and self.is_multimodal_model:
+            raise ValueError(
+                "Loading a multimodal GGUF model needs to use the original "
+                "tokenizer. Please specify the unquantized hf model's "
+                "repo name or path using the --tokenizer argument."
+            )
+
+        if self.disable_sliding_window:
+            # Set after get_and_verify_max_len to ensure that max_model_len
+            # can be correctly capped to sliding window size
+            self.hf_text_config.sliding_window = None
+
+        # Avoid running try_verify_and_update_config multiple times
+        self.config_updated = False
+        self._try_verify_and_update_model_config()
+        self._verify_quantization()
+        self._verify_cuda_graph()
+        self._verify_bnb_config()
+
+    def get_model_arch_config(
+        self,
+    ) -> ModelArchitectureConfig:
+        convertor_cls = MODEL_ARCH_CONFIG_CONVERTORS.get(
+            self.hf_config.model_type, ModelArchConfigConvertorBase
+        )
+        convertor = convertor_cls(self.hf_config, self.hf_text_config)
+        return convertor.convert()
+
+    @field_validator("tokenizer", "max_model_len", mode="wrap")
+    @classmethod
+    def _skip_none_validation(cls, value: Any, handler: Callable) -> Any:
+        """Skip validation if the value is `None` when initialisation is delayed."""
+        if value is None:
+            return value
+        return handler(value)
+
+    @field_validator("tokenizer_mode", mode="after")
+    def _lowercase_tokenizer_mode(cls, tokenizer_mode: str) -> str:
+        return tokenizer_mode.lower()
+
+    @field_validator("quantization", mode="before")
+    @classmethod
+    def validate_quantization_before(cls, value: Any) -> Any:
+        if isinstance(value, str):
+            return value.lower()
+        return value
+
+    @model_validator(mode="after")
+    def validate_model_config_after(self: "ModelConfig") -> "ModelConfig":
+        """Called after __post_init__."""
+        if not isinstance(self.tokenizer, str):
+            raise ValueError(
+                f"tokenizer must be a string, got "
+                f"{type(self.tokenizer).__name__}: {self.tokenizer!r}. "
+                "Please provide a valid tokenizer path or HuggingFace model ID."
+            )
+        if not isinstance(self.max_model_len, int):
+            raise ValueError(
+                f"max_model_len must be a positive integer, "
+                f"got {type(self.max_model_len).__name__}: {self.max_model_len!r}. "
+                "Example: max_model_len=2048"
+            )
+        return self
+
+    def _get_transformers_backend_cls(self) -> str:
+        """Determine which Transformers modeling backend class will be used if
+        `model_impl` is set to `transformers` or `auto`."""
+        cls = "Transformers"
+        # If 'hf_config != hf_text_config' it's a nested config, i.e. multimodal
+        cls += "MultiModal" if self.hf_config != self.hf_text_config else ""
+        cls += "MoE" if self.is_moe else ""
+        # Check if the architecture we're wrapping has defaults
+        runner = None
+        task = None
+        if defaults := try_match_architecture_defaults(self.architectures[0]):
+            _, (runner, task) = defaults
+        # User-specified values take precedence
+        if self.runner != "auto":
+            runner = self.runner
+        # Only consider Transformers modeling backend pooling classes if we're wrapping
+        # an architecture that defaults to pooling. Otherwise, we return the LM class
+        # and use adapters.
+        if runner == "pooling" and task in {"embed", "classify"}:
+            if task == "embed":
+                cls += "EmbeddingModel"
+            elif task == "classify":
+                cls += "ForSequenceClassification"
+        else:
+            cls += "ForCausalLM"
+        return cls
+
+    def using_transformers_backend(self) -> bool:
+        """Check if the model is using the Transformers modeling backend class."""
+        used_cls = self._model_info.architecture
+        transformers_backend_cls = self._get_transformers_backend_cls()
+        return used_cls == transformers_backend_cls
+
+    @property
+    def registry(self):
+        return me_models.ModelRegistry
+
+    @property
+    def architectures(self) -> list[str]:
+        return self.model_arch_config.architectures
+
+    @property
+    def architecture(self) -> str:
+        """The architecture vLLM actually used."""
+        return self._architecture
+
+    def maybe_pull_model_tokenizer_for_runai(self, model: str, tokenizer: str) -> None:
+        """Pull model/tokenizer from Object Storage to a temporary
+        directory when needed.
+
+        Args:
+            model: Model name or path
+            tokenizer: Tokenizer name or path
+        """
+
+        # Skip if model_weights is already set (model already pulled)
+        if self.model_weights:
+            return
+
+        if not (is_runai_obj_uri(model) or is_runai_obj_uri(tokenizer)):
+            return
+
+        if is_runai_obj_uri(model):
+            object_storage_model = ObjectStorageModel(url=model)
+            object_storage_model.pull_files(
+                model, allow_pattern=["*.model", "*.py", "*.json"]
+            )
+            self.model_weights = model
+            self.model = object_storage_model.dir
+
+            # If tokenizer is same as model, download to same directory
+            if model == tokenizer:
+                object_storage_model.pull_files(
+                    model,
+                    ignore_pattern=[
+                        "*.pt",
+                        "*.safetensors",
+                        "*.bin",
+                        "*.tensors",
+                        "*.pth",
+                    ],
+                )
+                self.tokenizer = object_storage_model.dir
+                return
+
+        # Only download tokenizer if needed and not already handled
+        if is_runai_obj_uri(tokenizer):
+            object_storage_tokenizer = ObjectStorageModel(url=tokenizer)
+            object_storage_tokenizer.pull_files(
+                model,
+                ignore_pattern=["*.pt", "*.safetensors", "*.bin", "*.tensors", "*.pth"],
+            )
+            self.tokenizer = object_storage_tokenizer.dir
+
758
+ def _get_encoder_config(self):
759
+ model = self.model
760
+ if is_remote_gguf(model):
761
+ model, _ = split_remote_gguf(model)
762
+ return get_sentence_transformer_tokenizer_config(model, self.revision)
763
+
764
+ def _get_default_runner_type(
765
+ self,
766
+ architectures: list[str],
767
+ ) -> RunnerType:
768
+ registry = self.registry
769
+
770
+ # Some Sentence Transformers models use *ForCausalLM archs
771
+ if get_pooling_config(self.model, self.revision):
772
+ return "pooling"
773
+
774
+ for arch in architectures:
775
+ if arch in registry.get_supported_archs():
776
+ if registry.is_pooling_model(architectures, self):
777
+ return "pooling"
778
+ if registry.is_text_generation_model(architectures, self):
779
+ return "generate"
780
+
781
+ match = try_match_architecture_defaults(arch)
782
+ if match:
783
+ _, (runner_type, _) = match
784
+ return runner_type
785
+
786
+ return "generate"
+
+     def _get_runner_type(
+         self,
+         architectures: list[str],
+         runner: RunnerOption,
+     ) -> RunnerType:
+         if runner != "auto":
+             return runner
+
+         runner_type = self._get_default_runner_type(architectures)
+
+         # Don't log the most common case
+         if runner_type != "generate":
+             logger.info(
+                 "Resolved `--runner auto` to `--runner %s`. "
+                 "Pass the value explicitly to silence this message.",
+                 runner_type,
+             )
+
+         return runner_type
+
+     def _get_default_convert_type(
+         self,
+         architectures: list[str],
+         runner_type: RunnerType,
+     ) -> ConvertType:
+         registry = self.registry
+
+         for arch in architectures:
+             if arch in registry.get_supported_archs():
+                 if runner_type == "generate" and registry.is_text_generation_model(
+                     architectures, self
+                 ):
+                     return "none"
+                 if runner_type == "pooling" and registry.is_pooling_model(
+                     architectures, self
+                 ):
+                     return "none"
+
+             match = try_match_architecture_defaults(arch, runner_type=runner_type)
+             if match:
+                 _, (_, convert_type) = match
+                 return convert_type
+
+         # This is to handle Sentence Transformers models that use *ForCausalLM
+         # and also multi-modal pooling models which are not defined as
+         # Sentence Transformers models
+         if runner_type == "pooling":
+             return "embed"
+
+         return "none"
+
+     def _get_convert_type(
+         self,
+         architectures: list[str],
+         runner_type: RunnerType,
+         convert: ConvertOption,
+     ) -> ConvertType:
+         if convert == "reward":
+             logger.warning(
+                 "`--convert reward` is deprecated and will be removed in v0.15. "
+                 "Please use `--convert embed` instead."
+             )
+             return "embed"
+
+         if convert != "auto":
+             return convert
+
+         convert_type = self._get_default_convert_type(architectures, runner_type)
+
+         # Don't log the most common case
+         if convert_type != "none":
+             logger.info(
+                 "Resolved `--convert auto` to `--convert %s`. "
+                 "Pass the value explicitly to silence this message.",
+                 convert_type,
+             )
+
+         return convert_type
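+
+     # Worked example (hypothetical architecture name): with runner_type
+     # "pooling" and an unregistered "FooForCausalLM", no suffix matches that
+     # runner, so the default convert type falls back to "embed"; a registered
+     # pooling model resolves to "none".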
+
+     def _verify_quantization(self) -> None:
+         supported_quantization = me_quant.QUANTIZATION_METHODS
+         if self.quantization is not None:
+             self.quantization = cast(me_quant.QuantizationMethods, self.quantization)
+
+         # Parse quantization method from the HF model config, if available.
+         quant_cfg = self.model_arch_config.quantization_config
+
+         if quant_cfg is not None:
+             quant_method = quant_cfg["quant_method"]
+             # Quantization methods which are overrides (i.e. they have a
+             # `override_quantization_method` method) must be checked in order
+             # of preference (this is particularly important for GPTQ).
+             overrides = [
+                 "bitblas",
+                 "gptq_marlin_24",
+                 "gptq_marlin",
+                 "gptq_bitblas",
+                 "awq_marlin",
+                 "ipex",
+                 "moe_wna16",
+                 "modelopt",
+                 "modelopt_fp4",
+                 "petit_nvfp4",
+                 # Ensure heavy backends are probed last to avoid unnecessary
+                 # imports during override detection (e.g., MXFP4 imports Triton)
+                 "mxfp4",
+                 "cpu_awq",
+             ]
+             quantization_methods = [
+                 q for q in supported_quantization if q not in overrides
+             ]
+             # Any custom overrides will be in quantization_methods so we place
+             # them at the start of the list so custom overrides have preference
+             # over the built-in ones.
+             quantization_methods = quantization_methods + overrides
+
+             # Detect which checkpoint it is
+             for name in quantization_methods:
+                 method = me_quant.get_quantization_config(name)
+                 quantization_override = method.override_quantization_method(
+                     quant_cfg, self.quantization
+                 )
+                 if quantization_override is not None:
+                     # Raise error if the override is not custom (custom would
+                     # be in QUANTIZATION_METHODS but not QuantizationMethods)
+                     # and hasn't been added to the overrides list.
+                     if (
+                         name in get_args(me_quant.QuantizationMethods)
+                         and name not in overrides
+                     ):
+                         raise ValueError(
+                             f"Quantization method {name} is an override but "
+                             "has not been added to the `overrides` list "
+                             "above. This is necessary to ensure that the "
+                             "overrides are checked in order of preference."
+                         )
+                     quant_method = quantization_override
+                     self.quantization = quantization_override
+                     break
+
+             quant_method = quant_method if quant_method != "" else None
+             # Verify quantization configurations.
+             if self.quantization is None:
+                 self.quantization = quant_method
+             elif self.quantization != quant_method:
+                 raise ValueError(
+                     "Quantization method specified in the model config "
+                     f"({quant_method}) does not match the quantization "
+                     f"method specified in the `quantization` argument "
+                     f"({self.quantization})."
+                 )
+
+         if self.quantization is not None:
+             if self.quantization not in supported_quantization:
+                 raise ValueError(
+                     f"Unknown quantization method: {self.quantization}. Must "
+                     f"be one of {supported_quantization}."
+                 )
+             from vllm.platforms import current_platform
+
+             current_platform.verify_quantization(self.quantization)
+
+             if self.quantization in me_quant.DEPRECATED_QUANTIZATION_METHODS:
+                 if self.allow_deprecated_quantization:
+                     logger.warning(
+                         "The quantization method %s is deprecated "
+                         "and will be removed in future versions of vLLM.",
+                         self.quantization,
+                     )
+                 else:
+                     raise ValueError(
+                         f"The quantization method {self.quantization} is "
+                         "deprecated and will be removed in future versions "
+                         "of vLLM. To bypass, set "
+                         "`--allow-deprecated-quantization`."
+                     )
+
+     def _verify_cuda_graph(self) -> None:
+         # CUDAGraph capture not supported for encoder-decoder models on ROCm
+         unsupported_rocm = self.is_encoder_decoder
+         if unsupported_rocm and not self.enforce_eager and current_platform.is_rocm():
+             logger.warning(
+                 "CUDA graph is not supported for %s on ROCm yet, falling back "
+                 "to eager mode.",
+                 self.model_arch_config.model_type,
+             )
+             self.enforce_eager = True
+
+     def _verify_bnb_config(self) -> None:
+         """
+         The current version of bitsandbytes (0.46.1) with 8-bit models does not
+         yet support CUDA graph.
+         # TODO: Remove this when bitsandbytes adds support.
+         """
+         is_bitsandbytes = self.quantization == "bitsandbytes"
+         has_quantization_config = self.model_arch_config.quantization_config is not None
+         is_8bit = (
+             self.model_arch_config.quantization_config.get("load_in_8bit", False)
+             if has_quantization_config
+             else False
+         )
+         if all(
+             [
+                 is_bitsandbytes,
+                 has_quantization_config,
+                 is_8bit,
+                 not self.enforce_eager,
+             ]
+         ):
+             logger.warning(
+                 "CUDA graph is not supported on BitsAndBytes 8bit yet, "
+                 "falling back to eager mode."
+             )
+
+             self.enforce_eager = True
+
+     def _verify_with_expert_parallelism(self) -> None:
+         if not self.is_moe:
+             raise ValueError(
+                 "Number of experts in the model must be greater than 0 "
+                 "when expert parallelism is enabled."
+             )
+
+     def _try_verify_and_update_model_config(self):
+         # Avoid running try_verify_and_update_config multiple times
+         if getattr(self, "config_updated", False):
+             return
+
+         architecture = self.architecture
+         if architecture is None:
+             return
+
+         from vllm.model_executor.models.config import (
+             MODELS_CONFIG_MAP,
+         )
+
+         cls = MODELS_CONFIG_MAP.get(architecture, None)
+         if cls is not None:
+             cls.verify_and_update_model_config(self)
+
+     def verify_dual_chunk_attention_config(
+         self,
+         load_config: LoadConfig,
+     ) -> None:
+         if hasattr(self.hf_config, "dual_chunk_attention_config"):
+             # Try loading the sparse attention config
+             from vllm.model_executor.model_loader.weight_utils import (
+                 get_sparse_attention_config,
+             )
+
+             sparse_attn_config = get_sparse_attention_config(self, load_config)
+             if sparse_attn_config:
+                 self.hf_config.dual_chunk_attention_config[
+                     "sparse_attention_config"
+                 ] = sparse_attn_config
+                 if (
+                     "sparse_attention_enabled"
+                     not in self.hf_config.dual_chunk_attention_config
+                 ):
+                     self.hf_config.dual_chunk_attention_config[
+                         "sparse_attention_enabled"
+                     ] = True
+
+     def verify_with_parallel_config(
+         self,
+         parallel_config: ParallelConfig,
+     ) -> None:
+         total_num_attention_heads = self.model_arch_config.total_num_attention_heads
+         tensor_parallel_size = parallel_config.tensor_parallel_size
+         if total_num_attention_heads % tensor_parallel_size != 0:
+             raise ValueError(
+                 f"Total number of attention heads ({total_num_attention_heads})"
+                 " must be divisible by tensor parallel size "
+                 f"({tensor_parallel_size})."
+             )
+
+         if parallel_config.enable_expert_parallel:
+             self._verify_with_expert_parallelism()
+
+         pipeline_parallel_size = parallel_config.pipeline_parallel_size
+         if pipeline_parallel_size > 1 and not self.registry.is_pp_supported_model(
+             self.architectures, self
+         ):
+             raise NotImplementedError(
+                 "Pipeline parallelism is not supported for this model. "
+                 "Supported models implement the `SupportsPP` interface."
+             )
+
+         decode_context_parallel_size = parallel_config.decode_context_parallel_size
+         if decode_context_parallel_size > 1 and not self.use_mla:
+             total_num_kv_heads = self.get_total_num_kv_heads()
+             assert tensor_parallel_size > total_num_kv_heads, (
+                 f"tensor parallel size {tensor_parallel_size} must be greater "
+                 f"than total num kv heads {total_num_kv_heads} when enabling "
+                 f"decode context parallel for GQA/MQA"
+             )
+
+             max_dcp_size = tensor_parallel_size // total_num_kv_heads
+             assert decode_context_parallel_size <= max_dcp_size, (
+                 f"decode context parallel size must be less than or equal to "
+                 f"(tensor parallel size {tensor_parallel_size} // total "
+                 f"num kv heads {total_num_kv_heads}) = {max_dcp_size}, "
+                 f"but got {decode_context_parallel_size}"
+             )
+
+             num_q_per_kv = total_num_attention_heads // total_num_kv_heads
+             assert num_q_per_kv % decode_context_parallel_size == 0, (
+                 f"Total number of q per kv attn heads ({num_q_per_kv})"
+                 " must be divisible by dcp world size when enabling "
+                 "decode context parallel for GQA "
+                 f"({parallel_config.decode_context_parallel_size})."
+             )
+
+     def get_sliding_window(self) -> int | None:
+         """Get the sliding window size from the HF text config if present."""
+         return getattr(self.hf_text_config, "sliding_window", None)
+
+     def get_vocab_size(self) -> int:
+         return self.model_arch_config.vocab_size
+
+     def get_hidden_size(self) -> int:
+         return self.model_arch_config.hidden_size
+
+     def get_inputs_embeds_size(self) -> int:
+         # The size of inputs_embeds is usually identical to the size
+         # of the hidden states, however there are exceptions, such as
+         # embedding models like CLIP and SigLIP
+         names = ("projection_dim", "projection_size")
+         return getattr_iter(
+             self.hf_text_config, names, default_factory=self.get_hidden_size
+         )
+
+     @property
+     def is_deepseek_mla(self) -> bool:
+         return self.model_arch_config.is_deepseek_mla
+
+     @cached_property
+     def is_mm_prefix_lm(self) -> bool:
+         """Whether to use bidirectional attention for mm positions."""
+         MM_PREFIX_LM_MODELS = (
+             "gemma3",
+             "paligemma",
+         )
+         if not hasattr(self.hf_config, "model_type"):
+             return False
+         return self.hf_config.model_type in MM_PREFIX_LM_MODELS
+
+     def get_head_size(self) -> int:
+         return self.model_arch_config.head_size
+
+     def get_total_num_kv_heads(self) -> int:
+         """Returns the total number of KV heads."""
+         return self.model_arch_config.total_num_kv_heads
+
+     def get_num_kv_heads(self, parallel_config: ParallelConfig) -> int:
+         """Returns the number of KV heads per GPU."""
+         if self.use_mla:
+             # When using MLA during decode it becomes MQA
+             return 1
+
+         total_num_kv_heads = self.get_total_num_kv_heads()
+         # If tensor parallelism is used, we divide the number of KV heads by
+         # the tensor parallel size. We will replicate the KV heads in the
+         # case where the number of KV heads is smaller than the tensor
+         # parallel size so each GPU has at least one KV head.
+         return max(1, total_num_kv_heads // parallel_config.tensor_parallel_size)
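+
+     # Worked example (hypothetical sizes): with 8 total KV heads,
+     # tensor_parallel_size=16 gives max(1, 8 // 16) == 1 KV head per GPU
+     # (replication), while tensor_parallel_size=4 gives 8 // 4 == 2.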
+
+     def get_num_attention_heads(self, parallel_config: ParallelConfig) -> int:
+         num_heads = self.model_arch_config.total_num_attention_heads
+         return num_heads // parallel_config.tensor_parallel_size
+
+     def get_num_experts(self) -> int:
+         return self.model_arch_config.num_experts
+
+     def get_total_num_hidden_layers(self) -> int:
+         return self.model_arch_config.total_num_hidden_layers
+
+     def get_layers_start_end_indices(
+         self, parallel_config: ParallelConfig
+     ) -> tuple[int, int]:
+         from vllm.distributed.utils import get_pp_indices
+
+         total_num_hidden_layers = self.get_total_num_hidden_layers()
+
+         # the layout order is: DP x PP x TP
+         pp_rank = (
+             parallel_config.rank // parallel_config.tensor_parallel_size
+         ) % parallel_config.pipeline_parallel_size
+         pp_size = parallel_config.pipeline_parallel_size
+         start, end = get_pp_indices(total_num_hidden_layers, pp_rank, pp_size)
+         return start, end
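+
+     # Worked example (hypothetical layout): with tensor_parallel_size=2,
+     # pipeline_parallel_size=2 and global rank 5, the DP x PP x TP layout
+     # gives pp_rank = (5 // 2) % 2 == 0, i.e. this rank holds the first
+     # pipeline stage's slice of layers.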
+
+     def get_num_layers(self, parallel_config: ParallelConfig) -> int:
+         start, end = self.get_layers_start_end_indices(parallel_config)
+         return end - start
+
+     def get_num_layers_by_block_type(
+         self,
+         parallel_config: ParallelConfig,
+         block_type: LayerBlockType = "attention",
+     ) -> int:
+         # This function relies on 'layers_block_type' in hf_config; for
+         # configs without this attribute, we need the workarounds below.
+         attn_block_type = block_type == "attention"
+         is_transformer = (
+             not self.is_hybrid and not self.has_noops and not self.is_attention_free
+         )
+         start, end = self.get_layers_start_end_indices(parallel_config)
+
+         if is_transformer:
+             # Handle the basic case first
+             return end - start if attn_block_type else 0
+         elif self.is_attention_free:
+             # Attention free
+             # Note that this code assumes there is
+             # only one attention-free block type.
+             return 0 if attn_block_type else end - start
+         elif self.has_noops:
+             block_configs = self.hf_config.block_configs
+             return sum(not bc.attention.no_op for bc in block_configs[start:end])
+         else:
+             # Hybrid model Jamba
+             layers_block_type_value = getattr(
+                 self.hf_text_config, "layers_block_type", None
+             )
+             if layers_block_type_value is not None:
+                 if self.model_arch_config.text_model_type == "zamba2":
+                     if attn_block_type:
+                         return sum(
+                             t == "hybrid" for t in layers_block_type_value[start:end]
+                         )
+                     else:
+                         return self.get_num_layers(parallel_config)
+                 return sum(t == block_type for t in layers_block_type_value[start:end])
+
+             # Hybrid model Minimax
+             attn_type_list = getattr(self.hf_config, "attn_type_list", None)
+             if attn_type_list:
+                 return sum(t == 1 for t in attn_type_list[start:end])
+
+             # Hybrid model Qwen3Next
+             layer_types_value = getattr(self.hf_config, "layer_types", None)
+             if layer_types_value is not None:
+                 if block_type == "attention":
+                     return sum(
+                         t == "full_attention" for t in layer_types_value[start:end]
+                     )
+                 elif block_type == "linear_attention":
+                     return sum(
+                         t == "linear_attention" for t in layer_types_value[start:end]
+                     )
+                 else:
+                     return sum(t == block_type for t in layer_types_value[start:end])
+
+             if (
+                 layers_block_type_value is None
+                 and attn_type_list is None
+                 and layer_types_value is None
+             ):
+                 raise ValueError(
+                     "The model is a hybrid without a layers_block_type, an "
+                     "attn_type_list, or a layer_types in the hf_config; "
+                     f"cannot determine the number of {block_type} layers"
+                 )
+
+     def get_mamba_chunk_size(self) -> int | None:
+         """
+         Returns the mamba chunk size if it exists.
+         """
+         # used by e.g. Bamba, FalconH1, Granite, PLaMo2
+         chunk_size = getattr(self.hf_text_config, "mamba_chunk_size", None)
+         if chunk_size is None:
+             # used by e.g. Mamba2, NemotronH, Zamba
+             chunk_size = getattr(self.hf_text_config, "chunk_size", None)
+
+         # Since Mamba1 does not have a chunk notion,
+         # we use a default chunk size of 2048.
+         if chunk_size is None:
+             chunk_size = 2048
+
+         return chunk_size
+
+     def get_multimodal_config(self) -> MultiModalConfig:
+         """
+         Get the multimodal configuration of the model.
+
+         Raises:
+             ValueError: If the model is not multimodal.
+         """
+         if self.multimodal_config is None:
+             raise ValueError("The model is not multimodal.")
+
+         return self.multimodal_config
+
+     def try_get_generation_config(self) -> dict[str, Any]:
+         """
+         This method attempts to retrieve the non-default values of the
+         generation config for this model.
+
+         The generation config can contain information about special tokens, as
+         well as sampling parameters, which is why this method exists separately
+         from `get_diff_sampling_param`.
+
+         Returns:
+             A dictionary containing the non-default generation config.
+         """
+         if self.generation_config in {"auto", "vllm"}:
+             config = try_get_generation_config(
+                 self.hf_config_path or self.model,
+                 trust_remote_code=self.trust_remote_code,
+                 revision=self.revision,
+                 config_format=self.config_format,
+             )
+         else:
+             config = try_get_generation_config(
+                 self.generation_config,
+                 trust_remote_code=self.trust_remote_code,
+                 config_format=self.config_format,
+             )
+
+         if config is None:
+             return {}
+
+         return config.to_diff_dict()
+
+     def get_diff_sampling_param(self) -> dict[str, Any]:
+         """
+         This method returns a dictionary containing the non-default sampling
+         parameters with `override_generation_config` applied.
+
+         The default sampling parameters are:
+
+         - vLLM's neutral defaults if `self.generation_config="vllm"`
+         - the model's defaults if `self.generation_config="auto"`
+         - as defined in `generation_config.json` if
+           `self.generation_config="path/to/generation_config/dir"`
+
+         Returns:
+             A dictionary containing the non-default sampling parameters.
+         """
+         if self.generation_config == "vllm":
+             config = {}
+         else:
+             config = self.try_get_generation_config()
+
+         # Overriding with given generation config
+         config.update(self.override_generation_config)
+
+         available_params = [
+             "repetition_penalty",
+             "temperature",
+             "top_k",
+             "top_p",
+             "min_p",
+             "max_new_tokens",
+         ]
+         if any(p in config for p in available_params):
+             diff_sampling_param = {
+                 p: config.get(p) for p in available_params if config.get(p) is not None
+             }
+             # Huggingface definition of max_new_tokens is equivalent
+             # to vLLM's max_tokens
+             if "max_new_tokens" in diff_sampling_param:
+                 diff_sampling_param["max_tokens"] = diff_sampling_param.pop(
+                     "max_new_tokens"
+                 )
+         else:
+             diff_sampling_param = {}
+
+         if diff_sampling_param:
+             logger.warning_once(
+                 "Default sampling parameters have been overridden by the "
+                 "model's Hugging Face generation config recommended from the "
+                 "model creator. If this is not intended, please relaunch "
+                 "vLLM instance with `--generation-config vllm`."
+             )
+         return diff_sampling_param
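+
+     # Illustrative result (hypothetical generation_config.json): for
+     # {"temperature": 0.6, "max_new_tokens": 256} this returns
+     # {"temperature": 0.6, "max_tokens": 256}, renaming Hugging Face's
+     # max_new_tokens to vLLM's max_tokens.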
+
+     @property
+     def is_encoder_decoder(self) -> bool:
+         """Extract the HF encoder/decoder model flag."""
+         return is_encoder_decoder(self.hf_config)
+
+     @property
+     def uses_alibi(self) -> bool:
+         cfg = self.hf_text_config
+
+         return (
+             getattr(cfg, "alibi", False)  # Falcon
+             or "BloomForCausalLM" in self.architectures  # Bloom
+             or getattr(cfg, "position_encoding_type", "") == "alibi"  # codellm_1b_alibi
+             or (
+                 hasattr(cfg, "attn_config")  # MPT
+                 and (
+                     (
+                         isinstance(cfg.attn_config, dict)
+                         and cfg.attn_config.get("alibi", False)
+                     )
+                     or (
+                         not isinstance(cfg.attn_config, dict)
+                         and getattr(cfg.attn_config, "alibi", False)
+                     )
+                 )
+             )
+         )
+
+     @property
+     def uses_mrope(self) -> bool:
+         return uses_mrope(self.hf_config)
+
+     @property
+     def uses_xdrope_dim(self) -> int:
+         return uses_xdrope_dim(self.hf_config)
+
+     @property
+     def is_multimodal_model(self) -> bool:
+         return self.multimodal_config is not None
+
+     @property
+     def is_multimodal_raw_input_only_model(self) -> bool:
+         return self._model_info.supports_multimodal_raw_input_only
+
+     @property
+     def requires_raw_input_tokens(self) -> bool:
+         return self._model_info.requires_raw_input_tokens
+
+     @property
+     def is_cross_encoder(self) -> bool:
+         return (
+             self._model_info.supports_cross_encoding or self.convert_type == "classify"
+         )
+
+     @property
+     def is_pp_supported(self) -> bool:
+         return self._model_info.supports_pp
+
+     @property
+     def is_attention_free(self) -> bool:
+         return self._model_info.is_attention_free
+
+     @property
+     def is_hybrid(self) -> bool:
+         if not self._model_info.is_hybrid:
+             return False
+         # Handle granite-4.0-micro case which uses hybrid config but does not
+         # actually contain any non-attention layers.
+         layer_types = getattr(self.hf_config, "layer_types", None)
+         return layer_types is None or not all(
+             layer == "attention" for layer in layer_types
+         )
+
+     @property
+     def has_noops(self) -> bool:
+         return self._model_info.has_noops
+
+     @property
+     def has_inner_state(self):
+         return self._model_info.has_inner_state
+
+     @property
+     def supports_mamba_prefix_caching(self) -> bool:
+         return self._model_info.supports_mamba_prefix_caching
+
+     @property
+     def use_mla(self) -> bool:
+         return self.is_deepseek_mla and not envs.VLLM_MLA_DISABLE
+
+     @property
+     def is_matryoshka(self) -> bool:
+         return bool(getattr(self.hf_config, "matryoshka_dimensions", None)) or getattr(
+             self.hf_config, "is_matryoshka", False
+         )
+
+     @property
+     def matryoshka_dimensions(self):
+         return getattr(self.hf_config, "matryoshka_dimensions", None)
+
+     @property
+     def use_sep_token(self) -> bool:
+         # Cross-encoder models default to using a separating token;
+         # `LLM as reranker` models default to not using one.
+
+         use_pad_token = getattr(self.hf_config, "use_pad_token", None)
+         if use_pad_token is not None:
+             logger.warning_once(
+                 "use_pad_token has been deprecated; please use use_sep_token instead."
+             )
+             return use_pad_token
+
+         return getattr(self.hf_config, "use_sep_token", True)
+
+     @property
+     def head_dtype(self) -> torch.dtype:
+         """
+         "head" refers to the last Linear layer(s) of an LLM,
+         such as the lm_head in a generation model,
+         or the score or classifier in a classification model.
+
+         `head_dtype` currently only supports pooling models.\n
+         - The pooling model defaults to using an fp32 head,
+           you can use --hf-overrides '{"head_dtype": "model"}' to disable it.
+         """
+
+         head_dtype = _get_head_dtype(
+             config=self.hf_config, dtype=self.dtype, runner_type=self.runner_type
+         )
+
+         if self.runner_type != "pooling" and head_dtype != self.dtype:
+             logger.warning_once(
+                 "`head_dtype` currently only supports pooling models, "
+                 "falling back to model dtype [%s].",
+                 self.dtype,
+             )
+             return self.dtype
+
+         if head_dtype not in current_platform.supported_dtypes:
+             logger.warning_once(
+                 "The current platform does not support [%s] head dtype, "
+                 "falling back to model dtype [%s].",
+                 head_dtype,
+                 self.dtype,
+             )
+             return self.dtype
+
+         logger.debug_once("head dtype: %s", head_dtype)
+         return head_dtype
+
+     @property
+     def embedding_size(self):
+         dense_modules = try_get_dense_modules(self.model, revision=self.revision)
+         if dense_modules is not None:
+             return dense_modules[-1]["out_features"]
+         return self.get_hidden_size()
+
+     def get_and_verify_max_len(self, max_model_len: int):
+         # Consider max_model_len in tokenizer_config only when
+         # pooling models use absolute position_embedding.
+         tokenizer_config = None
+         if (
+             self.runner_type == "pooling"
+             and getattr(self.hf_config, "position_embedding_type", "") == "absolute"
+         ):
+             tokenizer_config = try_get_tokenizer_config(
+                 self.tokenizer,
+                 trust_remote_code=self.trust_remote_code,
+                 revision=self.tokenizer_revision,
+             )
+         max_model_len = _get_and_verify_max_len(
+             hf_config=self.hf_text_config,
+             model_arch_config=self.model_arch_config,
+             tokenizer_config=tokenizer_config,
+             max_model_len=max_model_len,
+             disable_sliding_window=self.disable_sliding_window,
+             sliding_window=self.get_sliding_window(),
+             spec_target_max_model_len=self.spec_target_max_model_len,
+             encoder_config=self.encoder_config,
+         )
+         logger.info("Using max model len %s", max_model_len)
+         return max_model_len
+
+     @property
+     def attn_type(self) -> AttnTypeStr:
+         if self.pooler_config is not None:
+             seq_pooling_type = self._model_info.default_seq_pooling_type
+             if seq_pooling_type == "CLS":
+                 return "encoder_only"
+             else:
+                 is_causal = getattr(self.hf_config, "is_causal", True)
+                 return "encoder_only" if not is_causal else self._model_info.attn_type
+         elif self.is_hybrid:
+             return "hybrid"
+         elif self.is_attention_free:
+             return "attention_free"
+         elif self.is_encoder_decoder:
+             return "encoder_decoder"
+         else:
+             return "decoder"
+
+     @property
+     def is_chunked_prefill_supported(self) -> bool:
+         attn_type = self.attn_type
+
+         if pooler_config := self.pooler_config:
+             # for pooling models
+             if attn_type == "encoder_only":
+                 logger.debug(
+                     "Pooling models with bidirectional attn "
+                     "do not support chunked prefill."
+                 )
+                 return False
+
+             if attn_type == "decoder":
+                 if (
+                     pooler_config.seq_pooling_type in ("MEAN", "CLS")
+                     or pooler_config.tok_pooling_type == "STEP"
+                 ):
+                     logger.debug(
+                         "Pooling models with causal attn and %s/%s pooling "
+                         "do not support chunked prefill.",
+                         pooler_config.seq_pooling_type,
+                         pooler_config.tok_pooling_type,
+                     )
+                     return False
+                 else:
+                     logger.debug(
+                         "Pooling models with causal attn and %s/%s pooling "
+                         "support chunked prefill.",
+                         pooler_config.seq_pooling_type,
+                         pooler_config.tok_pooling_type,
+                     )
+                     return True
+
+             # vllm currently does not have pooling models using hybrid,
+             # attention_free or encoder_decoder attn types.
+             return attn_type != "encoder_decoder"
+         else:
+             # for generative models
+             if attn_type == "encoder_decoder":
+                 logger.debug("Encoder decoder models do not support chunked prefill.")
+                 return False
+
+             logger.debug("Generative models support chunked prefill.")
+             return True
+
+     @property
+     def is_prefix_caching_supported(self) -> bool:
+         attn_type = self.attn_type
+
+         if pooler_config := self.pooler_config:
+             # for pooling models
+             if attn_type == "encoder_only":
+                 logger.debug(
+                     "Pooling models with bidirectional attn "
+                     "do not support prefix caching."
+                 )
+                 return False
+
+             if attn_type == "decoder":
+                 if (
+                     pooler_config.seq_pooling_type in ("MEAN", "CLS")
+                     or pooler_config.tok_pooling_type == "STEP"
+                 ):
+                     logger.debug(
+                         "Pooling models with causal attn and %s/%s pooling "
+                         "do not support prefix caching.",
+                         pooler_config.seq_pooling_type,
+                         pooler_config.tok_pooling_type,
+                     )
+                     return False
+                 else:
+                     logger.debug(
+                         "Pooling models with causal attn and %s/%s pooling "
+                         "support prefix caching.",
+                         pooler_config.seq_pooling_type,
+                         pooler_config.tok_pooling_type,
+                     )
+                     return True
+
+             # vllm currently does not have pooling models using hybrid,
+             # attention_free or encoder_decoder attn types.
+             return False
+         else:
+             # for generative models
+             if attn_type == "hybrid":
+                 logger.debug(
+                     "Hybrid models do not support prefix caching since the feature "
+                     "is still experimental."
+                 )
+                 return False
+             elif attn_type == "attention_free":
+                 logger.debug(
+                     "Attention free models do not support prefix caching since the "
+                     "feature is still experimental."
+                 )
+                 return False
+             elif attn_type == "encoder_decoder":
+                 logger.debug("Encoder decoder models do not support prefix caching.")
+                 return False
+             else:  # attn_type == "decoder"
+                 logger.debug("Generative models support prefix caching.")
+                 return True
+
+     @property
+     def is_moe(self) -> bool:
+         return self.get_num_experts() > 0
+
+     @property
+     def is_quantized(self) -> bool:
+         return getattr(self.hf_config, "quantization_config", None) is not None
+
+
+ def get_served_model_name(model: str, served_model_name: str | list[str] | None):
+     """
+     If the input is a non-empty list, the first model_name in
+     `served_model_name` is taken.
+     If the input is a non-empty string, it is used directly.
+     For cases where the input is either an empty string or an
+     empty list, the fallback is to use `model`.
+     """
+     if not served_model_name:
+         return model
+     if isinstance(served_model_name, list):
+         return served_model_name[0]
+     return served_model_name
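+
+
+ # Illustrative usage (hypothetical names):
+ #     get_served_model_name("org/model", None) -> "org/model"
+ #     get_served_model_name("org/model", ["alias", "other"]) -> "alias"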
+
+
+ # Some model suffixes are based on auto classes from Transformers:
+ # https://huggingface.co/docs/transformers/en/model_doc/auto
+ # NOTE: Items higher on this list take priority over lower ones
+ _SUFFIX_TO_DEFAULTS: list[tuple[str, tuple[RunnerType, ConvertType]]] = [
+     ("ForCausalLM", ("generate", "none")),
+     ("ForConditionalGeneration", ("generate", "none")),
+     ("ChatModel", ("generate", "none")),
+     ("LMHeadModel", ("generate", "none")),
+     ("ForTextEncoding", ("pooling", "embed")),
+     ("EmbeddingModel", ("pooling", "embed")),
+     ("ForSequenceClassification", ("pooling", "classify")),
+     ("ForTokenClassification", ("pooling", "classify")),
+     ("ForAudioClassification", ("pooling", "classify")),
+     ("ForImageClassification", ("pooling", "classify")),
+     ("ForVideoClassification", ("pooling", "classify")),
+     ("ClassificationModel", ("pooling", "classify")),
+     ("ForRewardModeling", ("pooling", "embed")),
+     ("RewardModel", ("pooling", "embed")),
+     # Let other `*Model`s take priority
+     ("Model", ("pooling", "embed")),
+ ]
+
+
+ def iter_architecture_defaults():
+     yield from _SUFFIX_TO_DEFAULTS
+
+
+ def try_match_architecture_defaults(
+     architecture: str,
+     *,
+     runner_type: RunnerType | None = None,
+     convert_type: ConvertType | None = None,
+ ) -> tuple[str, tuple[RunnerType, ConvertType]] | None:
+     for suffix, (
+         default_runner_type,
+         default_convert_type,
+     ) in iter_architecture_defaults():
+         if (
+             (runner_type is None or runner_type == default_runner_type)
+             and (convert_type is None or convert_type == default_convert_type)
+             and architecture.endswith(suffix)
+         ):
+             return suffix, (default_runner_type, default_convert_type)
+
+     return None
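+
+
+ # Worked example (hypothetical architecture name): "FooForTokenClassification"
+ # matches the "ForTokenClassification" suffix before the catch-all "Model"
+ # entry, so try_match_architecture_defaults returns
+ # ("ForTokenClassification", ("pooling", "classify")).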
+
+
+ _STR_DTYPE_TO_TORCH_DTYPE = {
+     "half": torch.float16,
+     "float16": torch.float16,
+     "float": torch.float32,
+     "float32": torch.float32,
+     "bfloat16": torch.bfloat16,
+ }
+
+
+ def str_dtype_to_torch_dtype(type: str):
+     return _STR_DTYPE_TO_TORCH_DTYPE.get(type)
+
+
+ # model_type -> reason
+ _FLOAT16_NOT_SUPPORTED_MODELS = {
+     "gemma2": "Numerical instability. Please use bfloat16 or float32 instead.",
+     "gemma3": "Numerical instability. Please use bfloat16 or float32 instead.",
+     "gemma3_text": "Numerical instability. Please use bfloat16 or float32 instead.",
+     "plamo2": "Numerical instability. Please use bfloat16 or float32 instead.",
+     "glm4": "Numerical instability. Please use bfloat16 or float32 instead.",
+ }
+
+
+ def _is_valid_dtype(model_type: str, dtype: torch.dtype):
+     if model_type in _FLOAT16_NOT_SUPPORTED_MODELS and dtype == torch.float16:  # noqa: E501, SIM103
+         return False
+
+     return True
+
+
+ def _check_valid_dtype(model_type: str, dtype: torch.dtype):
+     if model_type in _FLOAT16_NOT_SUPPORTED_MODELS and dtype == torch.float16:
+         reason = _FLOAT16_NOT_SUPPORTED_MODELS[model_type]
+         raise ValueError(
+             f"The model type {model_type!r} does not support float16. Reason: {reason}"
+         )
+
+     return True
+
+
+ def _resolve_auto_dtype(
+     model_type: str,
+     config_dtype: torch.dtype,
+     *,
+     is_pooling_model: bool,
+ ):
+     from vllm.platforms import current_platform
+
+     supported_dtypes = [
+         dtype
+         for dtype in current_platform.supported_dtypes
+         if _is_valid_dtype(model_type, dtype)
+     ]
+
+     if is_pooling_model and torch.float16 in supported_dtypes:
+         preferred_dtype = torch.float16
+     else:
+         preferred_dtype = supported_dtypes[0]
+
+     # Downcast for float32 models
+     if config_dtype == torch.float32:
+         config_dtype = preferred_dtype
+
+     if config_dtype in supported_dtypes:
+         return config_dtype
+
+     # Ensure device compatibility
+     device_name = current_platform.get_device_name()
+     device_capability = current_platform.get_device_capability()
+
+     if device_capability is None:
+         device_str = f"{device_name!r}"
+     else:
+         version_str = device_capability.as_version_str()
+         device_str = f"{device_name!r} (with compute capability {version_str})"
+
+     logger.warning(
+         "Your device %s doesn't support %s. Falling back to %s for compatibility.",
+         device_str,
+         config_dtype,
+         preferred_dtype,
+     )
+
+     return preferred_dtype
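+
+
+ # Worked example (hypothetical platform): for a float32 checkpoint of a
+ # generative model on a platform whose supported_dtypes begin with
+ # torch.bfloat16, the float32 config dtype is downcast to the preferred
+ # torch.bfloat16.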
+
+
+ def _get_and_verify_dtype(
+     model_id: str,
+     config: PretrainedConfig,
+     dtype: str | torch.dtype,
+     *,
+     is_pooling_model: bool,
+     revision: str | None = None,
+ ) -> torch.dtype:
+     config_dtype = ModelArchConfigConvertorBase.get_torch_dtype(
+         config, model_id, revision=revision
+     )
+     model_type = config.model_type
+
+     if isinstance(dtype, str):
+         dtype = dtype.lower()
+         if dtype == "auto":
+             # Set default dtype from model config
+             torch_dtype = _resolve_auto_dtype(
+                 model_type,
+                 config_dtype,
+                 is_pooling_model=is_pooling_model,
+             )
+         else:
+             if dtype not in _STR_DTYPE_TO_TORCH_DTYPE:
+                 raise ValueError(f"Unknown dtype: {dtype!r}")
+             torch_dtype = _STR_DTYPE_TO_TORCH_DTYPE[dtype]
+     elif isinstance(dtype, torch.dtype):
+         torch_dtype = dtype
+     else:
+         raise ValueError(f"Unknown dtype: {dtype}")
+
+     _check_valid_dtype(model_type, torch_dtype)
+
+     if torch_dtype != config_dtype:
+         if torch_dtype == torch.float32:
+             # Upcasting to float32 is allowed.
+             logger.info("Upcasting %s to %s.", config_dtype, torch_dtype)
+         elif config_dtype == torch.float32:
+             # Downcasting from float32 to float16 or bfloat16 is allowed.
+             logger.info("Downcasting %s to %s.", config_dtype, torch_dtype)
+         else:
+             # Casting between float16 and bfloat16 is allowed with a warning.
+             logger.warning("Casting %s to %s.", config_dtype, torch_dtype)
+
+     return torch_dtype
+
+
+ def _get_head_dtype(
+     config: PretrainedConfig, dtype: torch.dtype, runner_type: str
+ ) -> torch.dtype:
+     head_dtype: str | torch.dtype | None = getattr(config, "head_dtype", None)
+
+     if head_dtype == "model":
+         return dtype
+     elif isinstance(head_dtype, str):
+         head_dtype = head_dtype.lower()
+         if head_dtype not in _STR_DTYPE_TO_TORCH_DTYPE:
+             raise ValueError(f"Unknown dtype: {head_dtype!r}")
+         return _STR_DTYPE_TO_TORCH_DTYPE[head_dtype]
+     elif isinstance(head_dtype, torch.dtype):
+         return head_dtype
+     elif head_dtype is None:
+         if torch.float32 not in current_platform.supported_dtypes:
+             return dtype
+         if runner_type == "pooling":
+             return torch.float32
+         return dtype
+     else:
+         raise ValueError(f"Unknown dtype: {head_dtype}")
+
+
+ def _get_and_verify_max_len(
+     hf_config: PretrainedConfig,
+     model_arch_config: ModelArchitectureConfig,
+     tokenizer_config: dict | None,
+     max_model_len: int | None,
+     disable_sliding_window: bool,
+     sliding_window: int | None,
+     spec_target_max_model_len: int | None = None,
+     encoder_config: Any | None = None,
+ ) -> int:
+     """Get and verify the model's maximum length."""
+     (derived_max_model_len, max_len_key) = (
+         model_arch_config.derived_max_model_len_and_key
+     )
+
+     # If sliding window is manually disabled, max_length should be less
+     # than the sliding window length in the model config.
+     if (
+         disable_sliding_window
+         and sliding_window is not None
+         and sliding_window < derived_max_model_len
+     ):
+         max_len_key = "sliding_window"
+         derived_max_model_len = sliding_window
+
+     # Consider model_max_length in tokenizer_config
+     if tokenizer_config:
+         tokenizer_model_max_length = tokenizer_config.get(
+             "model_max_length", derived_max_model_len
+         )
+         derived_max_model_len = min(derived_max_model_len, tokenizer_model_max_length)
+
+     # If none of the keys were found in the config, use a default and
+     # log a warning.
+     if derived_max_model_len == float("inf"):
+         if max_model_len is not None:
+             # If max_model_len is specified, we use it.
+             return max_model_len
+
+         if spec_target_max_model_len is not None:
+             # If this is a speculative draft model, we use the max model len
+             # from the target model.
+             return spec_target_max_model_len
+
+         default_max_len = 2048
+         logger.warning(
+             "The model's config.json does not contain any of the keys "
+             "to determine the original maximum length of the model. "
+             "Assuming the model's maximum length is %d.",
+             default_max_len,
+         )
+         derived_max_model_len = default_max_len
+
+     # In Transformers v5 rope_parameters could be TypedDict or dict[str, TypedDict].
+     # To simplify the verification, we convert it to dict[str, TypedDict].
+     rope_parameters = getattr(hf_config, "rope_parameters", None)
+     if rope_parameters and not is_rope_parameters_nested(rope_parameters):
+         rope_parameters = {"": rope_parameters}
+
+     # NOTE(woosuk): Gemma3's max_model_len (128K) is already scaled by RoPE
+     # scaling, so we skip applying the scaling factor again.
+     if rope_parameters is not None and "gemma3" not in hf_config.model_type:
+         scaling_factor = 1.0
+         for rp in rope_parameters.values():
+             # No need to consider "type" key because of patch_rope_parameters when
+             # loading HF config
+             rope_type = rp["rope_type"]
+
+             if rope_type not in ("su", "longrope", "llama3"):
+                 # NOTE: rope_type == "default" does not define factor https://github.com/huggingface/transformers/blob/v4.45.2/src/transformers/modeling_rope_utils.py
+                 # NOTE: This assumes all layer types have the same scaling factor.
+                 scaling_factor = rp.get("factor", scaling_factor)
+
+             if rope_type == "yarn":
+                 derived_max_model_len = rp["original_max_position_embeddings"]
+         # Do this outside loop since all layer types should have the same scaling
+         derived_max_model_len *= scaling_factor
+
+     if encoder_config and "max_seq_length" in encoder_config:
+         derived_max_model_len = encoder_config["max_seq_length"]
+
+     # If the user didn't specify `max_model_len` or specified -1 (auto-fit),
+     # then use the value derived from the model config as the default.
+     # When -1 is specified, the engine will later auto-fit to available memory.
+     if max_model_len is None or max_model_len == -1:
+         # For LongRoPE, default to original_max_position_embeddings to avoid
+         # performance degradation for shorter sequences
+         if rope_parameters is not None and any(
+             rp["rope_type"] == "longrope" for rp in rope_parameters.values()
+         ):
+             max_model_len = int(
+                 getattr(
+                     hf_config, "original_max_position_embeddings", derived_max_model_len
+                 )
+             )
+         else:
+             max_model_len = int(derived_max_model_len)
+         max_model_len = current_platform.check_max_model_len(max_model_len)
+
+     # If the user specified a max length, make sure it is smaller than the
+     # derived length from the HF model config.
+     elif max_model_len > derived_max_model_len:
+         # Some models might have a separate key for specifying model_max_length
+         # that will be bigger than derived_max_model_len. We compare user input
+         # with model_max_length and allow this override when it's smaller.
+         model_max_length = getattr(hf_config, "model_max_length", None)
+         if model_max_length is None or max_model_len > model_max_length:
+             msg = (
+                 f"User-specified max_model_len ({max_model_len}) is greater "
+                 f"than the derived max_model_len ({max_len_key}="
+                 f"{derived_max_model_len} or model_max_length="
+                 f"{model_max_length} in model's config.json)."
+             )
+             warning = (
+                 "VLLM_ALLOW_LONG_MAX_MODEL_LEN must be used with extreme "
+                 "caution. If the model uses relative position encoding (RoPE), "
+                 "positions exceeding derived_max_model_len lead to nan. If the "
+                 "model uses absolute position encoding, positions exceeding "
+                 "derived_max_model_len will cause a CUDA array out-of-bounds "
+                 "error."
+             )
+             if envs.VLLM_ALLOW_LONG_MAX_MODEL_LEN:
+                 logger.warning_once("%s %s", msg, warning)
+             else:
+                 raise ValueError(
+                     f"{msg} To allow overriding this maximum, set "
+                     f"the env var VLLM_ALLOW_LONG_MAX_MODEL_LEN=1. {warning}"
+                 )
+     return int(max_model_len)
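+
+
+ # Worked example (hypothetical RoPE parameters): for a YaRN config with
+ # original_max_position_embeddings=32768 and factor=4.0, the derived max
+ # model length becomes 32768 * 4.0 = 131072 unless the user requests less.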