vllm-cpu-avx512vnni 0.13.0 (cp313-cp313-manylinux_2_28_x86_64.whl)

This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of vllm-cpu-avx512vnni has been flagged as potentially problematic.

Files changed (1641)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +225 -0
  3. vllm/_aiter_ops.py +1260 -0
  4. vllm/_bc_linter.py +54 -0
  5. vllm/_custom_ops.py +3080 -0
  6. vllm/_ipex_ops.py +457 -0
  7. vllm/_version.py +34 -0
  8. vllm/assets/__init__.py +0 -0
  9. vllm/assets/audio.py +43 -0
  10. vllm/assets/base.py +40 -0
  11. vllm/assets/image.py +59 -0
  12. vllm/assets/video.py +149 -0
  13. vllm/attention/__init__.py +0 -0
  14. vllm/attention/backends/__init__.py +0 -0
  15. vllm/attention/backends/abstract.py +443 -0
  16. vllm/attention/backends/registry.py +254 -0
  17. vllm/attention/backends/utils.py +33 -0
  18. vllm/attention/layer.py +969 -0
  19. vllm/attention/layers/__init__.py +0 -0
  20. vllm/attention/layers/chunked_local_attention.py +120 -0
  21. vllm/attention/layers/cross_attention.py +178 -0
  22. vllm/attention/layers/encoder_only_attention.py +103 -0
  23. vllm/attention/layers/mm_encoder_attention.py +284 -0
  24. vllm/attention/ops/__init__.py +0 -0
  25. vllm/attention/ops/chunked_prefill_paged_decode.py +401 -0
  26. vllm/attention/ops/common.py +469 -0
  27. vllm/attention/ops/flashmla.py +251 -0
  28. vllm/attention/ops/merge_attn_states.py +47 -0
  29. vllm/attention/ops/paged_attn.py +51 -0
  30. vllm/attention/ops/pallas_kv_cache_update.py +130 -0
  31. vllm/attention/ops/prefix_prefill.py +814 -0
  32. vllm/attention/ops/rocm_aiter_mla_sparse.py +210 -0
  33. vllm/attention/ops/triton_decode_attention.py +712 -0
  34. vllm/attention/ops/triton_merge_attn_states.py +116 -0
  35. vllm/attention/ops/triton_reshape_and_cache_flash.py +184 -0
  36. vllm/attention/ops/triton_unified_attention.py +1047 -0
  37. vllm/attention/ops/vit_attn_wrappers.py +139 -0
  38. vllm/attention/selector.py +145 -0
  39. vllm/attention/utils/__init__.py +0 -0
  40. vllm/attention/utils/fa_utils.py +118 -0
  41. vllm/attention/utils/kv_sharing_utils.py +33 -0
  42. vllm/attention/utils/kv_transfer_utils.py +60 -0
  43. vllm/beam_search.py +88 -0
  44. vllm/benchmarks/__init__.py +0 -0
  45. vllm/benchmarks/datasets.py +3228 -0
  46. vllm/benchmarks/latency.py +170 -0
  47. vllm/benchmarks/lib/__init__.py +3 -0
  48. vllm/benchmarks/lib/endpoint_request_func.py +777 -0
  49. vllm/benchmarks/lib/ready_checker.py +72 -0
  50. vllm/benchmarks/lib/utils.py +79 -0
  51. vllm/benchmarks/serve.py +1538 -0
  52. vllm/benchmarks/startup.py +326 -0
  53. vllm/benchmarks/sweep/__init__.py +0 -0
  54. vllm/benchmarks/sweep/cli.py +41 -0
  55. vllm/benchmarks/sweep/param_sweep.py +158 -0
  56. vllm/benchmarks/sweep/plot.py +675 -0
  57. vllm/benchmarks/sweep/plot_pareto.py +393 -0
  58. vllm/benchmarks/sweep/serve.py +450 -0
  59. vllm/benchmarks/sweep/serve_sla.py +492 -0
  60. vllm/benchmarks/sweep/server.py +114 -0
  61. vllm/benchmarks/sweep/sla_sweep.py +132 -0
  62. vllm/benchmarks/sweep/utils.py +4 -0
  63. vllm/benchmarks/throughput.py +808 -0
  64. vllm/collect_env.py +857 -0
  65. vllm/compilation/__init__.py +0 -0
  66. vllm/compilation/activation_quant_fusion.py +209 -0
  67. vllm/compilation/backends.py +839 -0
  68. vllm/compilation/base_static_graph.py +57 -0
  69. vllm/compilation/caching.py +180 -0
  70. vllm/compilation/collective_fusion.py +1215 -0
  71. vllm/compilation/compiler_interface.py +639 -0
  72. vllm/compilation/counter.py +48 -0
  73. vllm/compilation/cuda_graph.py +302 -0
  74. vllm/compilation/decorators.py +626 -0
  75. vllm/compilation/fix_functionalization.py +266 -0
  76. vllm/compilation/fusion.py +550 -0
  77. vllm/compilation/fusion_attn.py +359 -0
  78. vllm/compilation/fx_utils.py +91 -0
  79. vllm/compilation/inductor_pass.py +138 -0
  80. vllm/compilation/matcher_utils.py +361 -0
  81. vllm/compilation/monitor.py +62 -0
  82. vllm/compilation/noop_elimination.py +130 -0
  83. vllm/compilation/partition_rules.py +72 -0
  84. vllm/compilation/pass_manager.py +155 -0
  85. vllm/compilation/piecewise_backend.py +178 -0
  86. vllm/compilation/post_cleanup.py +21 -0
  87. vllm/compilation/qk_norm_rope_fusion.py +238 -0
  88. vllm/compilation/rocm_aiter_fusion.py +242 -0
  89. vllm/compilation/sequence_parallelism.py +364 -0
  90. vllm/compilation/torch25_custom_graph_pass.py +44 -0
  91. vllm/compilation/vllm_inductor_pass.py +173 -0
  92. vllm/compilation/wrapper.py +319 -0
  93. vllm/config/__init__.py +108 -0
  94. vllm/config/attention.py +114 -0
  95. vllm/config/cache.py +232 -0
  96. vllm/config/compilation.py +1140 -0
  97. vllm/config/device.py +75 -0
  98. vllm/config/ec_transfer.py +110 -0
  99. vllm/config/kv_events.py +56 -0
  100. vllm/config/kv_transfer.py +119 -0
  101. vllm/config/load.py +124 -0
  102. vllm/config/lora.py +96 -0
  103. vllm/config/model.py +2190 -0
  104. vllm/config/multimodal.py +247 -0
  105. vllm/config/observability.py +140 -0
  106. vllm/config/parallel.py +660 -0
  107. vllm/config/pooler.py +126 -0
  108. vllm/config/profiler.py +199 -0
  109. vllm/config/scheduler.py +299 -0
  110. vllm/config/speculative.py +644 -0
  111. vllm/config/speech_to_text.py +38 -0
  112. vllm/config/structured_outputs.py +78 -0
  113. vllm/config/utils.py +370 -0
  114. vllm/config/vllm.py +1434 -0
  115. vllm/connections.py +189 -0
  116. vllm/device_allocator/__init__.py +0 -0
  117. vllm/device_allocator/cumem.py +327 -0
  118. vllm/distributed/__init__.py +6 -0
  119. vllm/distributed/communication_op.py +43 -0
  120. vllm/distributed/device_communicators/__init__.py +0 -0
  121. vllm/distributed/device_communicators/all2all.py +490 -0
  122. vllm/distributed/device_communicators/all_reduce_utils.py +344 -0
  123. vllm/distributed/device_communicators/base_device_communicator.py +297 -0
  124. vllm/distributed/device_communicators/cpu_communicator.py +209 -0
  125. vllm/distributed/device_communicators/cuda_communicator.py +340 -0
  126. vllm/distributed/device_communicators/cuda_wrapper.py +216 -0
  127. vllm/distributed/device_communicators/custom_all_reduce.py +326 -0
  128. vllm/distributed/device_communicators/mnnvl_compat.py +27 -0
  129. vllm/distributed/device_communicators/pynccl.py +386 -0
  130. vllm/distributed/device_communicators/pynccl_allocator.py +191 -0
  131. vllm/distributed/device_communicators/pynccl_wrapper.py +564 -0
  132. vllm/distributed/device_communicators/quick_all_reduce.py +290 -0
  133. vllm/distributed/device_communicators/ray_communicator.py +259 -0
  134. vllm/distributed/device_communicators/shm_broadcast.py +778 -0
  135. vllm/distributed/device_communicators/shm_object_storage.py +697 -0
  136. vllm/distributed/device_communicators/symm_mem.py +156 -0
  137. vllm/distributed/device_communicators/tpu_communicator.py +99 -0
  138. vllm/distributed/device_communicators/xpu_communicator.py +95 -0
  139. vllm/distributed/ec_transfer/__init__.py +14 -0
  140. vllm/distributed/ec_transfer/ec_connector/__init__.py +0 -0
  141. vllm/distributed/ec_transfer/ec_connector/base.py +247 -0
  142. vllm/distributed/ec_transfer/ec_connector/example_connector.py +201 -0
  143. vllm/distributed/ec_transfer/ec_connector/factory.py +85 -0
  144. vllm/distributed/ec_transfer/ec_transfer_state.py +42 -0
  145. vllm/distributed/eplb/__init__.py +3 -0
  146. vllm/distributed/eplb/async_worker.py +115 -0
  147. vllm/distributed/eplb/eplb_state.py +1164 -0
  148. vllm/distributed/eplb/policy/__init__.py +19 -0
  149. vllm/distributed/eplb/policy/abstract.py +40 -0
  150. vllm/distributed/eplb/policy/default.py +267 -0
  151. vllm/distributed/eplb/rebalance_execute.py +529 -0
  152. vllm/distributed/kv_events.py +499 -0
  153. vllm/distributed/kv_transfer/README.md +29 -0
  154. vllm/distributed/kv_transfer/__init__.py +20 -0
  155. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  156. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  157. vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
  158. vllm/distributed/kv_transfer/kv_connector/factory.py +197 -0
  159. vllm/distributed/kv_transfer/kv_connector/utils.py +322 -0
  160. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +19 -0
  161. vllm/distributed/kv_transfer/kv_connector/v1/base.py +597 -0
  162. vllm/distributed/kv_transfer/kv_connector/v1/decode_bench_connector.py +419 -0
  163. vllm/distributed/kv_transfer/kv_connector/v1/example_connector.py +450 -0
  164. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +327 -0
  165. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/__init__.py +18 -0
  166. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/multi_process_adapter.py +378 -0
  167. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/utils.py +221 -0
  168. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/vllm_v1_adapter.py +1418 -0
  169. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_mp_connector.py +895 -0
  170. vllm/distributed/kv_transfer/kv_connector/v1/metrics.py +186 -0
  171. vllm/distributed/kv_transfer/kv_connector/v1/mooncake_connector.py +914 -0
  172. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +464 -0
  173. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +2526 -0
  174. vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py +538 -0
  175. vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
  176. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +531 -0
  177. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +632 -0
  178. vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +273 -0
  179. vllm/distributed/kv_transfer/kv_transfer_state.py +78 -0
  180. vllm/distributed/parallel_state.py +1795 -0
  181. vllm/distributed/tpu_distributed_utils.py +188 -0
  182. vllm/distributed/utils.py +545 -0
  183. vllm/engine/__init__.py +0 -0
  184. vllm/engine/arg_utils.py +2068 -0
  185. vllm/engine/async_llm_engine.py +6 -0
  186. vllm/engine/llm_engine.py +6 -0
  187. vllm/engine/protocol.py +190 -0
  188. vllm/entrypoints/__init__.py +0 -0
  189. vllm/entrypoints/anthropic/__init__.py +0 -0
  190. vllm/entrypoints/anthropic/protocol.py +162 -0
  191. vllm/entrypoints/anthropic/serving_messages.py +468 -0
  192. vllm/entrypoints/api_server.py +185 -0
  193. vllm/entrypoints/chat_utils.py +1903 -0
  194. vllm/entrypoints/cli/__init__.py +15 -0
  195. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  196. vllm/entrypoints/cli/benchmark/base.py +25 -0
  197. vllm/entrypoints/cli/benchmark/latency.py +21 -0
  198. vllm/entrypoints/cli/benchmark/main.py +56 -0
  199. vllm/entrypoints/cli/benchmark/serve.py +21 -0
  200. vllm/entrypoints/cli/benchmark/startup.py +21 -0
  201. vllm/entrypoints/cli/benchmark/sweep.py +21 -0
  202. vllm/entrypoints/cli/benchmark/throughput.py +21 -0
  203. vllm/entrypoints/cli/collect_env.py +38 -0
  204. vllm/entrypoints/cli/main.py +79 -0
  205. vllm/entrypoints/cli/openai.py +260 -0
  206. vllm/entrypoints/cli/run_batch.py +68 -0
  207. vllm/entrypoints/cli/serve.py +249 -0
  208. vllm/entrypoints/cli/types.py +29 -0
  209. vllm/entrypoints/constants.py +12 -0
  210. vllm/entrypoints/context.py +835 -0
  211. vllm/entrypoints/launcher.py +175 -0
  212. vllm/entrypoints/llm.py +1790 -0
  213. vllm/entrypoints/logger.py +84 -0
  214. vllm/entrypoints/openai/__init__.py +0 -0
  215. vllm/entrypoints/openai/api_server.py +1469 -0
  216. vllm/entrypoints/openai/cli_args.py +302 -0
  217. vllm/entrypoints/openai/orca_metrics.py +120 -0
  218. vllm/entrypoints/openai/parser/__init__.py +0 -0
  219. vllm/entrypoints/openai/parser/harmony_utils.py +825 -0
  220. vllm/entrypoints/openai/parser/responses_parser.py +135 -0
  221. vllm/entrypoints/openai/protocol.py +2496 -0
  222. vllm/entrypoints/openai/run_batch.py +631 -0
  223. vllm/entrypoints/openai/serving_chat.py +1822 -0
  224. vllm/entrypoints/openai/serving_completion.py +729 -0
  225. vllm/entrypoints/openai/serving_engine.py +1542 -0
  226. vllm/entrypoints/openai/serving_models.py +304 -0
  227. vllm/entrypoints/openai/serving_responses.py +2080 -0
  228. vllm/entrypoints/openai/serving_transcription.py +168 -0
  229. vllm/entrypoints/openai/speech_to_text.py +559 -0
  230. vllm/entrypoints/openai/tool_parsers/__init__.py +33 -0
  231. vllm/entrypoints/openai/utils.py +49 -0
  232. vllm/entrypoints/pooling/__init__.py +16 -0
  233. vllm/entrypoints/pooling/classify/__init__.py +0 -0
  234. vllm/entrypoints/pooling/classify/api_router.py +50 -0
  235. vllm/entrypoints/pooling/classify/protocol.py +181 -0
  236. vllm/entrypoints/pooling/classify/serving.py +233 -0
  237. vllm/entrypoints/pooling/embed/__init__.py +0 -0
  238. vllm/entrypoints/pooling/embed/api_router.py +67 -0
  239. vllm/entrypoints/pooling/embed/protocol.py +208 -0
  240. vllm/entrypoints/pooling/embed/serving.py +684 -0
  241. vllm/entrypoints/pooling/pooling/__init__.py +0 -0
  242. vllm/entrypoints/pooling/pooling/api_router.py +63 -0
  243. vllm/entrypoints/pooling/pooling/protocol.py +148 -0
  244. vllm/entrypoints/pooling/pooling/serving.py +354 -0
  245. vllm/entrypoints/pooling/score/__init__.py +0 -0
  246. vllm/entrypoints/pooling/score/api_router.py +149 -0
  247. vllm/entrypoints/pooling/score/protocol.py +146 -0
  248. vllm/entrypoints/pooling/score/serving.py +508 -0
  249. vllm/entrypoints/renderer.py +410 -0
  250. vllm/entrypoints/responses_utils.py +249 -0
  251. vllm/entrypoints/sagemaker/__init__.py +4 -0
  252. vllm/entrypoints/sagemaker/routes.py +118 -0
  253. vllm/entrypoints/score_utils.py +237 -0
  254. vllm/entrypoints/serve/__init__.py +60 -0
  255. vllm/entrypoints/serve/disagg/__init__.py +0 -0
  256. vllm/entrypoints/serve/disagg/api_router.py +110 -0
  257. vllm/entrypoints/serve/disagg/protocol.py +90 -0
  258. vllm/entrypoints/serve/disagg/serving.py +285 -0
  259. vllm/entrypoints/serve/elastic_ep/__init__.py +0 -0
  260. vllm/entrypoints/serve/elastic_ep/api_router.py +96 -0
  261. vllm/entrypoints/serve/elastic_ep/middleware.py +49 -0
  262. vllm/entrypoints/serve/instrumentator/__init__.py +0 -0
  263. vllm/entrypoints/serve/instrumentator/health.py +33 -0
  264. vllm/entrypoints/serve/instrumentator/metrics.py +45 -0
  265. vllm/entrypoints/serve/lora/__init__.py +0 -0
  266. vllm/entrypoints/serve/lora/api_router.py +70 -0
  267. vllm/entrypoints/serve/profile/__init__.py +0 -0
  268. vllm/entrypoints/serve/profile/api_router.py +46 -0
  269. vllm/entrypoints/serve/rlhf/__init__.py +0 -0
  270. vllm/entrypoints/serve/rlhf/api_router.py +102 -0
  271. vllm/entrypoints/serve/sleep/__init__.py +0 -0
  272. vllm/entrypoints/serve/sleep/api_router.py +60 -0
  273. vllm/entrypoints/serve/tokenize/__init__.py +0 -0
  274. vllm/entrypoints/serve/tokenize/api_router.py +118 -0
  275. vllm/entrypoints/serve/tokenize/serving.py +204 -0
  276. vllm/entrypoints/ssl.py +78 -0
  277. vllm/entrypoints/tool.py +187 -0
  278. vllm/entrypoints/tool_server.py +234 -0
  279. vllm/entrypoints/utils.py +319 -0
  280. vllm/env_override.py +378 -0
  281. vllm/envs.py +1744 -0
  282. vllm/forward_context.py +358 -0
  283. vllm/inputs/__init__.py +44 -0
  284. vllm/inputs/data.py +359 -0
  285. vllm/inputs/parse.py +146 -0
  286. vllm/inputs/preprocess.py +717 -0
  287. vllm/logger.py +303 -0
  288. vllm/logging_utils/__init__.py +13 -0
  289. vllm/logging_utils/dump_input.py +83 -0
  290. vllm/logging_utils/formatter.py +127 -0
  291. vllm/logging_utils/lazy.py +20 -0
  292. vllm/logging_utils/log_time.py +34 -0
  293. vllm/logits_process.py +121 -0
  294. vllm/logprobs.py +206 -0
  295. vllm/lora/__init__.py +0 -0
  296. vllm/lora/layers/__init__.py +42 -0
  297. vllm/lora/layers/base.py +66 -0
  298. vllm/lora/layers/base_linear.py +165 -0
  299. vllm/lora/layers/column_parallel_linear.py +577 -0
  300. vllm/lora/layers/fused_moe.py +747 -0
  301. vllm/lora/layers/logits_processor.py +203 -0
  302. vllm/lora/layers/replicated_linear.py +70 -0
  303. vllm/lora/layers/row_parallel_linear.py +176 -0
  304. vllm/lora/layers/utils.py +74 -0
  305. vllm/lora/layers/vocal_parallel_embedding.py +140 -0
  306. vllm/lora/lora_model.py +246 -0
  307. vllm/lora/lora_weights.py +227 -0
  308. vllm/lora/model_manager.py +690 -0
  309. vllm/lora/ops/__init__.py +0 -0
  310. vllm/lora/ops/ipex_ops/__init__.py +6 -0
  311. vllm/lora/ops/ipex_ops/lora_ops.py +57 -0
  312. vllm/lora/ops/torch_ops/__init__.py +20 -0
  313. vllm/lora/ops/torch_ops/lora_ops.py +128 -0
  314. vllm/lora/ops/triton_ops/README_TUNING.md +60 -0
  315. vllm/lora/ops/triton_ops/__init__.py +21 -0
  316. vllm/lora/ops/triton_ops/fused_moe_lora_op.py +665 -0
  317. vllm/lora/ops/triton_ops/kernel_utils.py +340 -0
  318. vllm/lora/ops/triton_ops/lora_expand_op.py +310 -0
  319. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +154 -0
  320. vllm/lora/ops/triton_ops/lora_shrink_op.py +287 -0
  321. vllm/lora/ops/triton_ops/utils.py +295 -0
  322. vllm/lora/ops/xla_ops/__init__.py +6 -0
  323. vllm/lora/ops/xla_ops/lora_ops.py +141 -0
  324. vllm/lora/peft_helper.py +128 -0
  325. vllm/lora/punica_wrapper/__init__.py +10 -0
  326. vllm/lora/punica_wrapper/punica_base.py +493 -0
  327. vllm/lora/punica_wrapper/punica_cpu.py +351 -0
  328. vllm/lora/punica_wrapper/punica_gpu.py +412 -0
  329. vllm/lora/punica_wrapper/punica_selector.py +21 -0
  330. vllm/lora/punica_wrapper/punica_tpu.py +358 -0
  331. vllm/lora/punica_wrapper/punica_xpu.py +276 -0
  332. vllm/lora/punica_wrapper/utils.py +150 -0
  333. vllm/lora/request.py +100 -0
  334. vllm/lora/resolver.py +88 -0
  335. vllm/lora/utils.py +315 -0
  336. vllm/lora/worker_manager.py +268 -0
  337. vllm/model_executor/__init__.py +11 -0
  338. vllm/model_executor/custom_op.py +199 -0
  339. vllm/model_executor/layers/__init__.py +0 -0
  340. vllm/model_executor/layers/activation.py +595 -0
  341. vllm/model_executor/layers/attention_layer_base.py +32 -0
  342. vllm/model_executor/layers/batch_invariant.py +1067 -0
  343. vllm/model_executor/layers/conv.py +256 -0
  344. vllm/model_executor/layers/fla/__init__.py +8 -0
  345. vllm/model_executor/layers/fla/ops/__init__.py +17 -0
  346. vllm/model_executor/layers/fla/ops/chunk.py +240 -0
  347. vllm/model_executor/layers/fla/ops/chunk_delta_h.py +344 -0
  348. vllm/model_executor/layers/fla/ops/chunk_o.py +183 -0
  349. vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +154 -0
  350. vllm/model_executor/layers/fla/ops/cumsum.py +280 -0
  351. vllm/model_executor/layers/fla/ops/fused_recurrent.py +390 -0
  352. vllm/model_executor/layers/fla/ops/index.py +41 -0
  353. vllm/model_executor/layers/fla/ops/kda.py +1351 -0
  354. vllm/model_executor/layers/fla/ops/l2norm.py +146 -0
  355. vllm/model_executor/layers/fla/ops/layernorm_guard.py +396 -0
  356. vllm/model_executor/layers/fla/ops/op.py +60 -0
  357. vllm/model_executor/layers/fla/ops/solve_tril.py +556 -0
  358. vllm/model_executor/layers/fla/ops/utils.py +194 -0
  359. vllm/model_executor/layers/fla/ops/wy_fast.py +158 -0
  360. vllm/model_executor/layers/fused_moe/__init__.py +114 -0
  361. vllm/model_executor/layers/fused_moe/all2all_utils.py +171 -0
  362. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +409 -0
  363. vllm/model_executor/layers/fused_moe/config.py +1043 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H100,dtype=fp8_w8a8.json +123 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200.json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_L40S.json +147 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI308X.json +213 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_L40S.json +147 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200.json +146 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200.json +146 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  454. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI300X.json +201 -0
  455. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
  456. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  457. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  458. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +147 -0
  459. vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
  460. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  461. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
  462. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI355_OAM,dtype=fp8_w8a8.json +164 -0
  463. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  464. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  465. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  466. vllm/model_executor/layers/fused_moe/configs/E=20,N=1536,device_name=NVIDIA_RTX_PRO_6000_Blackwell_Server_Edition,dtype=fp8_w8a8.json +147 -0
  467. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  468. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  469. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  470. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  471. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  472. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  473. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  474. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  475. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  476. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  477. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  478. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  479. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  480. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  481. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  482. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  483. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  484. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  485. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  486. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  487. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  488. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  489. vllm/model_executor/layers/fused_moe/configs/E=256,N=384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  490. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  491. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  492. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  493. vllm/model_executor/layers/fused_moe/configs/E=32,N=1408,device_name=NVIDIA_B200.json +147 -0
  494. vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  495. vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  496. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  497. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  498. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  499. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  500. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  501. vllm/model_executor/layers/fused_moe/configs/E=40,N=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  502. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  503. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  504. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  505. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
  506. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
  507. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +147 -0
  508. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  509. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
  510. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
  511. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
  512. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  513. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  514. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  515. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
  516. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
  517. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
  518. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  519. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  520. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
  521. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
  522. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
  523. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
  524. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
  525. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
  526. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  527. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  528. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  529. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  530. vllm/model_executor/layers/fused_moe/configs/E=62,N=128,device_name=AMD_Instinct_MI300X.json +200 -0
  531. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=AMD_Instinct_MI300X.json +200 -0
  532. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  533. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=AMD_Instinct_MI300X.json +200 -0
  534. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  535. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  536. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  537. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  538. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  539. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  540. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  541. vllm/model_executor/layers/fused_moe/configs/E=64,N=1408,device_name=NVIDIA_B200.json +147 -0
  542. vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  543. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  544. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  545. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  546. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  547. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
  548. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  549. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  550. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  551. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  552. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  553. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
  554. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  555. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  556. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  557. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  558. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  559. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  560. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  561. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H100_PCIe,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  562. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  563. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
  564. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  565. vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
  566. vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
  567. vllm/model_executor/layers/fused_moe/configs/E=72,N=192,device_name=AMD_Instinct_MI300X.json +200 -0
  568. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=AMD_Instinct_MI300X.json +200 -0
  569. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  570. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=AMD_Instinct_MI300X.json +200 -0
  571. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  572. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  573. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  574. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  575. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  576. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  577. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  578. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  579. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  580. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  581. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  582. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  583. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  584. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  585. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  586. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  587. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  588. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  589. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  590. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  591. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  592. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  593. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  594. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  595. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  596. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  597. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  598. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  599. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
  600. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  601. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  602. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  603. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  604. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  605. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  606. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  607. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  608. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  609. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  610. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  611. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  612. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  613. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  614. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  615. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  616. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  617. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  618. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  619. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  620. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  621. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  622. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  623. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  624. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  625. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  626. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  627. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  628. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  629. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  630. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  631. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  632. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  633. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  634. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  635. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  636. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  637. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  638. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  639. vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +292 -0
  640. vllm/model_executor/layers/fused_moe/cutlass_moe.py +1453 -0
  641. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +358 -0
  642. vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +427 -0
  643. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +420 -0
  644. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +434 -0
  645. vllm/model_executor/layers/fused_moe/flashinfer_cutedsl_moe.py +376 -0
  646. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +307 -0
  647. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +362 -0
  648. vllm/model_executor/layers/fused_moe/flashinfer_trtllm_moe.py +192 -0
  649. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1012 -0
  650. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +825 -0
  651. vllm/model_executor/layers/fused_moe/fused_moe.py +2223 -0
  652. vllm/model_executor/layers/fused_moe/fused_moe_method_base.py +103 -0
  653. vllm/model_executor/layers/fused_moe/fused_moe_modular_method.py +119 -0
  654. vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +524 -0
  655. vllm/model_executor/layers/fused_moe/layer.py +2133 -0
  656. vllm/model_executor/layers/fused_moe/modular_kernel.py +1302 -0
  657. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +192 -0
  658. vllm/model_executor/layers/fused_moe/moe_pallas.py +83 -0
  659. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +229 -0
  660. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  661. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +362 -0
  662. vllm/model_executor/layers/fused_moe/prepare_finalize.py +78 -0
  663. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +265 -0
  664. vllm/model_executor/layers/fused_moe/routing_simulator.py +310 -0
  665. vllm/model_executor/layers/fused_moe/shared_fused_moe.py +96 -0
  666. vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +171 -0
  667. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +163 -0
  668. vllm/model_executor/layers/fused_moe/trtllm_moe.py +143 -0
  669. vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py +455 -0
  670. vllm/model_executor/layers/fused_moe/utils.py +332 -0
  671. vllm/model_executor/layers/kda.py +442 -0
  672. vllm/model_executor/layers/layernorm.py +442 -0
  673. vllm/model_executor/layers/lightning_attn.py +735 -0
  674. vllm/model_executor/layers/linear.py +1424 -0
  675. vllm/model_executor/layers/logits_processor.py +106 -0
  676. vllm/model_executor/layers/mamba/__init__.py +0 -0
  677. vllm/model_executor/layers/mamba/abstract.py +68 -0
  678. vllm/model_executor/layers/mamba/linear_attn.py +388 -0
  679. vllm/model_executor/layers/mamba/mamba_mixer.py +526 -0
  680. vllm/model_executor/layers/mamba/mamba_mixer2.py +930 -0
  681. vllm/model_executor/layers/mamba/mamba_utils.py +225 -0
  682. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  683. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +1240 -0
  684. vllm/model_executor/layers/mamba/ops/layernorm_gated.py +172 -0
  685. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +586 -0
  686. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +211 -0
  687. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +456 -0
  688. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +700 -0
  689. vllm/model_executor/layers/mamba/ops/ssd_combined.py +230 -0
  690. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +157 -0
  691. vllm/model_executor/layers/mamba/short_conv.py +255 -0
  692. vllm/model_executor/layers/mla.py +176 -0
  693. vllm/model_executor/layers/pooler.py +830 -0
  694. vllm/model_executor/layers/quantization/__init__.py +179 -0
  695. vllm/model_executor/layers/quantization/auto_round.py +454 -0
  696. vllm/model_executor/layers/quantization/awq.py +277 -0
  697. vllm/model_executor/layers/quantization/awq_marlin.py +793 -0
  698. vllm/model_executor/layers/quantization/awq_triton.py +337 -0
  699. vllm/model_executor/layers/quantization/base_config.py +170 -0
  700. vllm/model_executor/layers/quantization/bitblas.py +502 -0
  701. vllm/model_executor/layers/quantization/bitsandbytes.py +626 -0
  702. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +3 -0
  703. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +986 -0
  704. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +2645 -0
  705. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +35 -0
  706. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +392 -0
  707. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  708. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +176 -0
  709. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +124 -0
  710. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +218 -0
  711. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +176 -0
  712. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +153 -0
  713. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +138 -0
  714. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +200 -0
  715. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +125 -0
  716. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +230 -0
  717. vllm/model_executor/layers/quantization/compressed_tensors/transform/__init__.py +0 -0
  718. vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +260 -0
  719. vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +173 -0
  720. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/__init__.py +0 -0
  721. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +64 -0
  722. vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
  723. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +224 -0
  724. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  725. vllm/model_executor/layers/quantization/cpu_wna16.py +625 -0
  726. vllm/model_executor/layers/quantization/deepspeedfp.py +218 -0
  727. vllm/model_executor/layers/quantization/experts_int8.py +207 -0
  728. vllm/model_executor/layers/quantization/fbgemm_fp8.py +195 -0
  729. vllm/model_executor/layers/quantization/fp8.py +1461 -0
  730. vllm/model_executor/layers/quantization/fp_quant.py +420 -0
  731. vllm/model_executor/layers/quantization/gguf.py +677 -0
  732. vllm/model_executor/layers/quantization/gptq.py +393 -0
  733. vllm/model_executor/layers/quantization/gptq_bitblas.py +482 -0
  734. vllm/model_executor/layers/quantization/gptq_marlin.py +932 -0
  735. vllm/model_executor/layers/quantization/gptq_marlin_24.py +320 -0
  736. vllm/model_executor/layers/quantization/hqq_marlin.py +372 -0
  737. vllm/model_executor/layers/quantization/inc.py +65 -0
  738. vllm/model_executor/layers/quantization/input_quant_fp8.py +202 -0
  739. vllm/model_executor/layers/quantization/ipex_quant.py +487 -0
  740. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  741. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +94 -0
  742. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +109 -0
  743. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
  744. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +323 -0
  745. vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +98 -0
  746. vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +130 -0
  747. vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +111 -0
  748. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +161 -0
  749. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +159 -0
  750. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +200 -0
  751. vllm/model_executor/layers/quantization/kernels/mixed_precision/xpu.py +97 -0
  752. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +76 -0
  753. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +81 -0
  754. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +128 -0
  755. vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +220 -0
  756. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +147 -0
  757. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +71 -0
  758. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +106 -0
  759. vllm/model_executor/layers/quantization/kv_cache.py +153 -0
  760. vllm/model_executor/layers/quantization/modelopt.py +1684 -0
  761. vllm/model_executor/layers/quantization/moe_wna16.py +516 -0
  762. vllm/model_executor/layers/quantization/mxfp4.py +1140 -0
  763. vllm/model_executor/layers/quantization/petit.py +319 -0
  764. vllm/model_executor/layers/quantization/ptpc_fp8.py +136 -0
  765. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  766. vllm/model_executor/layers/quantization/quark/quark.py +527 -0
  767. vllm/model_executor/layers/quantization/quark/quark_moe.py +622 -0
  768. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  769. vllm/model_executor/layers/quantization/quark/schemes/quark_ocp_mx.py +343 -0
  770. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  771. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +179 -0
  772. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +139 -0
  773. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  774. vllm/model_executor/layers/quantization/qutlass_utils.py +185 -0
  775. vllm/model_executor/layers/quantization/rtn.py +621 -0
  776. vllm/model_executor/layers/quantization/schema.py +90 -0
  777. vllm/model_executor/layers/quantization/torchao.py +380 -0
  778. vllm/model_executor/layers/quantization/tpu_int8.py +139 -0
  779. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  780. vllm/model_executor/layers/quantization/utils/allspark_utils.py +67 -0
  781. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +229 -0
  782. vllm/model_executor/layers/quantization/utils/configs/N=10240,K=5120,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  783. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  784. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  785. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  786. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  787. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  788. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  789. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  790. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  791. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  792. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  793. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  794. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  795. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  796. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  797. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  798. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  799. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  800. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  801. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  802. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  803. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  804. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  805. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  806. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  807. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  808. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  809. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  810. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  811. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  812. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  813. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  814. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  815. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  816. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  817. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  818. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  819. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  820. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  821. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  822. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  823. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  824. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  825. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  826. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  827. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  828. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  829. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  830. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  831. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  832. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  833. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  834. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  835. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  836. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  837. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  838. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  839. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  840. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  841. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  842. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  843. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  844. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  845. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  846. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  847. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  848. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  849. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  850. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  851. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  852. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  853. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  854. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  855. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  856. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  857. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  858. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  859. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  860. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  861. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  862. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  863. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  864. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  865. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  866. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  867. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  868. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  869. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  870. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  871. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  872. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  873. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  874. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  875. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  876. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  877. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  878. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  879. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  880. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  881. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  882. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  883. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  884. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  885. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  886. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  887. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  888. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  889. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  890. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  891. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  892. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  893. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  894. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  895. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  896. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  897. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  898. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  899. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  900. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  901. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  902. vllm/model_executor/layers/quantization/utils/configs/N=5120,K=25600,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  903. vllm/model_executor/layers/quantization/utils/configs/N=5120,K=8192,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  904. vllm/model_executor/layers/quantization/utils/configs/N=51200,K=5120,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  905. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  906. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  907. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  908. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  909. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  910. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  911. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  912. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  913. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  914. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  915. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  916. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  917. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  918. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  919. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  920. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  921. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  922. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  923. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  924. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  925. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  926. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  927. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  928. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  929. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  930. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  931. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  932. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  933. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  934. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  935. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  936. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  937. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  938. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  939. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  940. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  941. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  942. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  943. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  944. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  945. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  946. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  947. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  948. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  949. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  950. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  951. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  952. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  953. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  954. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  955. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  956. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  957. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  958. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  959. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  960. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  961. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  962. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  963. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  964. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  965. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  966. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  967. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  968. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  969. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  970. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  971. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  972. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  973. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  974. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  975. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  976. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  977. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  978. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  979. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  980. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  981. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  982. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  983. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  984. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  985. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  986. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  987. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  988. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  989. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  990. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  991. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  992. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  993. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  994. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  995. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  996. vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
  997. vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +412 -0
  998. vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +312 -0
  999. vllm/model_executor/layers/quantization/utils/fp8_utils.py +1453 -0
  1000. vllm/model_executor/layers/quantization/utils/gptq_utils.py +158 -0
  1001. vllm/model_executor/layers/quantization/utils/int8_utils.py +474 -0
  1002. vllm/model_executor/layers/quantization/utils/layer_utils.py +41 -0
  1003. vllm/model_executor/layers/quantization/utils/machete_utils.py +56 -0
  1004. vllm/model_executor/layers/quantization/utils/marlin_utils.py +678 -0
  1005. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +452 -0
  1006. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +381 -0
  1007. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +219 -0
  1008. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +467 -0
  1009. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +189 -0
  1010. vllm/model_executor/layers/quantization/utils/mxfp6_utils.py +142 -0
  1011. vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +24 -0
  1012. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +142 -0
  1013. vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +67 -0
  1014. vllm/model_executor/layers/quantization/utils/ocp_mx_utils.py +51 -0
  1015. vllm/model_executor/layers/quantization/utils/petit_utils.py +124 -0
  1016. vllm/model_executor/layers/quantization/utils/quant_utils.py +741 -0
  1017. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +519 -0
  1018. vllm/model_executor/layers/resampler.py +283 -0
  1019. vllm/model_executor/layers/rotary_embedding/__init__.py +289 -0
  1020. vllm/model_executor/layers/rotary_embedding/base.py +254 -0
  1021. vllm/model_executor/layers/rotary_embedding/common.py +279 -0
  1022. vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +165 -0
  1023. vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +215 -0
  1024. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +43 -0
  1025. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +68 -0
  1026. vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +82 -0
  1027. vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
  1028. vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
  1029. vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +80 -0
  1030. vllm/model_executor/layers/rotary_embedding/mrope.py +412 -0
  1031. vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +47 -0
  1032. vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +159 -0
  1033. vllm/model_executor/layers/rotary_embedding/xdrope.py +160 -0
  1034. vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +84 -0
  1035. vllm/model_executor/layers/utils.py +251 -0
  1036. vllm/model_executor/layers/vocab_parallel_embedding.py +558 -0
  1037. vllm/model_executor/model_loader/__init__.py +150 -0
  1038. vllm/model_executor/model_loader/base_loader.py +57 -0
  1039. vllm/model_executor/model_loader/bitsandbytes_loader.py +822 -0
  1040. vllm/model_executor/model_loader/default_loader.py +321 -0
  1041. vllm/model_executor/model_loader/dummy_loader.py +28 -0
  1042. vllm/model_executor/model_loader/gguf_loader.py +371 -0
  1043. vllm/model_executor/model_loader/online_quantization.py +275 -0
  1044. vllm/model_executor/model_loader/runai_streamer_loader.py +116 -0
  1045. vllm/model_executor/model_loader/sharded_state_loader.py +214 -0
  1046. vllm/model_executor/model_loader/tensorizer.py +790 -0
  1047. vllm/model_executor/model_loader/tensorizer_loader.py +151 -0
  1048. vllm/model_executor/model_loader/tpu.py +118 -0
  1049. vllm/model_executor/model_loader/utils.py +292 -0
  1050. vllm/model_executor/model_loader/weight_utils.py +1157 -0
  1051. vllm/model_executor/models/__init__.py +44 -0
  1052. vllm/model_executor/models/adapters.py +522 -0
  1053. vllm/model_executor/models/afmoe.py +696 -0
  1054. vllm/model_executor/models/aimv2.py +248 -0
  1055. vllm/model_executor/models/apertus.py +565 -0
  1056. vllm/model_executor/models/arcee.py +428 -0
  1057. vllm/model_executor/models/arctic.py +633 -0
  1058. vllm/model_executor/models/aria.py +653 -0
  1059. vllm/model_executor/models/audioflamingo3.py +639 -0
  1060. vllm/model_executor/models/aya_vision.py +448 -0
  1061. vllm/model_executor/models/bagel.py +584 -0
  1062. vllm/model_executor/models/baichuan.py +493 -0
  1063. vllm/model_executor/models/bailing_moe.py +642 -0
  1064. vllm/model_executor/models/bamba.py +511 -0
  1065. vllm/model_executor/models/bee.py +157 -0
  1066. vllm/model_executor/models/bert.py +925 -0
  1067. vllm/model_executor/models/bert_with_rope.py +732 -0
  1068. vllm/model_executor/models/blip.py +350 -0
  1069. vllm/model_executor/models/blip2.py +693 -0
  1070. vllm/model_executor/models/bloom.py +390 -0
  1071. vllm/model_executor/models/chameleon.py +1095 -0
  1072. vllm/model_executor/models/chatglm.py +502 -0
  1073. vllm/model_executor/models/clip.py +1004 -0
  1074. vllm/model_executor/models/cohere2_vision.py +470 -0
  1075. vllm/model_executor/models/commandr.py +469 -0
  1076. vllm/model_executor/models/config.py +531 -0
  1077. vllm/model_executor/models/dbrx.py +484 -0
  1078. vllm/model_executor/models/deepencoder.py +676 -0
  1079. vllm/model_executor/models/deepseek_eagle.py +252 -0
  1080. vllm/model_executor/models/deepseek_mtp.py +446 -0
  1081. vllm/model_executor/models/deepseek_ocr.py +591 -0
  1082. vllm/model_executor/models/deepseek_v2.py +1710 -0
  1083. vllm/model_executor/models/deepseek_vl2.py +642 -0
  1084. vllm/model_executor/models/dots1.py +565 -0
  1085. vllm/model_executor/models/dots_ocr.py +821 -0
  1086. vllm/model_executor/models/ernie45.py +53 -0
  1087. vllm/model_executor/models/ernie45_moe.py +754 -0
  1088. vllm/model_executor/models/ernie45_vl.py +1621 -0
  1089. vllm/model_executor/models/ernie45_vl_moe.py +800 -0
  1090. vllm/model_executor/models/ernie_mtp.py +279 -0
  1091. vllm/model_executor/models/exaone.py +524 -0
  1092. vllm/model_executor/models/exaone4.py +516 -0
  1093. vllm/model_executor/models/fairseq2_llama.py +154 -0
  1094. vllm/model_executor/models/falcon.py +543 -0
  1095. vllm/model_executor/models/falcon_h1.py +675 -0
  1096. vllm/model_executor/models/flex_olmo.py +155 -0
  1097. vllm/model_executor/models/fuyu.py +371 -0
  1098. vllm/model_executor/models/gemma.py +425 -0
  1099. vllm/model_executor/models/gemma2.py +435 -0
  1100. vllm/model_executor/models/gemma3.py +507 -0
  1101. vllm/model_executor/models/gemma3_mm.py +664 -0
  1102. vllm/model_executor/models/gemma3n.py +1166 -0
  1103. vllm/model_executor/models/gemma3n_mm.py +810 -0
  1104. vllm/model_executor/models/glm.py +24 -0
  1105. vllm/model_executor/models/glm4.py +295 -0
  1106. vllm/model_executor/models/glm4_1v.py +1808 -0
  1107. vllm/model_executor/models/glm4_moe.py +736 -0
  1108. vllm/model_executor/models/glm4_moe_mtp.py +359 -0
  1109. vllm/model_executor/models/glm4v.py +783 -0
  1110. vllm/model_executor/models/gpt2.py +397 -0
  1111. vllm/model_executor/models/gpt_bigcode.py +339 -0
  1112. vllm/model_executor/models/gpt_j.py +346 -0
  1113. vllm/model_executor/models/gpt_neox.py +340 -0
  1114. vllm/model_executor/models/gpt_oss.py +744 -0
  1115. vllm/model_executor/models/granite.py +475 -0
  1116. vllm/model_executor/models/granite_speech.py +912 -0
  1117. vllm/model_executor/models/granitemoe.py +560 -0
  1118. vllm/model_executor/models/granitemoehybrid.py +703 -0
  1119. vllm/model_executor/models/granitemoeshared.py +328 -0
  1120. vllm/model_executor/models/gritlm.py +243 -0
  1121. vllm/model_executor/models/grok1.py +554 -0
  1122. vllm/model_executor/models/h2ovl.py +554 -0
  1123. vllm/model_executor/models/hunyuan_v1.py +1040 -0
  1124. vllm/model_executor/models/hunyuan_vision.py +1034 -0
  1125. vllm/model_executor/models/hyperclovax_vision.py +1164 -0
  1126. vllm/model_executor/models/idefics2_vision_model.py +427 -0
  1127. vllm/model_executor/models/idefics3.py +716 -0
  1128. vllm/model_executor/models/interfaces.py +1179 -0
  1129. vllm/model_executor/models/interfaces_base.py +228 -0
  1130. vllm/model_executor/models/intern_vit.py +454 -0
  1131. vllm/model_executor/models/internlm2.py +453 -0
  1132. vllm/model_executor/models/internlm2_ve.py +139 -0
  1133. vllm/model_executor/models/interns1.py +828 -0
  1134. vllm/model_executor/models/interns1_vit.py +433 -0
  1135. vllm/model_executor/models/internvl.py +1450 -0
  1136. vllm/model_executor/models/jais.py +397 -0
  1137. vllm/model_executor/models/jais2.py +529 -0
  1138. vllm/model_executor/models/jamba.py +609 -0
  1139. vllm/model_executor/models/jina_vl.py +147 -0
  1140. vllm/model_executor/models/keye.py +1706 -0
  1141. vllm/model_executor/models/keye_vl1_5.py +726 -0
  1142. vllm/model_executor/models/kimi_linear.py +658 -0
  1143. vllm/model_executor/models/kimi_vl.py +576 -0
  1144. vllm/model_executor/models/lfm2.py +515 -0
  1145. vllm/model_executor/models/lfm2_moe.py +745 -0
  1146. vllm/model_executor/models/lightonocr.py +195 -0
  1147. vllm/model_executor/models/llama.py +700 -0
  1148. vllm/model_executor/models/llama4.py +856 -0
  1149. vllm/model_executor/models/llama4_eagle.py +225 -0
  1150. vllm/model_executor/models/llama_eagle.py +213 -0
  1151. vllm/model_executor/models/llama_eagle3.py +375 -0
  1152. vllm/model_executor/models/llava.py +840 -0
  1153. vllm/model_executor/models/llava_next.py +581 -0
  1154. vllm/model_executor/models/llava_next_video.py +465 -0
  1155. vllm/model_executor/models/llava_onevision.py +921 -0
  1156. vllm/model_executor/models/longcat_flash.py +743 -0
  1157. vllm/model_executor/models/longcat_flash_mtp.py +349 -0
  1158. vllm/model_executor/models/mamba.py +276 -0
  1159. vllm/model_executor/models/mamba2.py +288 -0
  1160. vllm/model_executor/models/medusa.py +179 -0
  1161. vllm/model_executor/models/midashenglm.py +826 -0
  1162. vllm/model_executor/models/mimo.py +188 -0
  1163. vllm/model_executor/models/mimo_mtp.py +294 -0
  1164. vllm/model_executor/models/minicpm.py +656 -0
  1165. vllm/model_executor/models/minicpm3.py +233 -0
  1166. vllm/model_executor/models/minicpm_eagle.py +385 -0
  1167. vllm/model_executor/models/minicpmo.py +768 -0
  1168. vllm/model_executor/models/minicpmv.py +1742 -0
  1169. vllm/model_executor/models/minimax_m2.py +550 -0
  1170. vllm/model_executor/models/minimax_text_01.py +1007 -0
  1171. vllm/model_executor/models/minimax_vl_01.py +394 -0
  1172. vllm/model_executor/models/mistral3.py +635 -0
  1173. vllm/model_executor/models/mistral_large_3.py +63 -0
  1174. vllm/model_executor/models/mistral_large_3_eagle.py +136 -0
  1175. vllm/model_executor/models/mixtral.py +598 -0
  1176. vllm/model_executor/models/mllama4.py +1149 -0
  1177. vllm/model_executor/models/mlp_speculator.py +235 -0
  1178. vllm/model_executor/models/modernbert.py +451 -0
  1179. vllm/model_executor/models/module_mapping.py +74 -0
  1180. vllm/model_executor/models/molmo.py +1550 -0
  1181. vllm/model_executor/models/moonvit.py +686 -0
  1182. vllm/model_executor/models/mpt.py +335 -0
  1183. vllm/model_executor/models/nano_nemotron_vl.py +1730 -0
  1184. vllm/model_executor/models/nemotron.py +499 -0
  1185. vllm/model_executor/models/nemotron_h.py +900 -0
  1186. vllm/model_executor/models/nemotron_nas.py +471 -0
  1187. vllm/model_executor/models/nemotron_vl.py +651 -0
  1188. vllm/model_executor/models/nvlm_d.py +216 -0
  1189. vllm/model_executor/models/olmo.py +412 -0
  1190. vllm/model_executor/models/olmo2.py +454 -0
  1191. vllm/model_executor/models/olmoe.py +493 -0
  1192. vllm/model_executor/models/opencua.py +262 -0
  1193. vllm/model_executor/models/openpangu.py +1049 -0
  1194. vllm/model_executor/models/openpangu_mtp.py +265 -0
  1195. vllm/model_executor/models/opt.py +426 -0
  1196. vllm/model_executor/models/orion.py +365 -0
  1197. vllm/model_executor/models/ouro.py +507 -0
  1198. vllm/model_executor/models/ovis.py +557 -0
  1199. vllm/model_executor/models/ovis2_5.py +661 -0
  1200. vllm/model_executor/models/paddleocr_vl.py +1300 -0
  1201. vllm/model_executor/models/paligemma.py +408 -0
  1202. vllm/model_executor/models/persimmon.py +373 -0
  1203. vllm/model_executor/models/phi.py +363 -0
  1204. vllm/model_executor/models/phi3.py +18 -0
  1205. vllm/model_executor/models/phi3v.py +729 -0
  1206. vllm/model_executor/models/phi4mm.py +1251 -0
  1207. vllm/model_executor/models/phi4mm_audio.py +1296 -0
  1208. vllm/model_executor/models/phi4mm_utils.py +1907 -0
  1209. vllm/model_executor/models/phimoe.py +669 -0
  1210. vllm/model_executor/models/pixtral.py +1379 -0
  1211. vllm/model_executor/models/plamo2.py +965 -0
  1212. vllm/model_executor/models/plamo3.py +440 -0
  1213. vllm/model_executor/models/qwen.py +365 -0
  1214. vllm/model_executor/models/qwen2.py +600 -0
  1215. vllm/model_executor/models/qwen2_5_omni_thinker.py +1219 -0
  1216. vllm/model_executor/models/qwen2_5_vl.py +1569 -0
  1217. vllm/model_executor/models/qwen2_audio.py +471 -0
  1218. vllm/model_executor/models/qwen2_moe.py +597 -0
  1219. vllm/model_executor/models/qwen2_rm.py +123 -0
  1220. vllm/model_executor/models/qwen2_vl.py +1568 -0
  1221. vllm/model_executor/models/qwen3.py +331 -0
  1222. vllm/model_executor/models/qwen3_moe.py +751 -0
  1223. vllm/model_executor/models/qwen3_next.py +1395 -0
  1224. vllm/model_executor/models/qwen3_next_mtp.py +296 -0
  1225. vllm/model_executor/models/qwen3_omni_moe_thinker.py +1793 -0
  1226. vllm/model_executor/models/qwen3_vl.py +2092 -0
  1227. vllm/model_executor/models/qwen3_vl_moe.py +474 -0
  1228. vllm/model_executor/models/qwen_vl.py +801 -0
  1229. vllm/model_executor/models/radio.py +555 -0
  1230. vllm/model_executor/models/registry.py +1189 -0
  1231. vllm/model_executor/models/roberta.py +259 -0
  1232. vllm/model_executor/models/rvl.py +107 -0
  1233. vllm/model_executor/models/seed_oss.py +492 -0
  1234. vllm/model_executor/models/siglip.py +1244 -0
  1235. vllm/model_executor/models/siglip2navit.py +658 -0
  1236. vllm/model_executor/models/skyworkr1v.py +951 -0
  1237. vllm/model_executor/models/smolvlm.py +38 -0
  1238. vllm/model_executor/models/solar.py +484 -0
  1239. vllm/model_executor/models/stablelm.py +354 -0
  1240. vllm/model_executor/models/starcoder2.py +365 -0
  1241. vllm/model_executor/models/step3_text.py +554 -0
  1242. vllm/model_executor/models/step3_vl.py +1147 -0
  1243. vllm/model_executor/models/swin.py +514 -0
  1244. vllm/model_executor/models/tarsier.py +617 -0
  1245. vllm/model_executor/models/telechat2.py +153 -0
  1246. vllm/model_executor/models/teleflm.py +78 -0
  1247. vllm/model_executor/models/terratorch.py +318 -0
  1248. vllm/model_executor/models/transformers/__init__.py +127 -0
  1249. vllm/model_executor/models/transformers/base.py +518 -0
  1250. vllm/model_executor/models/transformers/causal.py +65 -0
  1251. vllm/model_executor/models/transformers/legacy.py +90 -0
  1252. vllm/model_executor/models/transformers/moe.py +325 -0
  1253. vllm/model_executor/models/transformers/multimodal.py +411 -0
  1254. vllm/model_executor/models/transformers/pooling.py +119 -0
  1255. vllm/model_executor/models/transformers/utils.py +213 -0
  1256. vllm/model_executor/models/ultravox.py +766 -0
  1257. vllm/model_executor/models/utils.py +832 -0
  1258. vllm/model_executor/models/vision.py +546 -0
  1259. vllm/model_executor/models/voxtral.py +841 -0
  1260. vllm/model_executor/models/whisper.py +971 -0
  1261. vllm/model_executor/models/zamba2.py +979 -0
  1262. vllm/model_executor/parameter.py +642 -0
  1263. vllm/model_executor/utils.py +119 -0
  1264. vllm/model_executor/warmup/__init__.py +0 -0
  1265. vllm/model_executor/warmup/deep_gemm_warmup.py +314 -0
  1266. vllm/model_executor/warmup/kernel_warmup.py +98 -0
  1267. vllm/multimodal/__init__.py +40 -0
  1268. vllm/multimodal/audio.py +147 -0
  1269. vllm/multimodal/base.py +56 -0
  1270. vllm/multimodal/cache.py +823 -0
  1271. vllm/multimodal/evs.py +294 -0
  1272. vllm/multimodal/hasher.py +120 -0
  1273. vllm/multimodal/image.py +142 -0
  1274. vllm/multimodal/inputs.py +1089 -0
  1275. vllm/multimodal/parse.py +565 -0
  1276. vllm/multimodal/processing.py +2240 -0
  1277. vllm/multimodal/profiling.py +351 -0
  1278. vllm/multimodal/registry.py +357 -0
  1279. vllm/multimodal/utils.py +513 -0
  1280. vllm/multimodal/video.py +340 -0
  1281. vllm/outputs.py +345 -0
  1282. vllm/platforms/__init__.py +277 -0
  1283. vllm/platforms/cpu.py +421 -0
  1284. vllm/platforms/cuda.py +618 -0
  1285. vllm/platforms/interface.py +695 -0
  1286. vllm/platforms/rocm.py +564 -0
  1287. vllm/platforms/tpu.py +295 -0
  1288. vllm/platforms/xpu.py +277 -0
  1289. vllm/plugins/__init__.py +81 -0
  1290. vllm/plugins/io_processors/__init__.py +68 -0
  1291. vllm/plugins/io_processors/interface.py +77 -0
  1292. vllm/plugins/lora_resolvers/__init__.py +0 -0
  1293. vllm/plugins/lora_resolvers/filesystem_resolver.py +52 -0
  1294. vllm/pooling_params.py +230 -0
  1295. vllm/profiler/__init__.py +0 -0
  1296. vllm/profiler/layerwise_profile.py +392 -0
  1297. vllm/profiler/utils.py +151 -0
  1298. vllm/profiler/wrapper.py +241 -0
  1299. vllm/py.typed +2 -0
  1300. vllm/ray/__init__.py +0 -0
  1301. vllm/ray/lazy_utils.py +30 -0
  1302. vllm/ray/ray_env.py +79 -0
  1303. vllm/reasoning/__init__.py +96 -0
  1304. vllm/reasoning/abs_reasoning_parsers.py +318 -0
  1305. vllm/reasoning/basic_parsers.py +175 -0
  1306. vllm/reasoning/deepseek_r1_reasoning_parser.py +67 -0
  1307. vllm/reasoning/deepseek_v3_reasoning_parser.py +67 -0
  1308. vllm/reasoning/ernie45_reasoning_parser.py +165 -0
  1309. vllm/reasoning/glm4_moe_reasoning_parser.py +171 -0
  1310. vllm/reasoning/gptoss_reasoning_parser.py +173 -0
  1311. vllm/reasoning/granite_reasoning_parser.py +363 -0
  1312. vllm/reasoning/holo2_reasoning_parser.py +88 -0
  1313. vllm/reasoning/hunyuan_a13b_reasoning_parser.py +237 -0
  1314. vllm/reasoning/identity_reasoning_parser.py +63 -0
  1315. vllm/reasoning/minimax_m2_reasoning_parser.py +110 -0
  1316. vllm/reasoning/mistral_reasoning_parser.py +154 -0
  1317. vllm/reasoning/olmo3_reasoning_parser.py +302 -0
  1318. vllm/reasoning/qwen3_reasoning_parser.py +67 -0
  1319. vllm/reasoning/seedoss_reasoning_parser.py +27 -0
  1320. vllm/reasoning/step3_reasoning_parser.py +107 -0
  1321. vllm/sampling_params.py +597 -0
  1322. vllm/scalar_type.py +355 -0
  1323. vllm/scripts.py +17 -0
  1324. vllm/sequence.py +98 -0
  1325. vllm/tasks.py +13 -0
  1326. vllm/third_party/__init__.py +0 -0
  1327. vllm/third_party/pynvml.py +6140 -0
  1328. vllm/tokenizers/__init__.py +20 -0
  1329. vllm/tokenizers/deepseek_v32.py +175 -0
  1330. vllm/tokenizers/deepseek_v32_encoding.py +459 -0
  1331. vllm/tokenizers/detokenizer_utils.py +198 -0
  1332. vllm/tokenizers/hf.py +119 -0
  1333. vllm/tokenizers/mistral.py +567 -0
  1334. vllm/tokenizers/protocol.py +114 -0
  1335. vllm/tokenizers/registry.py +233 -0
  1336. vllm/tool_parsers/__init__.py +150 -0
  1337. vllm/tool_parsers/abstract_tool_parser.py +273 -0
  1338. vllm/tool_parsers/deepseekv31_tool_parser.py +388 -0
  1339. vllm/tool_parsers/deepseekv32_tool_parser.py +591 -0
  1340. vllm/tool_parsers/deepseekv3_tool_parser.py +390 -0
  1341. vllm/tool_parsers/ernie45_tool_parser.py +210 -0
  1342. vllm/tool_parsers/gigachat3_tool_parser.py +190 -0
  1343. vllm/tool_parsers/glm4_moe_tool_parser.py +200 -0
  1344. vllm/tool_parsers/granite_20b_fc_tool_parser.py +273 -0
  1345. vllm/tool_parsers/granite_tool_parser.py +253 -0
  1346. vllm/tool_parsers/hermes_tool_parser.py +495 -0
  1347. vllm/tool_parsers/hunyuan_a13b_tool_parser.py +420 -0
  1348. vllm/tool_parsers/internlm2_tool_parser.py +227 -0
  1349. vllm/tool_parsers/jamba_tool_parser.py +323 -0
  1350. vllm/tool_parsers/kimi_k2_tool_parser.py +590 -0
  1351. vllm/tool_parsers/llama4_pythonic_tool_parser.py +341 -0
  1352. vllm/tool_parsers/llama_tool_parser.py +324 -0
  1353. vllm/tool_parsers/longcat_tool_parser.py +37 -0
  1354. vllm/tool_parsers/minimax_m2_tool_parser.py +643 -0
  1355. vllm/tool_parsers/minimax_tool_parser.py +849 -0
  1356. vllm/tool_parsers/mistral_tool_parser.py +585 -0
  1357. vllm/tool_parsers/olmo3_tool_parser.py +366 -0
  1358. vllm/tool_parsers/openai_tool_parser.py +102 -0
  1359. vllm/tool_parsers/phi4mini_tool_parser.py +120 -0
  1360. vllm/tool_parsers/pythonic_tool_parser.py +332 -0
  1361. vllm/tool_parsers/qwen3coder_tool_parser.py +781 -0
  1362. vllm/tool_parsers/qwen3xml_tool_parser.py +1316 -0
  1363. vllm/tool_parsers/seed_oss_tool_parser.py +744 -0
  1364. vllm/tool_parsers/step3_tool_parser.py +303 -0
  1365. vllm/tool_parsers/utils.py +229 -0
  1366. vllm/tool_parsers/xlam_tool_parser.py +556 -0
  1367. vllm/tracing.py +135 -0
  1368. vllm/transformers_utils/__init__.py +26 -0
  1369. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1370. vllm/transformers_utils/chat_templates/registry.py +73 -0
  1371. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1372. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1373. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1374. vllm/transformers_utils/chat_templates/template_deepseek_ocr.jinja +14 -0
  1375. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1376. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1377. vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
  1378. vllm/transformers_utils/config.py +1144 -0
  1379. vllm/transformers_utils/config_parser_base.py +20 -0
  1380. vllm/transformers_utils/configs/__init__.py +102 -0
  1381. vllm/transformers_utils/configs/afmoe.py +87 -0
  1382. vllm/transformers_utils/configs/arctic.py +216 -0
  1383. vllm/transformers_utils/configs/bagel.py +53 -0
  1384. vllm/transformers_utils/configs/chatglm.py +75 -0
  1385. vllm/transformers_utils/configs/deepseek_vl2.py +126 -0
  1386. vllm/transformers_utils/configs/dotsocr.py +71 -0
  1387. vllm/transformers_utils/configs/eagle.py +90 -0
  1388. vllm/transformers_utils/configs/falcon.py +89 -0
  1389. vllm/transformers_utils/configs/flex_olmo.py +82 -0
  1390. vllm/transformers_utils/configs/hunyuan_vl.py +322 -0
  1391. vllm/transformers_utils/configs/jais.py +243 -0
  1392. vllm/transformers_utils/configs/kimi_linear.py +148 -0
  1393. vllm/transformers_utils/configs/kimi_vl.py +38 -0
  1394. vllm/transformers_utils/configs/lfm2_moe.py +163 -0
  1395. vllm/transformers_utils/configs/medusa.py +65 -0
  1396. vllm/transformers_utils/configs/midashenglm.py +103 -0
  1397. vllm/transformers_utils/configs/mistral.py +235 -0
  1398. vllm/transformers_utils/configs/mlp_speculator.py +69 -0
  1399. vllm/transformers_utils/configs/moonvit.py +33 -0
  1400. vllm/transformers_utils/configs/nemotron.py +220 -0
  1401. vllm/transformers_utils/configs/nemotron_h.py +284 -0
  1402. vllm/transformers_utils/configs/olmo3.py +83 -0
  1403. vllm/transformers_utils/configs/ovis.py +182 -0
  1404. vllm/transformers_utils/configs/qwen3_next.py +277 -0
  1405. vllm/transformers_utils/configs/radio.py +89 -0
  1406. vllm/transformers_utils/configs/speculators/__init__.py +2 -0
  1407. vllm/transformers_utils/configs/speculators/algos.py +38 -0
  1408. vllm/transformers_utils/configs/speculators/base.py +114 -0
  1409. vllm/transformers_utils/configs/step3_vl.py +178 -0
  1410. vllm/transformers_utils/configs/tarsier2.py +24 -0
  1411. vllm/transformers_utils/configs/ultravox.py +120 -0
  1412. vllm/transformers_utils/dynamic_module.py +59 -0
  1413. vllm/transformers_utils/gguf_utils.py +280 -0
  1414. vllm/transformers_utils/processor.py +424 -0
  1415. vllm/transformers_utils/processors/__init__.py +25 -0
  1416. vllm/transformers_utils/processors/bagel.py +73 -0
  1417. vllm/transformers_utils/processors/deepseek_ocr.py +438 -0
  1418. vllm/transformers_utils/processors/deepseek_vl2.py +406 -0
  1419. vllm/transformers_utils/processors/hunyuan_vl.py +233 -0
  1420. vllm/transformers_utils/processors/hunyuan_vl_image.py +477 -0
  1421. vllm/transformers_utils/processors/ovis.py +453 -0
  1422. vllm/transformers_utils/processors/ovis2_5.py +468 -0
  1423. vllm/transformers_utils/repo_utils.py +287 -0
  1424. vllm/transformers_utils/runai_utils.py +102 -0
  1425. vllm/transformers_utils/s3_utils.py +95 -0
  1426. vllm/transformers_utils/tokenizer.py +127 -0
  1427. vllm/transformers_utils/tokenizer_base.py +33 -0
  1428. vllm/transformers_utils/utils.py +112 -0
  1429. vllm/triton_utils/__init__.py +20 -0
  1430. vllm/triton_utils/importing.py +103 -0
  1431. vllm/usage/__init__.py +0 -0
  1432. vllm/usage/usage_lib.py +294 -0
  1433. vllm/utils/__init__.py +66 -0
  1434. vllm/utils/argparse_utils.py +492 -0
  1435. vllm/utils/async_utils.py +310 -0
  1436. vllm/utils/cache.py +214 -0
  1437. vllm/utils/collection_utils.py +112 -0
  1438. vllm/utils/counter.py +45 -0
  1439. vllm/utils/deep_gemm.py +400 -0
  1440. vllm/utils/flashinfer.py +528 -0
  1441. vllm/utils/func_utils.py +236 -0
  1442. vllm/utils/gc_utils.py +151 -0
  1443. vllm/utils/hashing.py +117 -0
  1444. vllm/utils/import_utils.py +449 -0
  1445. vllm/utils/jsontree.py +158 -0
  1446. vllm/utils/math_utils.py +32 -0
  1447. vllm/utils/mem_constants.py +13 -0
  1448. vllm/utils/mem_utils.py +232 -0
  1449. vllm/utils/nccl.py +64 -0
  1450. vllm/utils/network_utils.py +331 -0
  1451. vllm/utils/nvtx_pytorch_hooks.py +286 -0
  1452. vllm/utils/platform_utils.py +59 -0
  1453. vllm/utils/profiling.py +56 -0
  1454. vllm/utils/registry.py +51 -0
  1455. vllm/utils/serial_utils.py +214 -0
  1456. vllm/utils/system_utils.py +269 -0
  1457. vllm/utils/tensor_schema.py +255 -0
  1458. vllm/utils/torch_utils.py +648 -0
  1459. vllm/v1/__init__.py +0 -0
  1460. vllm/v1/attention/__init__.py +0 -0
  1461. vllm/v1/attention/backends/__init__.py +0 -0
  1462. vllm/v1/attention/backends/cpu_attn.py +497 -0
  1463. vllm/v1/attention/backends/flash_attn.py +1051 -0
  1464. vllm/v1/attention/backends/flashinfer.py +1575 -0
  1465. vllm/v1/attention/backends/flex_attention.py +1028 -0
  1466. vllm/v1/attention/backends/gdn_attn.py +375 -0
  1467. vllm/v1/attention/backends/linear_attn.py +77 -0
  1468. vllm/v1/attention/backends/mamba1_attn.py +159 -0
  1469. vllm/v1/attention/backends/mamba2_attn.py +348 -0
  1470. vllm/v1/attention/backends/mamba_attn.py +117 -0
  1471. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1472. vllm/v1/attention/backends/mla/aiter_triton_mla.py +74 -0
  1473. vllm/v1/attention/backends/mla/common.py +2114 -0
  1474. vllm/v1/attention/backends/mla/cutlass_mla.py +278 -0
  1475. vllm/v1/attention/backends/mla/flashattn_mla.py +342 -0
  1476. vllm/v1/attention/backends/mla/flashinfer_mla.py +174 -0
  1477. vllm/v1/attention/backends/mla/flashmla.py +317 -0
  1478. vllm/v1/attention/backends/mla/flashmla_sparse.py +1020 -0
  1479. vllm/v1/attention/backends/mla/indexer.py +345 -0
  1480. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +275 -0
  1481. vllm/v1/attention/backends/mla/rocm_aiter_mla_sparse.py +325 -0
  1482. vllm/v1/attention/backends/mla/triton_mla.py +171 -0
  1483. vllm/v1/attention/backends/pallas.py +436 -0
  1484. vllm/v1/attention/backends/rocm_aiter_fa.py +1000 -0
  1485. vllm/v1/attention/backends/rocm_aiter_unified_attn.py +206 -0
  1486. vllm/v1/attention/backends/rocm_attn.py +359 -0
  1487. vllm/v1/attention/backends/short_conv_attn.py +104 -0
  1488. vllm/v1/attention/backends/tree_attn.py +428 -0
  1489. vllm/v1/attention/backends/triton_attn.py +497 -0
  1490. vllm/v1/attention/backends/utils.py +1212 -0
  1491. vllm/v1/core/__init__.py +0 -0
  1492. vllm/v1/core/block_pool.py +485 -0
  1493. vllm/v1/core/encoder_cache_manager.py +402 -0
  1494. vllm/v1/core/kv_cache_coordinator.py +570 -0
  1495. vllm/v1/core/kv_cache_manager.py +419 -0
  1496. vllm/v1/core/kv_cache_metrics.py +96 -0
  1497. vllm/v1/core/kv_cache_utils.py +1476 -0
  1498. vllm/v1/core/sched/__init__.py +0 -0
  1499. vllm/v1/core/sched/async_scheduler.py +68 -0
  1500. vllm/v1/core/sched/interface.py +189 -0
  1501. vllm/v1/core/sched/output.py +230 -0
  1502. vllm/v1/core/sched/request_queue.py +217 -0
  1503. vllm/v1/core/sched/scheduler.py +1826 -0
  1504. vllm/v1/core/sched/utils.py +64 -0
  1505. vllm/v1/core/single_type_kv_cache_manager.py +801 -0
  1506. vllm/v1/cudagraph_dispatcher.py +183 -0
  1507. vllm/v1/engine/__init__.py +217 -0
  1508. vllm/v1/engine/async_llm.py +866 -0
  1509. vllm/v1/engine/coordinator.py +377 -0
  1510. vllm/v1/engine/core.py +1455 -0
  1511. vllm/v1/engine/core_client.py +1416 -0
  1512. vllm/v1/engine/detokenizer.py +351 -0
  1513. vllm/v1/engine/exceptions.py +18 -0
  1514. vllm/v1/engine/input_processor.py +643 -0
  1515. vllm/v1/engine/llm_engine.py +414 -0
  1516. vllm/v1/engine/logprobs.py +189 -0
  1517. vllm/v1/engine/output_processor.py +659 -0
  1518. vllm/v1/engine/parallel_sampling.py +145 -0
  1519. vllm/v1/engine/processor.py +20 -0
  1520. vllm/v1/engine/utils.py +1068 -0
  1521. vllm/v1/executor/__init__.py +6 -0
  1522. vllm/v1/executor/abstract.py +352 -0
  1523. vllm/v1/executor/multiproc_executor.py +890 -0
  1524. vllm/v1/executor/ray_distributed_executor.py +8 -0
  1525. vllm/v1/executor/ray_executor.py +626 -0
  1526. vllm/v1/executor/ray_utils.py +465 -0
  1527. vllm/v1/executor/uniproc_executor.py +186 -0
  1528. vllm/v1/kv_cache_interface.py +404 -0
  1529. vllm/v1/kv_offload/__init__.py +0 -0
  1530. vllm/v1/kv_offload/abstract.py +161 -0
  1531. vllm/v1/kv_offload/arc_manager.py +237 -0
  1532. vllm/v1/kv_offload/backend.py +97 -0
  1533. vllm/v1/kv_offload/backends/__init__.py +0 -0
  1534. vllm/v1/kv_offload/backends/cpu.py +62 -0
  1535. vllm/v1/kv_offload/cpu.py +86 -0
  1536. vllm/v1/kv_offload/factory.py +56 -0
  1537. vllm/v1/kv_offload/lru_manager.py +139 -0
  1538. vllm/v1/kv_offload/mediums.py +39 -0
  1539. vllm/v1/kv_offload/spec.py +66 -0
  1540. vllm/v1/kv_offload/worker/__init__.py +0 -0
  1541. vllm/v1/kv_offload/worker/cpu_gpu.py +280 -0
  1542. vllm/v1/kv_offload/worker/worker.py +144 -0
  1543. vllm/v1/metrics/__init__.py +0 -0
  1544. vllm/v1/metrics/loggers.py +1305 -0
  1545. vllm/v1/metrics/prometheus.py +82 -0
  1546. vllm/v1/metrics/ray_wrappers.py +194 -0
  1547. vllm/v1/metrics/reader.py +257 -0
  1548. vllm/v1/metrics/stats.py +437 -0
  1549. vllm/v1/outputs.py +245 -0
  1550. vllm/v1/pool/__init__.py +0 -0
  1551. vllm/v1/pool/metadata.py +126 -0
  1552. vllm/v1/request.py +282 -0
  1553. vllm/v1/sample/__init__.py +0 -0
  1554. vllm/v1/sample/logits_processor/__init__.py +352 -0
  1555. vllm/v1/sample/logits_processor/builtin.py +278 -0
  1556. vllm/v1/sample/logits_processor/interface.py +106 -0
  1557. vllm/v1/sample/logits_processor/state.py +165 -0
  1558. vllm/v1/sample/metadata.py +44 -0
  1559. vllm/v1/sample/ops/__init__.py +0 -0
  1560. vllm/v1/sample/ops/bad_words.py +52 -0
  1561. vllm/v1/sample/ops/logprobs.py +25 -0
  1562. vllm/v1/sample/ops/penalties.py +57 -0
  1563. vllm/v1/sample/ops/topk_topp_sampler.py +384 -0
  1564. vllm/v1/sample/rejection_sampler.py +805 -0
  1565. vllm/v1/sample/sampler.py +319 -0
  1566. vllm/v1/sample/tpu/__init__.py +0 -0
  1567. vllm/v1/sample/tpu/metadata.py +120 -0
  1568. vllm/v1/sample/tpu/sampler.py +215 -0
  1569. vllm/v1/serial_utils.py +514 -0
  1570. vllm/v1/spec_decode/__init__.py +0 -0
  1571. vllm/v1/spec_decode/eagle.py +1331 -0
  1572. vllm/v1/spec_decode/medusa.py +73 -0
  1573. vllm/v1/spec_decode/metadata.py +66 -0
  1574. vllm/v1/spec_decode/metrics.py +225 -0
  1575. vllm/v1/spec_decode/ngram_proposer.py +291 -0
  1576. vllm/v1/spec_decode/suffix_decoding.py +101 -0
  1577. vllm/v1/spec_decode/utils.py +121 -0
  1578. vllm/v1/structured_output/__init__.py +353 -0
  1579. vllm/v1/structured_output/backend_guidance.py +265 -0
  1580. vllm/v1/structured_output/backend_lm_format_enforcer.py +177 -0
  1581. vllm/v1/structured_output/backend_outlines.py +324 -0
  1582. vllm/v1/structured_output/backend_types.py +136 -0
  1583. vllm/v1/structured_output/backend_xgrammar.py +378 -0
  1584. vllm/v1/structured_output/request.py +94 -0
  1585. vllm/v1/structured_output/utils.py +469 -0
  1586. vllm/v1/utils.py +414 -0
  1587. vllm/v1/worker/__init__.py +0 -0
  1588. vllm/v1/worker/block_table.py +343 -0
  1589. vllm/v1/worker/cp_utils.py +42 -0
  1590. vllm/v1/worker/cpu_model_runner.py +122 -0
  1591. vllm/v1/worker/cpu_worker.py +192 -0
  1592. vllm/v1/worker/dp_utils.py +240 -0
  1593. vllm/v1/worker/ec_connector_model_runner_mixin.py +87 -0
  1594. vllm/v1/worker/gpu/README.md +4 -0
  1595. vllm/v1/worker/gpu/__init__.py +0 -0
  1596. vllm/v1/worker/gpu/async_utils.py +98 -0
  1597. vllm/v1/worker/gpu/attn_utils.py +189 -0
  1598. vllm/v1/worker/gpu/block_table.py +314 -0
  1599. vllm/v1/worker/gpu/cudagraph_utils.py +259 -0
  1600. vllm/v1/worker/gpu/dp_utils.py +31 -0
  1601. vllm/v1/worker/gpu/input_batch.py +479 -0
  1602. vllm/v1/worker/gpu/metrics/__init__.py +0 -0
  1603. vllm/v1/worker/gpu/metrics/logits.py +42 -0
  1604. vllm/v1/worker/gpu/model_runner.py +1006 -0
  1605. vllm/v1/worker/gpu/sample/__init__.py +0 -0
  1606. vllm/v1/worker/gpu/sample/gumbel.py +101 -0
  1607. vllm/v1/worker/gpu/sample/logprob.py +167 -0
  1608. vllm/v1/worker/gpu/sample/metadata.py +192 -0
  1609. vllm/v1/worker/gpu/sample/min_p.py +51 -0
  1610. vllm/v1/worker/gpu/sample/output.py +14 -0
  1611. vllm/v1/worker/gpu/sample/penalties.py +155 -0
  1612. vllm/v1/worker/gpu/sample/sampler.py +87 -0
  1613. vllm/v1/worker/gpu/spec_decode/__init__.py +18 -0
  1614. vllm/v1/worker/gpu/spec_decode/eagle.py +565 -0
  1615. vllm/v1/worker/gpu/spec_decode/eagle_cudagraph.py +115 -0
  1616. vllm/v1/worker/gpu/spec_decode/rejection_sample.py +71 -0
  1617. vllm/v1/worker/gpu/states.py +316 -0
  1618. vllm/v1/worker/gpu/structured_outputs.py +76 -0
  1619. vllm/v1/worker/gpu_input_batch.py +990 -0
  1620. vllm/v1/worker/gpu_model_runner.py +5470 -0
  1621. vllm/v1/worker/gpu_ubatch_wrapper.py +472 -0
  1622. vllm/v1/worker/gpu_worker.py +955 -0
  1623. vllm/v1/worker/kv_connector_model_runner_mixin.py +302 -0
  1624. vllm/v1/worker/lora_model_runner_mixin.py +212 -0
  1625. vllm/v1/worker/tpu_input_batch.py +583 -0
  1626. vllm/v1/worker/tpu_model_runner.py +2191 -0
  1627. vllm/v1/worker/tpu_worker.py +352 -0
  1628. vllm/v1/worker/ubatch_utils.py +109 -0
  1629. vllm/v1/worker/ubatching.py +231 -0
  1630. vllm/v1/worker/utils.py +375 -0
  1631. vllm/v1/worker/worker_base.py +377 -0
  1632. vllm/v1/worker/workspace.py +253 -0
  1633. vllm/v1/worker/xpu_model_runner.py +48 -0
  1634. vllm/v1/worker/xpu_worker.py +174 -0
  1635. vllm/version.py +39 -0
  1636. vllm/vllm_flash_attn/.gitkeep +0 -0
  1637. vllm_cpu_avx512vnni-0.13.0.dist-info/METADATA +339 -0
  1638. vllm_cpu_avx512vnni-0.13.0.dist-info/RECORD +1641 -0
  1639. vllm_cpu_avx512vnni-0.13.0.dist-info/WHEEL +5 -0
  1640. vllm_cpu_avx512vnni-0.13.0.dist-info/entry_points.txt +5 -0
  1641. vllm_cpu_avx512vnni-0.13.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,2191 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ import bisect
4
+ import gc
5
+ import time
6
+ from typing import TYPE_CHECKING, Any, cast
7
+ from unittest.mock import patch
8
+
9
+ import numpy as np
10
+ import torch
11
+ import torch.nn as nn
12
+
13
+ # TPU XLA related
14
+ import torch_xla
15
+ import torch_xla.core.xla_model as xm
16
+ import torch_xla.distributed.spmd as xs
17
+ import torch_xla.runtime as xr
18
+
19
+ import vllm.envs as envs
20
+ from vllm.attention.backends.abstract import AttentionType
21
+ from vllm.attention.layer import Attention, MLAAttention
22
+ from vllm.attention.layers.chunked_local_attention import ChunkedLocalAttention
23
+ from vllm.compilation.wrapper import TorchCompileWithNoGuardsWrapper
24
+ from vllm.config import (
25
+ ParallelConfig,
26
+ VllmConfig,
27
+ get_layers_from_vllm_config,
28
+ update_config,
29
+ )
30
+ from vllm.distributed.kv_transfer import get_kv_transfer_group, has_kv_transfer_group
31
+ from vllm.distributed.kv_transfer.kv_connector.utils import copy_kv_blocks
32
+ from vllm.forward_context import set_forward_context
33
+ from vllm.logger import init_logger
34
+ from vllm.lora.layers import BaseLayerWithLoRA
35
+ from vllm.model_executor.layers.attention_layer_base import AttentionLayerBase
36
+ from vllm.model_executor.model_loader import get_model_loader
37
+ from vllm.model_executor.model_loader.tpu import TPUModelLoader
38
+ from vllm.model_executor.models.interfaces import (
39
+ SupportsMultiModal,
40
+ supports_transcription,
41
+ )
42
+ from vllm.model_executor.models.interfaces_base import (
43
+ is_pooling_model,
44
+ is_text_generation_model,
45
+ )
46
+ from vllm.multimodal import MULTIMODAL_REGISTRY
47
+ from vllm.multimodal.inputs import (
48
+ BatchedTensorInputs,
49
+ MultiModalKwargsItem,
50
+ PlaceholderRange,
51
+ )
52
+ from vllm.multimodal.utils import group_mm_kwargs_by_modality
53
+ from vllm.sequence import IntermediateTensors
54
+ from vllm.tasks import GenerationTask, PoolingTask, SupportedTask
55
+ from vllm.utils.math_utils import cdiv, prev_power_of_2
56
+ from vllm.utils.platform_utils import is_pin_memory_available
57
+ from vllm.v1.attention.backends.pallas import (
58
+ TPU_STR_DTYPE_TO_TORCH_DTYPE,
59
+ PallasAttentionBackend,
60
+ PallasMetadata,
61
+ get_page_size_bytes,
62
+ )
63
+ from vllm.v1.kv_cache_interface import (
64
+ AttentionSpec,
65
+ FullAttentionSpec,
66
+ KVCacheConfig,
67
+ KVCacheSpec,
68
+ MLAAttentionSpec,
69
+ SlidingWindowSpec,
70
+ )
71
+ from vllm.v1.outputs import (
72
+ EMPTY_MODEL_RUNNER_OUTPUT,
73
+ LogprobsLists,
74
+ LogprobsTensors,
75
+ ModelRunnerOutput,
76
+ )
77
+ from vllm.v1.sample.tpu.metadata import TPUSupportedSamplingMetadata
78
+ from vllm.v1.sample.tpu.sampler import Sampler as TPUSampler
79
+ from vllm.v1.worker.kv_connector_model_runner_mixin import (
80
+ KVConnectorModelRunnerMixin,
81
+ KVConnectorOutput,
82
+ )
83
+ from vllm.v1.worker.lora_model_runner_mixin import LoRAModelRunnerMixin
84
+ from vllm.v1.worker.tpu_input_batch import CachedRequestState, InputBatch
85
+
86
+ from .utils import (
87
+ MultiModalBudget,
88
+ add_kv_sharing_layers_to_kv_cache_groups,
89
+ bind_kv_cache,
90
+ sanity_check_mm_encoder_outputs,
91
+ )
92
+
93
+ if TYPE_CHECKING:
94
+ from vllm.v1.core.sched.output import GrammarOutput, SchedulerOutput
95
+
96
+ logger = init_logger(__name__)
97
+
98
+ INVALID_TOKEN_ID = -1
99
+ # Smallest padded number of requests (sequences) in a batch
100
+ MIN_NUM_SEQS = 8
101
+
102
+
103
+ #########################################################
104
+ # Ways to avoid recompilation
105
+ #########################################################
106
+ #
107
+ # The model executor has two primary components:
108
+ # 1. preparing the model and sampler inputs
109
+ # 2. executing the model and sampler.
110
+ # The core idea is to avoid any TPU computation during input preparation. For
111
+ # better compilation tracking and increased flexibility, the model execution and
112
+ # sampler are divided into several distinct components.
113
+ #
114
+ # Below are the detailed steps:
115
+ #
116
+ # Step 1
117
+ # It is recommended to avoid TPU operations when preparing the model and sampler
118
+ # inputs. CPU tensors can be prepared and transferred to the XLA device using
119
+ # cpu_tensor.to(xla_device), which only triggers CPU to TPU transfers and avoids
120
+ # compilation.
121
+ #
122
+ # Step 2
123
+ # The TPU execution should be decomposed into subgraphs (4 at the moment):
124
+ # 1. the main model
125
+ # 2. selecting hidden states for each request
126
+ # 3. sampler
127
+ # 4. encoder.
128
+ # Each subgraph should be wrapped in torch.compile. This is used to make
129
+ # sure that we have the same subgraph topology in both dummy_run and
130
+ # execute_model. The results from these subgraphs should either be passed to
131
+ # other subgraphs, or transferred from TPU to CPU using xla_tensor.cpu() for
132
+ # subsequent processing on the CPU.
133
+ #
134
+ # Step 3
135
+ # The dummy_run should be comprehensive, ensuring all potential input shapes and
136
+ # branch predictions are included as subgraph inputs to facilitate
137
+ # pre-compilation.
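# A minimal sketch (editorial illustration, not part of this diff) of the
# pattern Steps 1-2 describe: build inputs on the CPU, transfer them with
# .to(device), and wrap each subgraph in torch.compile with the "openxla"
# backend so the traced graph is reused instead of recompiled. The function
# and tensor names below are hypothetical.
def _subgraph_pattern_sketch(subgraph: nn.Module, device: torch.device) -> torch.Tensor:
    # Step 1: prepare inputs on the CPU only; the transfer is a copy, not a compile.
    input_ids = torch.zeros(128, dtype=torch.int32).to(device)
    # Step 2: compile the subgraph once per padded input shape.
    compiled = torch.compile(subgraph, backend="openxla", fullgraph=True, dynamic=False)
    hidden = compiled(input_ids)
    # Results either feed the next subgraph or come back to the CPU for host-side work.
    return hidden.cpu()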
138
+ class TPUModelRunner(LoRAModelRunnerMixin, KVConnectorModelRunnerMixin):
139
+ def __init__(
140
+ self,
141
+ vllm_config: VllmConfig,
142
+ device: torch.device,
143
+ original_parallel_config: ParallelConfig | None = None,
144
+ ):
145
+ self.vllm_config = vllm_config
146
+ self.model_config = vllm_config.model_config
147
+ self.cache_config = vllm_config.cache_config
148
+ self.lora_config = vllm_config.lora_config
149
+ self.load_config = vllm_config.load_config
150
+ self.parallel_config = vllm_config.parallel_config
151
+ self.original_parallel_config = original_parallel_config
152
+ self.scheduler_config = vllm_config.scheduler_config
153
+ self.speculative_config = vllm_config.speculative_config
154
+ self.observability_config = vllm_config.observability_config
155
+ self.device_config = vllm_config.device_config
156
+
157
+ model_config = self.model_config
158
+ cache_config = self.cache_config
159
+ scheduler_config = self.scheduler_config
160
+ parallel_config = self.parallel_config
161
+ self.device = device
162
+ self.check_recompilation = envs.VLLM_XLA_CHECK_RECOMPILATION
163
+
164
+ # SPMD Related
165
+ self.use_spmd = envs.VLLM_XLA_USE_SPMD
166
+ if self.use_spmd:
167
+ num_devices = xr.global_runtime_device_count()
168
+ mesh_shape = (num_devices, 1)
169
+ device_ids = np.array(range(num_devices))
170
+ self.mesh = xs.Mesh(device_ids, mesh_shape, ("x", "y"))
171
+
172
+ self.enforce_eager = model_config.enforce_eager
173
+
174
+ self.num_xla_graphs = 0
175
+ self._update_num_xla_graphs("init")
176
+
177
+ self.pin_memory = is_pin_memory_available()
178
+ self.dtype = self.model_config.dtype
179
+ if cache_config.cache_dtype == "auto":
180
+ model_dtype = self.dtype
181
+ if isinstance(model_dtype, str):
182
+ self.kv_cache_dtype = TPU_STR_DTYPE_TO_TORCH_DTYPE[model_dtype]
183
+ else:
184
+ self.kv_cache_dtype = model_dtype
185
+ else:
186
+ self.kv_cache_dtype = TPU_STR_DTYPE_TO_TORCH_DTYPE[cache_config.cache_dtype]
187
+ self._hidden_states_dtype = self.dtype
188
+
189
+ self.sliding_window = model_config.get_sliding_window()
190
+ self.block_size = cache_config.block_size
191
+ self.max_model_len = model_config.max_model_len
192
+ self.most_model_len = envs.VLLM_TPU_MOST_MODEL_LEN
193
+ self.max_num_blocks_per_req = cdiv(self.max_model_len, self.block_size)
194
+ self.num_blocks_per_most_len_req = (
195
+ cdiv(self.most_model_len, self.block_size)
196
+ if self.most_model_len is not None
197
+ else None
198
+ )
199
+ # InputBatch needs to work with sampling tensors greater than padding
200
+ # to avoid dynamic shapes. Also, avoid suboptimal alignment.
201
+ self.max_num_reqs = max(scheduler_config.max_num_seqs, MIN_NUM_SEQS)
202
+ self.num_tokens_paddings = _get_token_paddings(
203
+ min_token_size=16,
204
+ max_token_size=scheduler_config.max_num_batched_tokens,
205
+ padding_gap=envs.VLLM_TPU_BUCKET_PADDING_GAP,
206
+ )
207
+ # In case `max_num_tokens < max(num_tokens_paddings)` use the actual
208
+ # padded max value to pre-allocate data structures and pre-compile.
209
+ self.max_num_tokens = self.num_tokens_paddings[-1]
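# Illustrative sketch (editorial, not the real _get_token_paddings, which also
# honors padding_gap): bucket sizes grow geometrically from min_token_size up
# to max_token_size, and a scheduled token count is padded up to the nearest
# bucket so only a handful of shapes ever reach XLA.
#
#     def _token_paddings_sketch(min_size: int, max_size: int) -> list[int]:
#         paddings, size = [], min_size
#         while size < max_size:
#             paddings.append(size)
#             size *= 2
#         paddings.append(max_size)
#         return paddings
#
#     def _padded_len_sketch(paddings: list[int], num_tokens: int) -> int:
#         return paddings[bisect.bisect_left(paddings, num_tokens)]
#
# e.g. min=16, max=256 gives [16, 32, 64, 128, 256]; 100 scheduled tokens are
# padded to the 128 bucket.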
210
+
211
+ # Model-related.
212
+ self.num_attn_layers = model_config.get_num_layers_by_block_type(
213
+ parallel_config, "attention"
214
+ )
215
+ self.num_query_heads = model_config.get_num_attention_heads(parallel_config)
216
+ self.num_kv_heads = model_config.get_num_kv_heads(parallel_config)
217
+ self.head_size = model_config.get_head_size()
218
+ self.inputs_embeds_size = model_config.get_inputs_embeds_size()
219
+ self.vocab_size = model_config.get_vocab_size()
220
+
221
+ # Multi-modal data support
222
+ self.mm_registry = MULTIMODAL_REGISTRY
223
+ self.uses_mrope = model_config.uses_mrope
224
+ self.supports_mm_inputs = self.mm_registry.supports_multimodal_inputs(
225
+ model_config
226
+ )
227
+ # TODO: Support M-RoPE (e.g., Qwen2-VL)
228
+ assert not self.uses_mrope, "TPU does not support M-RoPE yet."
229
+
230
+ self._num_slices_per_kv_cache_update_block = (
231
+ _get_num_slices_per_kv_cache_update_block(
232
+ get_page_size_bytes(
233
+ block_size=self.block_size,
234
+ num_kv_heads=self.num_kv_heads,
235
+ head_size=self.head_size,
236
+ kv_cache_dtype=self.kv_cache_dtype,
237
+ )
238
+ )
239
+ )
240
+
241
+ # Lazy initialization
242
+ self.model: nn.Module # Set after load_model
243
+ self.kv_caches: list[torch.Tensor] = []
244
+ # mm_hash -> encoder_output
245
+ self.encoder_cache: dict[str, torch.Tensor] = {}
246
+
247
+ # Request states.
248
+ self.requests: dict[str, CachedRequestState] = {}
249
+ # NOTE(rob): num_prompt_logprobs only includes reqs
250
+ # that are currently in the prefill phase.
251
+ self.num_prompt_logprobs: dict[str, int] = {}
252
+
253
+ # Initialize input batch early to avoid AttributeError in _update_states
254
+ self.input_batch = InputBatch(
255
+ max_num_reqs=self.max_num_reqs,
256
+ max_model_len=self.max_model_len,
257
+ max_num_batched_tokens=self.max_num_tokens,
258
+ device=self.device,
259
+ pin_memory=self.pin_memory,
260
+ vocab_size=self.model_config.get_vocab_size(),
261
+ block_sizes=[self.block_size],
262
+ kernel_block_sizes=[self.cache_config.block_size],
263
+ )
264
+
265
+ # Cached torch/numpy tensor
266
+ # The pytorch tensor and numpy array share the same buffer.
267
+ # Sometimes the numpy op is faster so we create both.
268
+ self.input_ids_cpu = torch.zeros(
269
+ self.max_num_tokens, dtype=torch.int32, device="cpu"
270
+ )
271
+
272
+ self.positions_cpu = torch.zeros(
273
+ self.max_num_tokens, dtype=torch.int32, device="cpu"
274
+ )
275
+ self.positions_np = self.positions_cpu.numpy()
276
+ self.block_table_cpu = torch.zeros(
277
+ (self.max_num_reqs, self.max_num_blocks_per_req),
278
+ dtype=torch.int32,
279
+ device="cpu",
280
+ )
281
+ # adjust num_reqs to avoid SMEM OOM.
282
+ self.num_reqs_most_model_len = (
283
+ min(
284
+ PallasAttentionBackend.get_max_num_seqs(
285
+ self.most_model_len, self.block_size
286
+ ),
287
+ self.max_num_reqs,
288
+ )
289
+ if self.most_model_len is not None
290
+ else None
291
+ )
292
+ self.num_reqs_max_model_len = min(
293
+ PallasAttentionBackend.get_max_num_seqs(
294
+ self.max_model_len, self.block_size
295
+ ),
296
+ self.max_num_reqs,
297
+ )
298
+ self.query_start_loc_cpu = torch.zeros(
299
+ self.max_num_tokens + 1,
300
+ dtype=torch.int32,
301
+ device="cpu",
302
+ pin_memory=self.pin_memory,
303
+ )
304
+ self.query_start_loc_np = self.query_start_loc_cpu.numpy()
305
+
306
+ self.seq_lens_cpu = torch.zeros(
307
+ self.max_num_tokens,
308
+ dtype=torch.int32,
309
+ device="cpu",
310
+ pin_memory=self.pin_memory,
311
+ )
312
+ self.seq_lens_np = self.seq_lens_cpu.numpy()
313
+
314
+ # Only relevant for multimodal models
315
+ if self.supports_mm_inputs:
316
+ self.is_mm_embed_cpu = torch.zeros(
317
+ self.max_num_tokens,
318
+ dtype=torch.bool,
319
+ device="cpu",
320
+ pin_memory=self.pin_memory,
321
+ )
322
+
323
+ # Range tensor with values [0 .. self.max_num_tokens - 1].
324
+ # Used to initialize positions / context_lens / seq_lens
325
+ # Keep in int64 to avoid overflow with long context
326
+ self.arange_np = np.arange(self.max_num_tokens, dtype=np.int64)
327
+ self.num_reqs_paddings = _get_req_paddings(
328
+ min_req_size=MIN_NUM_SEQS, max_req_size=self.max_num_reqs
329
+ )
330
+
331
+ # Layer pairings for cross-layer KV sharing.
332
+ # If an Attention layer `layer_name` is in the keys of this dict, it
333
+ # means this layer will perform attention using the keys and values
334
+ # from the KV cache of `shared_kv_cache_layers[layer_name]`.
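# For example (hypothetical layer names), {"model.layers.29.attn":
# "model.layers.28.attn"} makes layer 29 attend over the KV cache written by
# layer 28, so no separate cache is allocated for layer 29.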
335
+ self.shared_kv_cache_layers: dict[str, str] = {}
336
+
337
+ # tensors for structured decoding
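# The bitmask packs one bit per vocabulary token into int32 words, which is
# why each row below has cdiv(self.vocab_size, 32) columns.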
338
+ self.grammar_bitmask_cpu = torch.zeros(
339
+ (self.max_num_reqs, cdiv(self.vocab_size, 32)),
340
+ dtype=torch.int32,
341
+ device="cpu",
342
+ pin_memory=self.pin_memory,
343
+ )
344
+ self.require_structured_out_cpu = torch.zeros(
345
+ (self.max_num_reqs, 1),
346
+ dtype=torch.bool,
347
+ device="cpu",
348
+ pin_memory=self.pin_memory,
349
+ )
350
+ self.structured_decode_arange = torch.arange(
351
+ 0, 32, device="cpu", pin_memory=self.pin_memory
352
+ )
353
+
354
+ self.mm_budget = (
355
+ MultiModalBudget(
356
+ self.model_config,
357
+ self.scheduler_config,
358
+ self.mm_registry,
359
+ )
360
+ if self.supports_mm_inputs
361
+ else None
362
+ )
363
+
364
+ if not self.use_spmd:
365
+ self.sample_from_logits_func = torch.compile(
366
+ self.sample_from_logits,
367
+ backend="openxla",
368
+ fullgraph=True,
369
+ dynamic=False,
370
+ )
371
+ else:
372
+ self.sample_from_logits_func = self.sample_from_logits
373
+
374
+ # For passing scheduler_output between successive
375
+ # execute_model() and sample_tokens() calls.
376
+ self.scheduler_output: SchedulerOutput | None = None
377
+ self.mm_embed_inputs: tuple[list[torch.Tensor], torch.Tensor] | None = None
378
+
379
+ def reset_mm_cache(self) -> None:
380
+ if self.mm_budget:
381
+ self.mm_budget.reset_cache()
382
+
383
+ def _update_num_xla_graphs(self, case_str):
384
+ check_comp = self.check_recompilation and not self.enforce_eager
385
+ if not check_comp:
386
+ return
387
+
388
+ total_cached_graphs = xr.get_num_cached_compilation_graph()
389
+ new_compiled_graphs = total_cached_graphs - self.num_xla_graphs
390
+ if new_compiled_graphs == 0:
391
+ return
392
+
393
+ logger.info(
394
+ "Add new %d compiled XLA graphs due to %s", new_compiled_graphs, case_str
395
+ )
396
+ self.num_xla_graphs += new_compiled_graphs
397
+
398
+ def _verify_num_xla_graphs(self, case_str):
399
+ check_comp = self.check_recompilation and not self.enforce_eager
400
+ if not check_comp:
401
+ return
402
+
403
+ curr_cached_graph = xr.get_num_cached_compilation_graph()
404
+ assert self.num_xla_graphs == curr_cached_graph, (
405
+ "Recompilation after warm up is detected during {}."
406
+ " num_xla_graphs = {} curr_cached_graph = {}".format(
407
+ case_str, self.num_xla_graphs, curr_cached_graph
408
+ )
409
+ )
410
+
411
+ def _update_states(self, scheduler_output: "SchedulerOutput") -> bool:
412
+ """Update the cached states and the persistent batch with the scheduler
413
+ output.
414
+
415
+ The updated states are used by the `_prepare_inputs` function to create
416
+ the input TPU tensors for the model.
417
+
418
+ Returns:
419
+ True if there is a new/resumed/paused/finished request.
420
+ If False, we can skip copying SamplingMetadata to the TPU.
421
+ """
422
+ # Remove finished requests from the cached states.
423
+ for req_id in scheduler_output.finished_req_ids:
424
+ self.requests.pop(req_id, None)
425
+ self.num_prompt_logprobs.pop(req_id, None)
426
+
427
+ # Remove the finished requests from the persistent batch.
428
+ # NOTE(woosuk): There could be an edge case where finished_req_ids and
429
+ # scheduled_req_ids overlap. This happens when a request is aborted and
430
+ # then resubmitted with the same ID. In this case, we treat them as two
431
+ # distinct requests - clearing the cached states for the first request
432
+ # and handling the second as a new request.
433
+ removed_req_indices: list[int] = []
434
+ for req_id in scheduler_output.finished_req_ids:
435
+ req_index = self.input_batch.remove_request(req_id)
436
+ if req_index is not None:
437
+ removed_req_indices.append(req_index)
438
+
439
+ # Free the cached encoder outputs.
440
+ for mm_hash in scheduler_output.free_encoder_mm_hashes:
441
+ self.encoder_cache.pop(mm_hash, None)
442
+
443
+ # Remove the unscheduled requests from the persistent batch.
444
+ # NOTE(woosuk): The unscheduled requests are either preempted requests
445
+ # or running requests that are not scheduled in this step. We remove
446
+ # them from the persistent batch but keep their cached states since
447
+ # they will be scheduled again sometime in the future.
448
+ scheduled_req_ids = scheduler_output.num_scheduled_tokens.keys()
449
+ cached_req_ids = self.input_batch.req_id_to_index.keys()
450
+ unscheduled_req_ids = cached_req_ids - scheduled_req_ids
451
+ # NOTE(woosuk): The persistent batch optimization assumes that
452
+ # consecutive batches contain mostly the same requests. If batches
453
+ # have low request overlap (e.g., alternating between two distinct
454
+ # sets of requests), this optimization becomes very inefficient.
455
+ for req_id in unscheduled_req_ids:
456
+ req_index = self.input_batch.remove_request(req_id)
457
+ assert req_index is not None
458
+ removed_req_indices.append(req_index)
459
+
460
+ req_ids_to_add: list[str] = []
461
+ # Add new requests to the cached states.
462
+ for new_req_data in scheduler_output.scheduled_new_reqs:
463
+ assert new_req_data.sampling_params is not None, (
464
+ "Pooling is not supported in TPU yet"
465
+ )
466
+ req_id = new_req_data.req_id
467
+ sampling_params = new_req_data.sampling_params
468
+
469
+ self.requests[req_id] = CachedRequestState(
470
+ req_id=req_id,
471
+ prompt_token_ids=new_req_data.prompt_token_ids,
472
+ prompt_embeds=new_req_data.prompt_embeds,
473
+ mm_features=new_req_data.mm_features,
474
+ sampling_params=sampling_params,
475
+ pooling_params=None,
476
+ generator=None,
477
+ block_ids=new_req_data.block_ids,
478
+ num_computed_tokens=new_req_data.num_computed_tokens,
479
+ output_token_ids=[],
480
+ lora_request=new_req_data.lora_request,
481
+ )
482
+
483
+ if sampling_params and sampling_params.prompt_logprobs is not None:
484
+ self.num_prompt_logprobs[req_id] = (
485
+ self.input_batch.vocab_size
486
+ if sampling_params.prompt_logprobs == -1
487
+ else sampling_params.prompt_logprobs
488
+ )
489
+
490
+ req_ids_to_add.append(req_id)
491
+
492
+ # Update the states of the running/resumed requests.
493
+ req_data = scheduler_output.scheduled_cached_reqs
494
+ for i, req_id in enumerate(req_data.req_ids):
495
+ req_state = self.requests[req_id]
496
+ num_computed_tokens = req_data.num_computed_tokens[i]
497
+ new_block_ids = req_data.new_block_ids[i]
498
+ resumed_from_preemption = req_id in req_data.resumed_req_ids
499
+
500
+ # Update the cached states.
501
+ req_state.num_computed_tokens = num_computed_tokens
502
+ if not resumed_from_preemption:
503
+ if new_block_ids is not None:
504
+ # Append the new blocks to the existing block IDs.
505
+ for block_ids, new_ids in zip(req_state.block_ids, new_block_ids):
506
+ block_ids.extend(new_ids)
507
+ else:
508
+ assert new_block_ids is not None
509
+ # The request is resumed from preemption.
510
+ # Replace the existing block IDs with the new ones.
511
+ req_state.block_ids = new_block_ids
512
+
513
+ req_index = self.input_batch.req_id_to_index.get(req_id)
514
+ if req_index is None:
515
+ # The request is not in the persistent batch.
516
+ # The request was either preempted and resumed later, or was not
517
+ # scheduled in the previous step and needs to be added again.
518
+ req_ids_to_add.append(req_id)
519
+ continue
520
+
521
+ # Update the persistent batch.
522
+ self.input_batch.num_computed_tokens_cpu[req_index] = num_computed_tokens
523
+ if new_block_ids is not None:
524
+ self.input_batch.block_table.append_row(new_block_ids, req_index)
525
+
526
+ # Add the new or resumed requests to the persistent batch.
527
+ # The smaller empty indices are filled first.
528
+ removed_req_indices = sorted(removed_req_indices, reverse=True)
529
+ for req_id in req_ids_to_add:
530
+ req_state = self.requests[req_id]
531
+ # Fill the empty index or append to the end
532
+ req_index = removed_req_indices.pop() if removed_req_indices else None
533
+ self.input_batch.add_request(req_state, req_index)
534
+
535
+ # Condense the batched states if there are empty indices.
536
+ if removed_req_indices:
537
+ self.input_batch.condense(removed_req_indices)
538
+
539
+ return len(unscheduled_req_ids) > 0 or len(req_ids_to_add) > 0
540
+
541
+ def get_model(self) -> nn.Module:
542
+ return self.model
543
+
544
+ def get_supported_generation_tasks(self) -> list[GenerationTask]:
545
+ model = self.get_model()
546
+ supported_tasks = list[GenerationTask]()
547
+
548
+ if is_text_generation_model(model):
549
+ supported_tasks.append("generate")
550
+
551
+ if supports_transcription(model):
552
+ if model.supports_transcription_only:
553
+ return ["transcription"]
554
+
555
+ supported_tasks.append("transcription")
556
+
557
+ return supported_tasks
558
+
559
+ def get_supported_pooling_tasks(self) -> list[PoolingTask]:
560
+ model = self.get_model()
561
+ if not is_pooling_model(model):
562
+ return []
563
+
564
+ return list(model.pooler.get_supported_tasks())
565
+
566
+ def get_supported_tasks(self) -> tuple[SupportedTask, ...]:
567
+ tasks = list[SupportedTask]()
568
+
569
+ if self.model_config.runner_type == "generate":
570
+ tasks.extend(self.get_supported_generation_tasks())
571
+ if self.model_config.runner_type == "pooling":
572
+ tasks.extend(self.get_supported_pooling_tasks())
573
+
574
+ return tuple(tasks)
575
+
576
+ def get_kv_cache_spec(self) -> dict[str, KVCacheSpec]:
577
+ """
578
+ Generates the KVCacheSpec by parsing the kv cache format from each
579
+ Attention module in the static forward context.
580
+ Returns:
581
+ KVCacheSpec: A dictionary mapping layer names to their KV cache
582
+ format. Layers that do not need KV cache are not included.
583
+ """
584
+
585
+ layers = get_layers_from_vllm_config(
586
+ self.vllm_config,
587
+ AttentionLayerBase, # type: ignore[type-abstract]
588
+ )
589
+ block_size = self.vllm_config.cache_config.block_size
590
+ cache_dtype_str = self.vllm_config.cache_config.cache_dtype
591
+
592
+ kv_cache_spec: dict[str, KVCacheSpec] = {}
593
+ for layer_name, attn_module in layers.items():
594
+ # Classic Attention path
595
+ if isinstance(attn_module, Attention):
596
+ if (
597
+ kv_tgt_layer := attn_module.kv_sharing_target_layer_name
598
+ ) is not None:
599
+ # The layer doesn't need its own KV cache and will use that of
600
+ # the target layer. We skip creating a KVCacheSpec for it, so
601
+ # that the KV cache management logic acts as if this layer did
602
+ # not exist and allocates no KV cache for it. This
603
+ # enables the memory savings of cross-layer KV sharing, allowing
604
+ # a given amount of memory to accommodate longer context lengths
605
+ # or to serve more requests simultaneously.
606
+ self.shared_kv_cache_layers[layer_name] = kv_tgt_layer
607
+ continue
608
+
609
+ if attn_module.attn_type == AttentionType.DECODER:
610
+ if isinstance(attn_module, ChunkedLocalAttention):
611
+ logger.warning_once(
612
+ "Using irope in Pallas is not supported yet, it "
613
+ "will fall back to global attention for long context."
614
+ )
615
+ if attn_module.sliding_window is not None:
616
+ kv_cache_spec[layer_name] = SlidingWindowSpec(
617
+ block_size=block_size,
618
+ num_kv_heads=attn_module.num_kv_heads,
619
+ head_size=attn_module.head_size,
620
+ dtype=self.kv_cache_dtype,
621
+ sliding_window=attn_module.sliding_window,
622
+ )
623
+ else:
624
+ kv_cache_spec[layer_name] = FullAttentionSpec(
625
+ block_size=block_size,
626
+ num_kv_heads=attn_module.num_kv_heads,
627
+ head_size=attn_module.head_size,
628
+ dtype=self.kv_cache_dtype,
629
+ )
630
+ elif attn_module.attn_type in (
631
+ AttentionType.ENCODER,
632
+ AttentionType.ENCODER_ONLY,
633
+ ):
634
+ # encoder-only attention does not need KV cache.
635
+ continue
636
+ elif attn_module.attn_type == AttentionType.ENCODER_DECODER:
637
+ raise NotImplementedError
638
+ else:
639
+ raise ValueError(f"Unknown attention type: {attn_module.attn_type}")
640
+ # MLAAttention path
641
+ elif isinstance(attn_module, MLAAttention):
642
+ if layer_name in kv_cache_spec:
643
+ continue
644
+ kv_cache_spec[layer_name] = MLAAttentionSpec(
645
+ block_size=block_size,
646
+ num_kv_heads=1,
647
+ head_size=attn_module.head_size,
648
+ dtype=self.kv_cache_dtype,
649
+ cache_dtype_str=cache_dtype_str,
650
+ )
651
+ else:
652
+ continue
653
+
654
+ return kv_cache_spec
655
+
656
+ def _get_slot_mapping_metadata(
657
+ self, num_reqs, num_scheduled_tokens_per_req
658
+ ) -> np.ndarray:
659
+ """
660
+ Computes metadata for mapping slots to blocks in the key-value (KV)
661
+ cache for a batch of requests.
662
+
663
+ This function determines, for each request in the batch, how the
664
+ scheduled tokens are distributed across memory blocks, and generates
665
+ metadata needed to map slices of tokens to their corresponding positions
666
+ in the KV cache.
667
+
668
+ Args:
669
+ num_reqs (int): Number of requests in the current batch.
670
+ num_scheduled_tokens_per_req (int or np.ndarray): Number of tokens
671
+ to be scheduled for each request.
672
+
673
+ Returns:
674
+ np.ndarray: A 2D array of shape (total_block_len, 3), where each row
675
+ contains:
676
+ - kv_cache_start_index (int): The starting index in the KV cache
677
+ for the corresponding slice.
678
+ - new_kv_start_index (int): The starting index in the new KV
679
+ cache for the corresponding slice.
680
+ - slice_len (int): The length of the slice.
681
+ """
682
+ slices_start = self.input_batch.num_computed_tokens_cpu[:num_reqs]
683
+ slices_end = (
684
+ self.input_batch.num_computed_tokens_cpu[:num_reqs]
685
+ + num_scheduled_tokens_per_req
686
+ )
687
+ local_block_start_idx = slices_start // self.block_size
688
+ local_block_end_idx = (slices_end - 1) // self.block_size
689
+ no_repeat_req_indices = self.arange_np[:num_reqs]
690
+ global_block_start_idx = (
691
+ no_repeat_req_indices * self.max_num_blocks_per_req + local_block_start_idx
692
+ )
693
+ block_lens = local_block_end_idx - local_block_start_idx + 1
694
+ global_block_start_idx = np.repeat(global_block_start_idx, block_lens)
695
+ slice_arange = np.concatenate([self.arange_np[:n] for n in block_lens])
696
+ global_block_indices = global_block_start_idx + slice_arange
697
+ block_table_cpu = self.input_batch.block_table[0].get_cpu_tensor()
698
+ block_numbers = block_table_cpu.flatten()[global_block_indices].numpy()
699
+ total_block_len = np.sum(block_lens)
700
+ slot_mapping_slices = np.repeat(
701
+ np.array([[0, self.block_size]], dtype=np.int32), total_block_len, axis=0
702
+ )
703
+ cu_block_lens = np.zeros(len(block_lens) + 1, dtype=np.int32)
704
+ np.cumsum(block_lens, out=cu_block_lens[1:])
705
+ for req_idx in range(num_reqs):
706
+ slot_mapping_slices[cu_block_lens[req_idx]][0] = (
707
+ slices_start[req_idx] % self.block_size
708
+ )
709
+ slot_mapping_slices[cu_block_lens[req_idx + 1] - 1][1] = (
710
+ slices_end[req_idx] - 1
711
+ ) % self.block_size + 1
712
+ slice_lens = slot_mapping_slices[:, 1] - slot_mapping_slices[:, 0]
713
+ cu_slices_lens = np.zeros(len(slice_lens) + 1, dtype=np.int32)
714
+ np.cumsum(slice_lens, out=cu_slices_lens[1:])
715
+ kv_cache_start_indices = slot_mapping_slices[:, 0] + (
716
+ block_numbers * self.block_size
717
+ )
718
+ new_kv_start_indices = cu_slices_lens[:-1]
719
+ slot_mapping_metadata = np.stack(
720
+ [kv_cache_start_indices, new_kv_start_indices, slice_lens], axis=1
721
+ )
722
+ return slot_mapping_metadata
723
+
724
+ def _prepare_inputs(self, scheduler_output: "SchedulerOutput", start_index: int):
725
+ assert scheduler_output.total_num_scheduled_tokens > 0
726
+ num_reqs = self.input_batch.num_reqs
727
+ assert num_reqs > 0
728
+ assert start_index < num_reqs
729
+
730
+ # Get the number of scheduled tokens for each request.
731
+ use_max_model_len = self.most_model_len is None
732
+ num_scheduled_tokens_per_req = []
733
+ max_num_scheduled_tokens_all_reqs = 0
734
+ end_index = start_index
735
+
736
+ # Use either most_model_len or max_model_len depending on request size.
737
+ for i in range(start_index, num_reqs):
738
+ req_id = self.input_batch.req_ids[i]
739
+ assert req_id is not None
740
+ num_tokens = scheduler_output.num_scheduled_tokens[req_id]
741
+ if (
742
+ not use_max_model_len
743
+ and self.most_model_len is not None
744
+ and num_tokens > self.most_model_len
745
+ ):
746
+ use_max_model_len = True
747
+ num_scheduled_tokens_per_req.append(num_tokens)
748
+ if use_max_model_len:
749
+ if len(num_scheduled_tokens_per_req) > self.num_reqs_max_model_len:
750
+ num_scheduled_tokens_per_req = num_scheduled_tokens_per_req[
751
+ : self.num_reqs_max_model_len
752
+ ]
753
+ end_index = start_index + self.num_reqs_max_model_len
754
+ else:
755
+ end_index = num_reqs
756
+ else:
757
+ assert self.num_reqs_most_model_len is not None
758
+ if len(num_scheduled_tokens_per_req) > self.num_reqs_most_model_len:
759
+ num_scheduled_tokens_per_req = num_scheduled_tokens_per_req[
760
+ : self.num_reqs_most_model_len
761
+ ]
762
+ end_index = start_index + self.num_reqs_most_model_len
763
+ else:
764
+ end_index = num_reqs
765
+ max_num_scheduled_tokens_all_reqs = max(num_scheduled_tokens_per_req)
766
+ num_scheduled_tokens_per_req = np.array(
767
+ num_scheduled_tokens_per_req, dtype=np.int32
768
+ )
769
+ total_num_scheduled_tokens = sum(num_scheduled_tokens_per_req)
770
+ assert max_num_scheduled_tokens_all_reqs > 0
771
+
772
+ num_reqs = len(num_scheduled_tokens_per_req)
773
+
774
+ # Get request indices.
775
+ # E.g., [2, 5, 3] -> [0, 0, 1, 1, 1, 1, 1, 2, 2, 2]
776
+ # For each scheduled token, what are the corresponding req index.
777
+ req_indices = np.repeat(self.arange_np[:num_reqs], num_scheduled_tokens_per_req)
778
+
779
+ # Get batched arange.
780
+ # E.g., [2, 5, 3] -> [0, 1, 0, 1, 2, 3, 4, 0, 1, 2]
781
+ # For each scheduled token, what is its position in corresponding req.
782
+ arange = np.concatenate(
783
+ [self.arange_np[:n] for n in num_scheduled_tokens_per_req]
784
+ )
785
+
786
+ # Get positions.
787
+ positions_np = self.positions_np[:total_num_scheduled_tokens]
788
+ np.add(
789
+ self.input_batch.num_computed_tokens_cpu[req_indices],
790
+ arange,
791
+ out=positions_np,
792
+ )
793
+
794
+ # Get token indices.
795
+ # E.g., [0, 1, 0, 1, 2, 3, 4, 0, 1, 2]
796
+ # -> [0, 1, M, M + 1, M + 2, M + 3, M + 4, 2 * M, 2 * M + 1, 2 * M + 2]
797
+ # where M is the max_model_len.
798
+ token_indices = (
799
+ positions_np + req_indices * self.input_batch.token_ids_cpu.shape[1]
800
+ )
801
+
802
+ # NOTE(woosuk): We use torch.index_select instead of np.take here
803
+ # because torch.index_select is much faster than np.take for large
804
+ # tensors.
805
+ torch.index_select(
806
+ self.input_batch.token_ids_cpu_tensor.flatten(),
807
+ 0,
808
+ torch.from_numpy(token_indices),
809
+ out=self.input_ids_cpu[:total_num_scheduled_tokens],
810
+ )
811
+
812
+ # Prepare the attention metadata.
813
+ self.query_start_loc_np[0] = 0
814
+ np.cumsum(
815
+ num_scheduled_tokens_per_req, out=self.query_start_loc_np[1 : num_reqs + 1]
816
+ )
817
+ self.query_start_loc_np[num_reqs + 1 :] = 1
818
+
819
+ self.seq_lens_np[:num_reqs] = (
820
+ self.input_batch.num_computed_tokens_cpu[:num_reqs]
821
+ + num_scheduled_tokens_per_req
822
+ )
823
+
824
+ # Do the padding and copy the tensors to the TPU.
825
+ padded_total_num_scheduled_tokens = _get_padded_token_len(
826
+ self.num_tokens_paddings, total_num_scheduled_tokens
827
+ )
828
+ # Zero out to avoid spurious values from prev iteration (last cp chunk)
829
+ self.input_ids_cpu[
830
+ total_num_scheduled_tokens:padded_total_num_scheduled_tokens
831
+ ] = 0
832
+ self.input_ids = self.input_ids_cpu[:padded_total_num_scheduled_tokens].to(
833
+ self.device
834
+ )
835
+ self.position_ids = self.positions_cpu[:padded_total_num_scheduled_tokens].to(
836
+ self.device
837
+ )
838
+ if use_max_model_len:
839
+ block_tables = self.block_table_cpu[
840
+ : self.num_reqs_max_model_len, : self.max_num_blocks_per_req
841
+ ]
842
+ block_tables[:num_reqs, : self.max_num_blocks_per_req] = (
843
+ self.input_batch.block_table[0].get_cpu_tensor()[:num_reqs]
844
+ )
845
+ query_start_loc = self.query_start_loc_cpu[
846
+ : self.num_reqs_max_model_len + 1
847
+ ].to(self.device)
848
+ seq_lens = self.seq_lens_cpu[: self.num_reqs_max_model_len].to(self.device)
849
+ else:
850
+ assert self.num_reqs_most_model_len is not None
851
+ block_tables = self.block_table_cpu[
852
+ : self.num_reqs_most_model_len, : self.num_blocks_per_most_len_req
853
+ ]
854
+ block_tables[:num_reqs, : self.num_blocks_per_most_len_req] = (
855
+ self.input_batch.block_table[0].get_cpu_tensor()[
856
+ :num_reqs, : self.num_blocks_per_most_len_req
857
+ ]
858
+ )
859
+ query_start_loc = self.query_start_loc_cpu[
860
+ : self.num_reqs_most_model_len + 1
861
+ ].to(self.device)
862
+ seq_lens = self.seq_lens_cpu[: self.num_reqs_most_model_len].to(self.device)
863
+ block_tables = block_tables.to(self.device)
864
+
865
+ # Calculate the slot mapping
866
+ slot_mapping_metadata = self._get_slot_mapping_metadata(
867
+ num_reqs, num_scheduled_tokens_per_req
868
+ )
869
+ num_kv_update_slices = slot_mapping_metadata.shape[0]
870
+ padded_num_slices = _get_padded_num_kv_cache_update_slices(
871
+ padded_total_num_scheduled_tokens, self.max_num_reqs, self.block_size
872
+ )
873
+ slot_mapping_metadata = np.pad(
874
+ slot_mapping_metadata,
875
+ [[0, padded_num_slices - len(slot_mapping_metadata)], [0, 0]],
876
+ constant_values=0,
877
+ )
878
+ slot_mapping_metadata = np.transpose(slot_mapping_metadata)
879
+ slot_mapping_metadata = torch.tensor(slot_mapping_metadata, device=self.device)
880
+
881
+ if self.lora_config is not None:
882
+ # We need to respect padding when activating LoRA adapters
883
+ padded_num_scheduled_tokens_per_req = np.copy(
884
+ num_scheduled_tokens_per_req
885
+ ) # Copying to avoid accidental state corruption bugs
886
+ padded_num_scheduled_tokens_per_req[-1] += (
887
+ padded_total_num_scheduled_tokens - total_num_scheduled_tokens
888
+ )
889
+
890
+ self.set_active_loras(self.input_batch, padded_num_scheduled_tokens_per_req)
891
+
892
+ attn_metadata = PallasMetadata(
893
+ slot_mapping=slot_mapping_metadata,
894
+ block_tables=block_tables,
895
+ context_lens=seq_lens,
896
+ query_start_loc=query_start_loc,
897
+ num_seqs=torch.tensor([num_reqs], dtype=torch.int32, device=self.device),
898
+ num_kv_update_slices=torch.tensor(
899
+ [num_kv_update_slices], dtype=torch.int32, device=self.device
900
+ ),
901
+ num_slices_per_kv_cache_update_block=self._num_slices_per_kv_cache_update_block,
902
+ )
903
+ # NOTE(woosuk): Due to chunked prefills, there can be at most 1 partial
904
+ # request in the batch. While we should not sample any token from this
905
+ # partial request, we do so for simplicity. We will ignore the sampled
906
+ # token from the partial request.
907
+ # TODO: Support prompt logprobs.
908
+ padded_num_reqs = _get_padded_num_reqs_with_upper_limit(
909
+ num_reqs, self.max_num_reqs
910
+ )
911
+ # Indices at which we sample (positions of last token in the sequence).
912
+ # Padded to avoid recompiling when `num_reqs` varies.
913
+ logits_indices = self.query_start_loc_cpu[1 : padded_num_reqs + 1] - 1
914
+ logits_indices = logits_indices.to(self.device)
915
+
916
+ if self.lora_config is not None:
917
+ # We need to respect padding when activating LoRA adapters
918
+ padded_num_scheduled_tokens_per_req = np.copy(
919
+ num_scheduled_tokens_per_req
920
+ ) # Copying to avoid accidental state corruption bugs
921
+ padded_num_scheduled_tokens_per_req[-1] += (
922
+ padded_total_num_scheduled_tokens - total_num_scheduled_tokens
923
+ )
924
+
925
+ self.set_active_loras(self.input_batch, padded_num_scheduled_tokens_per_req)
926
+
927
+ layer_names = get_layers_from_vllm_config(self.vllm_config, Attention).keys()
928
+ per_layer_attn_metadata = {
929
+ layer_name: attn_metadata for layer_name in layer_names
930
+ }
931
+ return (
932
+ per_layer_attn_metadata,
933
+ logits_indices,
934
+ padded_num_reqs,
935
+ num_reqs,
936
+ end_index,
937
+ )
938
+
939
+ def _execute_mm_encoder(self, scheduler_output: "SchedulerOutput"):
940
+ scheduled_encoder_inputs = scheduler_output.scheduled_encoder_inputs
941
+ if not scheduled_encoder_inputs:
942
+ return
943
+
944
+ # Batch the multi-modal inputs.
945
+ mm_kwargs = list[MultiModalKwargsItem]()
946
+ # List of tuple (mm_hash, pos_info)
947
+ mm_hashes_pos = list[tuple[str, PlaceholderRange]]()
948
+ for req_id, encoder_input_ids in scheduled_encoder_inputs.items():
949
+ req_state = self.requests[req_id]
950
+
951
+ for mm_input_id in encoder_input_ids:
952
+ mm_feature = req_state.mm_features[mm_input_id]
953
+ if mm_feature.data is None:
954
+ continue
955
+ mm_hash = mm_feature.identifier
956
+ mm_kwargs.append(mm_feature.data)
957
+ mm_hashes_pos.append((mm_hash, mm_feature.mm_position))
958
+
959
+ # Batch mm inputs as much as we can: if a request in the batch has
960
+ # multiple modalities or a different modality than the previous one,
961
+ # we process it separately to preserve item order.
962
+ # FIXME(ywang96): This is a hacky way to deal with multiple modalities
963
+ # in the same batch while still being able to benefit from batching
964
+ # multimodal inputs. The proper solution should be reordering the
965
+ # encoder outputs.
966
+ model = cast(SupportsMultiModal, self.model)
967
+ encoder_outputs = []
968
+ for _, num_items, mm_kwargs_group in group_mm_kwargs_by_modality(
969
+ mm_kwargs,
970
+ device=self.device,
971
+ pin_memory=self.pin_memory,
972
+ ):
973
+ # Run the encoder.
974
+ # `curr_group_outputs` is either of the following:
975
+ # 1. A tensor of shape (num_items, feature_size, hidden_size)
976
+ # in case feature_size is fixed across all multimodal items.
977
+ # 2. A list or tuple (length: num_items) of tensors, each of shape
978
+ # (feature_size, hidden_size) in case the feature size is dynamic
979
+ # depending on the input multimodal items.
980
+ torch_xla.sync(wait=False)
981
+ curr_group_outputs = model.embed_multimodal(**mm_kwargs_group)
982
+ torch_xla.sync(wait=False)
983
+
984
+ sanity_check_mm_encoder_outputs(
985
+ curr_group_outputs,
986
+ expected_num_items=num_items,
987
+ )
988
+
989
+ if isinstance(curr_group_outputs, torch.Tensor):
990
+ encoder_outputs.append(curr_group_outputs)
991
+ else:
992
+ assert isinstance(curr_group_outputs, (list, tuple))
993
+ for output in curr_group_outputs:
994
+ encoder_outputs.append(output)
995
+
996
+ # Cache the encoder outputs.
997
+ # NOTE (NickLucche) here we diverge from logic in other runners, as we
998
+ # assume to only have whole mm items to process. Hence we avoid the
999
+ # intrinsic dynamism that `scatter_mm_placeholders` introduces.
1000
+ for (mm_hash, pos_info), output in zip(mm_hashes_pos, encoder_outputs):
1001
+ assert pos_info.is_embed is None, (
1002
+ "Expected all positions to be contiguous and embeddings."
1003
+ )
1004
+ self.encoder_cache[mm_hash] = output
1005
+
1006
+ def _gather_mm_embeddings(
1007
+ self,
1008
+ scheduler_output: "SchedulerOutput",
1009
+ ) -> tuple[list[torch.Tensor], torch.Tensor]:
1010
+ total_num_scheduled_tokens = scheduler_output.total_num_scheduled_tokens
1011
+ padded_total_num_scheduled_tokens = _get_padded_token_len(
1012
+ self.num_tokens_paddings, total_num_scheduled_tokens
1013
+ )
1014
+
1015
+ is_mm_embed = self.is_mm_embed_cpu
1016
+ is_mm_embed[:padded_total_num_scheduled_tokens] = False
1017
+ mm_embeds = list[torch.Tensor]()
1018
+ req_start_idx = 0
1019
+
1020
+ for req_id in self.input_batch.req_ids:
1021
+ num_scheduled_tokens = scheduler_output.num_scheduled_tokens[req_id]
1022
+ req_state = self.requests[req_id]
1023
+ num_computed_tokens = req_state.num_computed_tokens
1024
+
1025
+ # TODO unroll loop and assume/enforce --disable_chunked_mm_input
1026
+ # NOTE (NickLucche) here we diverge from logic in other runners, as
1027
+ # we assume to only have whole mm items to process. Hence we avoid
1028
+ # the intrinsic dynamism that `gather_mm_placeholders` introduces.
1029
+ for mm_feature in req_state.mm_features:
1030
+ pos_info = mm_feature.mm_position
1031
+ start_pos = pos_info.offset
1032
+ num_encoder_tokens = pos_info.length
1033
+
1034
+ # The encoder output is needed if the two ranges overlap:
1035
+ # [num_computed_tokens,
1036
+ # num_computed_tokens + num_scheduled_tokens) and
1037
+ # [start_pos, start_pos + num_encoder_tokens)
1038
+ if start_pos >= num_computed_tokens + num_scheduled_tokens:
1039
+ # The encoder output is not needed in this step.
1040
+ break
1041
+ if start_pos + num_encoder_tokens <= num_computed_tokens:
1042
+ # The encoder output is already processed and stored
1043
+ # in the decoder's KV cache.
1044
+ continue
1045
+
1046
+ start_idx = max(num_computed_tokens - start_pos, 0)
1047
+ end_idx = min(
1048
+ num_computed_tokens - start_pos + num_scheduled_tokens,
1049
+ num_encoder_tokens,
1050
+ )
1051
+ assert start_idx < end_idx
1052
+
1053
+ mm_hash = mm_feature.identifier
1054
+ encoder_output = self.encoder_cache.get(mm_hash, None)
1055
+ assert encoder_output is not None, f"Encoder cache miss for {mm_hash}."
1056
+
1057
+ assert pos_info.is_embed is None, (
1058
+ "Expected all positions to be contiguous and embeddings."
1059
+ )
1060
+
1061
+ req_start_pos = req_start_idx + start_pos - num_computed_tokens
1062
+ is_mm_embed[req_start_pos + start_idx : req_start_pos + end_idx] = True
1063
+
1064
+ # Only whole mm items are processed
1065
+ mm_embeds.append(encoder_output)
1066
+
1067
+ req_start_idx += num_scheduled_tokens
1068
+
1069
+ is_mm_embed = is_mm_embed[:padded_total_num_scheduled_tokens].to(self.device)
1070
+
1071
+ return mm_embeds, is_mm_embed
1072
+
1073
+ def _get_model_inputs(
1074
+ self,
1075
+ input_ids: torch.Tensor,
1076
+ mm_embed_inputs: tuple[list[torch.Tensor], torch.Tensor] | None,
1077
+ ):
1078
+ if self.supports_mm_inputs:
1079
+ mm_embeds, is_mm_embed = mm_embed_inputs or (None, None)
1080
+
1081
+ # NOTE(woosuk): To unify token ids and soft tokens (vision
1082
+ # embeddings), we always use embeddings (rather than token ids)
1083
+ # as input to the multimodal model, even when the input is text.
1084
+ inputs_embeds = self.model.embed_input_ids(
1085
+ input_ids,
1086
+ multimodal_embeddings=mm_embeds,
1087
+ is_multimodal=is_mm_embed,
1088
+ )
1089
+
1090
+ return None, inputs_embeds
1091
+ else:
1092
+ # For text-only models, we use token ids as input.
1093
+ # While it is possible to use embeddings as input just like the
1094
+ # multimodal models, it is not desirable for performance since
1095
+ # then the embedding layer is not included in the CUDA graph.
1096
+ return input_ids, None
1097
+
1098
+ @torch.no_grad()
1099
+ def execute_model(
1100
+ self,
1101
+ scheduler_output: "SchedulerOutput",
1102
+ intermediate_tensors: IntermediateTensors | None = None,
1103
+ ) -> ModelRunnerOutput | None:
1104
+ if self.scheduler_output is not None:
1105
+ raise RuntimeError(
1106
+ "State error: sample_tokens() must be called "
1107
+ "after execute_model() returns None."
1108
+ )
1109
+ # Update cached state
1110
+ self._update_states(scheduler_output)
1111
+ if not scheduler_output.total_num_scheduled_tokens:
1112
+ if not has_kv_transfer_group():
1113
+ # Return empty ModelRunnerOutput if there's no work to do.
1114
+ return EMPTY_MODEL_RUNNER_OUTPUT
1115
+
1116
+ return self.kv_connector_no_forward(scheduler_output, self.vllm_config)
1117
+
1118
+ mm_embed_inputs = None
1119
+ if self.supports_mm_inputs:
1120
+ # Run the multimodal encoder if any.
1121
+ self._execute_mm_encoder(scheduler_output)
1122
+ mm_embed_inputs = self._gather_mm_embeddings(scheduler_output)
1123
+
1124
+ torch_xla.sync(wait=False)
1125
+
1126
+ self.scheduler_output = scheduler_output
1127
+ self.mm_embed_inputs = mm_embed_inputs
1128
+ return None
1129
+
1130
+ @torch.no_grad()
1131
+ def sample_tokens(
1132
+ self, grammar_output: "GrammarOutput | None"
1133
+ ) -> ModelRunnerOutput:
1134
+ if self.scheduler_output is None:
1135
+ # Nothing to do (PP non-final rank case), output isn't used.
1136
+ return None # type: ignore[return-value]
1137
+ scheduler_output = self.scheduler_output
1138
+ mm_embed_inputs = self.mm_embed_inputs
1139
+ self.scheduler_output = None
1140
+ self.mm_embed_inputs = None
1141
+
1142
+ # Prepare inputs. The requests might be split into multiple
+ # executions; combine the results of each execution.
1144
+ start_index = 0
1145
+ combined_selected_tokens: list[torch.Tensor] = []
1146
+ combined_logprobs: list[LogprobsLists] = []
1147
+
1148
+ # NOTE: setup current batch's metadata for kv connector.
1149
+ # Currently, only verified with NixlConnector
1150
+ with set_forward_context(None, self.vllm_config):
1151
+ self.maybe_setup_kv_connector(scheduler_output)
1152
+
1153
+ while start_index < self.input_batch.num_reqs:
1154
+ attn_metadata, logits_indices, padded_num_reqs, num_reqs, end_index = (
1155
+ self._prepare_inputs(scheduler_output, start_index)
1156
+ )
1157
+ input_ids, inputs_embeds = self._get_model_inputs(
1158
+ self.input_ids, mm_embed_inputs
1159
+ )
1160
+ torch_xla.sync(wait=False)
1161
+ # Run the decoder
1162
+ with set_forward_context(
1163
+ attn_metadata,
1164
+ self.vllm_config,
1165
+ num_tokens=scheduler_output.total_num_scheduled_tokens,
1166
+ ):
1167
+ hidden_states = self.model(
1168
+ input_ids=input_ids,
1169
+ positions=self.position_ids,
1170
+ inputs_embeds=inputs_embeds,
1171
+ )
1172
+ hidden_states = self.select_hidden_states(hidden_states, logits_indices)
1173
+ logits = self.compute_logits(hidden_states)
1174
+ tpu_sampling_metadata = TPUSupportedSamplingMetadata.from_input_batch(
1175
+ self.input_batch, padded_num_reqs, self.device
1176
+ )
1177
+ if grammar_output is not None:
1178
+ require_struct_decoding, grammar_bitmask_padded, arange = (
1179
+ self.prepare_structured_decoding_input(logits, grammar_output)
1180
+ )
1181
+ logits = self.structured_decode(
1182
+ require_struct_decoding, grammar_bitmask_padded, logits, arange
1183
+ )
1184
+ selected_token_ids = self.sample_from_logits_func(
1185
+ logits, tpu_sampling_metadata
1186
+ )
1187
+ # NOTE (NickLucche) Use the original logits (before any penalties or
1188
+ # temperature scaling) for the top-k logprobs. We can't enforce it
1189
+ # due to recompilations outside torch.compiled code, so just make
1190
+ # sure `sample_from_logits` does not modify the logits in-place.
1191
+ logprobs = (
1192
+ self.gather_logprobs(logits, selected_token_ids)
1193
+ if tpu_sampling_metadata.logprobs
1194
+ else None
1195
+ )
1196
+
1197
+ # Remove padding on cpu and keep dynamic op outside of xla graph.
1198
+ selected_token_ids = selected_token_ids.cpu()[:num_reqs]
1199
+
1200
+ combined_selected_tokens.append(selected_token_ids)
1201
+ if tpu_sampling_metadata.logprobs:
1202
+ combined_logprobs.append(logprobs.tolists())
1203
+
1204
+ start_index = end_index
1205
+
1206
+ # NOTE: current kv load and save get h2d/d2h copies involved.
1207
+ # Those copies are blocking. Once they become async., kv_save
1208
+ # should be called right after each single forward pass,
1209
+ # instead of the forwards of the entire input batch.
1210
+ self.maybe_wait_for_kv_save()
1211
+ finished_sending, finished_recving = self.get_finished_kv_transfers(
1212
+ scheduler_output
1213
+ )
1214
+
1215
+ selected_token_ids = torch.cat(combined_selected_tokens, dim=0)
1216
+ if tpu_sampling_metadata.logprobs:
1217
+
1218
+ def concat_lists(input_lists):
1219
+ result = []
1220
+ for input_list in input_lists:
1221
+ result.extend(input_list)
1222
+ return result
1223
+
1224
+ logprobs_lists = LogprobsLists(
1225
+ logprob_token_ids=concat_lists(
1226
+ [lp.logprob_token_ids for lp in combined_logprobs]
1227
+ ),
1228
+ logprobs=concat_lists([lp.logprobs for lp in combined_logprobs]),
1229
+ sampled_token_ranks=concat_lists(
1230
+ [lp.sampled_token_ranks for lp in combined_logprobs]
1231
+ ),
1232
+ )
1233
+ else:
1234
+ logprobs_lists = None
1235
+
1236
+ # Update the cache state concurrently. Code above will not block until
1237
+ # we use `selected_token_ids`. Add mark_step if post-processing changes
1238
+ request_seq_lens: list[tuple[int, CachedRequestState, int]] = []
1239
+ discard_sampled_tokens_req_indices = []
1240
+ num_reqs = self.input_batch.num_reqs
1241
+ for i, req_id in zip(range(num_reqs), self.input_batch.req_ids):
1242
+ assert req_id is not None
1243
+ req_state = self.requests[req_id]
1244
+ seq_len = (
1245
+ req_state.num_computed_tokens
1246
+ + scheduler_output.num_scheduled_tokens[req_id]
1247
+ )
1248
+ if seq_len >= req_state.num_tokens:
1249
+ request_seq_lens.append((i, req_state, seq_len))
1250
+ else:
1251
+ # Ignore the sampled token from the partial request.
1252
+ # Rewind the generator state as if the token was not sampled.
1253
+ generator = self.input_batch.generators.get(i)
1254
+ if generator is not None:
1255
+ # This relies on cuda-specific torch-internal impl details
1256
+ generator.set_offset(generator.get_offset() - 4)
1257
+
1258
+ # Record the index of the request that should not be sampled,
1259
+ # so that we could clear the sampled tokens before returning.
1260
+ discard_sampled_tokens_req_indices.append(i)
1261
+
1262
+ assert all(
1263
+ req_id is not None for req_id in self.input_batch.req_ids[:num_reqs]
1264
+ ), "req_ids contains None"
1265
+ req_ids = cast(list[str], self.input_batch.req_ids[:num_reqs])
1266
+
1267
+ prompt_logprobs_dict: dict[str, LogprobsTensors | None] = {}
1268
+ for req_id in self.input_batch.req_ids[:num_reqs]:
1269
+ prompt_logprobs_dict[req_id] = None
1270
+
1271
+ max_gen_len = selected_token_ids.shape[-1]
1272
+ if max_gen_len == 1:
1273
+ valid_sampled_token_ids = selected_token_ids.tolist()
1274
+
1275
+ # Mask out the sampled tokens that should not be sampled.
1276
+ # TODO: Keep in sync with gpu_model_runner.py, in particular
1277
+ # the "else" case here
1278
+ for i in discard_sampled_tokens_req_indices:
1279
+ valid_sampled_token_ids[i].clear()
1280
+
1281
+ # Append sampled tokens
1282
+ for i, req_state, seq_len in request_seq_lens:
1283
+ token_id = valid_sampled_token_ids[i][0]
1284
+ self.input_batch.token_ids_cpu[i, seq_len] = token_id
1285
+ req_state.output_token_ids.append(token_id)
1286
+ self.input_batch.num_tokens[i] += 1
1287
+
1288
+ else:
1289
+ valid_mask = selected_token_ids != INVALID_TOKEN_ID
1290
+ gen_lens = valid_mask.sum(dim=1).tolist()
1291
+ valid_sampled_token_ids = [
1292
+ seq.tolist() for seq in selected_token_ids[valid_mask].split(gen_lens)
1293
+ ]
1294
+ self.input_batch.num_tokens[:num_reqs] += gen_lens
1295
+ for i, req_state, seq_len in request_seq_lens:
1296
+ target_slice = slice(seq_len - gen_lens[i] + 1, seq_len + 1)
1297
+ self.input_batch.token_ids_cpu[i, target_slice] = (
1298
+ valid_sampled_token_ids[i]
1299
+ )
1300
+ req_state.output_token_ids.extend(valid_sampled_token_ids[i])
1301
+
1302
+ kv_connector_output = (
1303
+ None
1304
+ if (finished_sending is None and finished_recving is None)
1305
+ else KVConnectorOutput(
1306
+ finished_sending=finished_sending,
1307
+ finished_recving=finished_recving,
1308
+ )
1309
+ )
1310
+
1311
+ model_runner_output = ModelRunnerOutput(
1312
+ req_ids=req_ids,
1313
+ req_id_to_index=self.input_batch.req_id_to_index,
1314
+ sampled_token_ids=valid_sampled_token_ids,
1315
+ logprobs=logprobs_lists,
1316
+ prompt_logprobs_dict=prompt_logprobs_dict,
1317
+ pooler_output=[],
1318
+ kv_connector_output=kv_connector_output,
1319
+ )
1320
+
1321
+ # Check there are no new graphs compiled - all the graphs should be
1322
+ # captured and compiled during warm up.
1323
+ self._verify_num_xla_graphs("execute_model")
1324
+
1325
+ return model_runner_output
1326
+
1327
+ def update_config(self, overrides: dict[str, Any]) -> None:
1328
+ # TODO: TPU config may need extra validation
1329
+ # https://github.com/vllm-project/vllm/pull/20095#discussion_r2201497754
1330
+ allowed_config_names = {"load_config", "model_config"}
1331
+ for config_name, config_overrides in overrides.items():
1332
+ assert config_name in allowed_config_names, (
1333
+ f"Config `{config_name}` not supported. "
1334
+ f"Allowed configs: {allowed_config_names}"
1335
+ )
1336
+ config = getattr(self, config_name)
1337
+ new_config = update_config(config, config_overrides)
1338
+ setattr(self, config_name, new_config)
1339
+
1340
+ def load_model(self) -> None:
1341
+ self.device = self.device_config.device
1342
+
1343
+ # NOTE(woosuk): While the executor assigns the TP ranks to the worker
1344
+ # process, the ranks can be different from the ranks internally assigned
1345
+ # by the xm runtime. Therefore, there is a mismatch in the rank
1346
+ # assignment between the gloo (cpu) runtime and the xm (tpu) runtime.
1347
+ # This is not a problem in linear layers because all-reduce is
1348
+ # rank-agnostic. However, it matters for all-gather as the ranks
1349
+ # determine the order of concatenating the output tensors.
1350
+ # As a workaround, we use the xm's rank assignment only when loading
1351
+ # the embedding weights.
1352
+ xm_tp_rank = xr.global_ordinal()
1353
+ with patch(
1354
+ "vllm.model_executor.layers.vocab_parallel_embedding."
1355
+ "get_tensor_model_parallel_rank",
1356
+ return_value=xm_tp_rank,
1357
+ ):
1358
+ try:
1359
+ if self.use_spmd:
1360
+ tpu_loader = TPUModelLoader(
1361
+ load_config=self.vllm_config.load_config
1362
+ )
1363
+ model = tpu_loader.load_model(
1364
+ vllm_config=self.vllm_config,
1365
+ model_config=self.vllm_config.model_config,
1366
+ mesh=self.mesh,
1367
+ )
1368
+ else:
1369
+ model_loader = get_model_loader(self.load_config)
1370
+ logger.info("Loading model from scratch...")
1371
+ model = model_loader.load_model(
1372
+ vllm_config=self.vllm_config, model_config=self.model_config
1373
+ )
1374
+ except RuntimeError as e:
1375
+ raise RuntimeError(
1376
+ f"Unable to load model, a likely reason is the model is "
1377
+ "too large for the current device's HBM memory. "
1378
+ "Consider switching to a smaller model "
1379
+ "or sharding the weights on more chips. "
1380
+ f"See the detailed error: {e}"
1381
+ ) from e
1382
+ if self.lora_config is not None:
1383
+ model = self.load_lora_model(model, self.vllm_config, self.device)
1384
+ replace_set_lora(model)
1385
+
1386
+ # Sync all pending XLA execution during model initialization and weight
1387
+ # loading.
1388
+ torch_xla.sync(wait=False)
1389
+ xm.wait_device_ops()
1390
+ if not hasattr(self, "model"):
1391
+ self.model = model
1392
+ self.sampler = TPUSampler()
1393
+
1394
+ def reload_weights(self) -> None:
1395
+ assert getattr(self, "model", None) is not None, (
1396
+ "Cannot reload weights before model is loaded."
1397
+ )
1398
+ model_loader = get_model_loader(self.load_config)
1399
+ logger.info("Reloading weights inplace...")
1400
+ model_loader.load_weights(self.model, model_config=self.model_config)
1401
+
1402
+ @torch.no_grad()
1403
+ def _dummy_run(self, num_tokens: int, num_reqs: int, num_blocks: int) -> None:
1404
+ if self.supports_mm_inputs:
1405
+ input_ids = None
1406
+ inputs_embeds = torch.zeros(
1407
+ (num_tokens, self.inputs_embeds_size),
1408
+ dtype=self.dtype,
1409
+ device=self.device,
1410
+ )
1411
+ else:
1412
+ input_ids = torch.zeros((num_tokens), dtype=torch.int32).to(self.device)
1413
+ inputs_embeds = None
1414
+ actual_num_reqs = min(num_tokens, num_reqs)
1415
+ position_ids = torch.zeros(num_tokens, dtype=torch.int32).to(self.device)
1416
+ padded_num_slices = _get_padded_num_kv_cache_update_slices(
1417
+ num_tokens, self.max_num_reqs, self.block_size
1418
+ )
1419
+ num_kv_update_slices = torch.tensor([padded_num_slices], dtype=torch.int32).to(
1420
+ self.device
1421
+ )
1422
+ slot_mapping = torch.zeros((3, padded_num_slices), dtype=torch.int32).to(
1423
+ self.device
1424
+ )
1425
+ block_tables = torch.zeros((num_reqs, num_blocks), dtype=torch.int32).to(
1426
+ self.device
1427
+ )
1428
+ query_lens = [1] * num_reqs
1429
+ query_start_loc = torch.cumsum(
1430
+ torch.tensor([0] + query_lens, dtype=torch.int32), dim=0, dtype=torch.int32
1431
+ ).to(self.device)
1432
+ context_lens = torch.ones((num_reqs,), dtype=torch.int32).to(self.device)
1433
+ num_seqs = torch.tensor([actual_num_reqs], dtype=torch.int32).to(self.device)
1434
+ attn_metadata = PallasMetadata(
1435
+ slot_mapping=slot_mapping,
1436
+ block_tables=block_tables,
1437
+ context_lens=context_lens,
1438
+ query_start_loc=query_start_loc,
1439
+ num_seqs=num_seqs,
1440
+ num_kv_update_slices=num_kv_update_slices,
1441
+ num_slices_per_kv_cache_update_block=self._num_slices_per_kv_cache_update_block,
1442
+ )
1443
+
1444
+ if self.supports_mm_inputs:
1445
+ torch._dynamo.mark_dynamic(inputs_embeds, 0)
1446
+ else:
1447
+ torch._dynamo.mark_dynamic(input_ids, 0)
1448
+ torch._dynamo.mark_dynamic(position_ids, 0)
1449
+ torch._dynamo.mark_dynamic(attn_metadata.slot_mapping, 0)
1450
+ torch._dynamo.mark_dynamic(attn_metadata.block_tables, (0, 1))
1451
+ torch._dynamo.mark_dynamic(attn_metadata.context_lens, 0)
1452
+ torch._dynamo.mark_dynamic(attn_metadata.query_start_loc, 0)
1453
+
1454
+ layer_names = get_layers_from_vllm_config(self.vllm_config, Attention).keys()
1455
+ per_layer_attn_metadata = {
1456
+ layer_name: attn_metadata for layer_name in layer_names
1457
+ }
1458
+
1459
+ with (
1460
+ self.maybe_select_dummy_loras(
1461
+ self.lora_config, np.array([num_tokens], dtype=np.int32)
1462
+ ),
1463
+ set_forward_context(per_layer_attn_metadata, self.vllm_config, 0),
1464
+ ):
1465
+ out = self.model(
1466
+ input_ids=input_ids, positions=position_ids, inputs_embeds=inputs_embeds
1467
+ )
1468
+ self._hidden_states_dtype = out.dtype
1469
+
1470
+ def _set_active_loras(
1471
+ self, prompt_lora_mapping, token_lora_mapping, lora_requests
1472
+ ) -> None:
1473
+ torch_xla.sync(wait=False) # Captures input updates
1474
+ super()._set_active_loras(
1475
+ prompt_lora_mapping, token_lora_mapping, lora_requests
1476
+ )
1477
+ torch_xla.sync(wait=False) # Captures metadata updates
1478
+
1479
+ def _precompile_mm_encoder(self) -> None:
1480
+ if not self.supports_mm_inputs:
1481
+ return
1482
+
1483
+ # Pre-compile MM encoder for all supported data modalities.
1484
+ hf_config = self.vllm_config.model_config.hf_config
1485
+
1486
+ mm_budget = self.mm_budget
1487
+ assert mm_budget is not None
1488
+
1489
+ max_items_per_seq_by_modality = mm_budget.max_items_per_batch_by_modality # noqa: E501
1490
+
1491
+ for mode, max_items_per_seq in max_items_per_seq_by_modality.items():
1492
+ logger.info(
1493
+ "Compiling Multimodal %s Encoder with different input shapes.", mode
1494
+ )
1495
+ start = time.perf_counter()
1496
+ # No padding for MM encoder just yet.
1497
+ for num_items in range(1, max_items_per_seq + 1):
1498
+ logger.info(" -- mode: %s items: %d", mode, num_items)
1499
+ batched_dummy_mm_inputs = self._get_mm_dummy_batch(
1500
+ mode,
1501
+ num_items,
1502
+ )
1503
+ # Run multimodal encoder.
1504
+ torch_xla.sync(wait=False)
1505
+ mm_embeds = self.model.embed_multimodal(**batched_dummy_mm_inputs)
1506
+ torch_xla.sync(wait=False)
1507
+ num_patches = mm_embeds[0].shape[0]
1508
+ items_size = num_patches * num_items
1509
+
1510
+ # NOTE (NickLucche) pre-compile `embed_input_ids` when mm
1511
+ # embeddings are present. We assume `--disable-mm-chunked`,
1512
+ # hence only whole items can be scheduled. This implies we just
1513
+ # need to compile when `num_items` fit the (padded) `input_ids`
1514
+ for num_tokens in self.num_tokens_paddings:
1515
+ if num_tokens >= items_size:
1516
+ # XLA Workaround: if torch.zeros(..device) is used, XLA
1517
+ # compiles a scalar+expansion op, which won't match
1518
+ # the graph generated at runtime. CPU->TPU must be used
1519
+ placeholders_ids = torch.zeros(
1520
+ num_tokens, dtype=torch.int32, device="cpu"
1521
+ )
1522
+ # Align placeholders and actual num mm_embeddings.
1523
+ placeholders_ids[:items_size] = hf_config.image_token_index
1524
+
1525
+ placeholders_ids = placeholders_ids.to(self.device)
1526
+
1527
+ mm_mask = torch.tensor([False] * num_tokens)
1528
+ mm_mask[:items_size] = True
1529
+ mm_mask = mm_mask.to(self.device)
1530
+ # Assign outputs or the graph will be cut short.
1531
+ a, b = self._get_model_inputs(
1532
+ placeholders_ids,
1533
+ mm_embed_inputs=([mm_embeds], mm_mask),
1534
+ )
1535
+ assert a is None
1536
+ torch_xla.sync(wait=False)
1537
+
1538
+ # Pre-compile `embed_input_ids` when mm_embeddings are not
1539
+ # present. Chunk is only made of text, no mm_placeholders.
1540
+ for num_tokens in self.num_tokens_paddings:
1541
+ placeholders_ids = torch.zeros(
1542
+ num_tokens, dtype=torch.int32, device="cpu"
1543
+ )
1544
+ placeholders_ids = placeholders_ids.to(self.device)
1545
+ a, b = self._get_model_inputs(
1546
+ placeholders_ids,
1547
+ mm_embed_inputs=None,
1548
+ )
1549
+ assert a is None
1550
+ torch_xla.sync(wait=False)
1551
+
1552
+ xm.wait_device_ops()
1553
+ end = time.perf_counter()
1554
+ logger.info(
1555
+ "Multimodal %s Encoder compilation finished in in %.2f [secs].",
1556
+ mode,
1557
+ end - start,
1558
+ )
1559
+
1560
+ def _precompile_backbone(self) -> None:
1561
+ logger.info("Compiling the model with different input shapes.")
1562
+ start = time.perf_counter()
1563
+ for num_tokens in self.num_tokens_paddings:
1564
+ logger.info(" -- num_tokens: %d", num_tokens)
1565
+ self._dummy_run(
1566
+ num_tokens, self.num_reqs_max_model_len, self.max_num_blocks_per_req
1567
+ )
1568
+ if self.most_model_len is not None:
1569
+ self._dummy_run(
1570
+ num_tokens,
1571
+ self.num_reqs_most_model_len,
1572
+ self.num_blocks_per_most_len_req,
1573
+ )
1574
+ xm.wait_device_ops()
1575
+ end = time.perf_counter()
1576
+ logger.info("Compilation finished in %.2f [secs].", end - start)
1577
+ self._update_num_xla_graphs("model backbone")
1578
+
1579
+ def _precompile_select_hidden_states(self) -> None:
1580
+ # Compile hidden state selection function for bucketed
1581
+ # n_tokens x max_num_reqs. Graph is really small so this is fine.
1582
+ logger.info("Compiling select_hidden_states with different input shapes.")
1583
+ start = time.perf_counter()
1584
+ hsize = self.model_config.get_hidden_size()
1585
+ for num_tokens in self.num_tokens_paddings:
1586
+ dummy_hidden = torch.zeros(
1587
+ (num_tokens, hsize), device=self.device, dtype=self._hidden_states_dtype
1588
+ )
1589
+ torch._dynamo.mark_dynamic(dummy_hidden, 0)
1590
+ for num_reqs in self.num_reqs_paddings:
1591
+ indices = torch.zeros(num_reqs, dtype=torch.int32, device=self.device)
1592
+ torch._dynamo.mark_dynamic(indices, 0)
1593
+ self.select_hidden_states(dummy_hidden, indices)
1594
+ logger.info(" -- num_tokens: %d, num_seqs: %d", num_tokens, num_reqs)
1595
+ # The number of requests can't exceed the number of tokens. But do compile
+ # for the next bigger value in case num_tokens uses bucketed padding.
1597
+ if num_reqs >= min(num_tokens, self.max_num_reqs):
1598
+ break
1599
+ xm.wait_device_ops()
1600
+ end = time.perf_counter()
1601
+ logger.info("Compilation finished in %.2f [secs].", end - start)
1602
+ self._update_num_xla_graphs("select_hidden_states")
1603
+
1604
+ def _precompile_compute_logits(self) -> None:
1605
+ logger.info("Compiling compute_logits with different input shapes.")
1606
+ start = time.perf_counter()
1607
+ hsize = self.model_config.get_hidden_size()
1608
+ for num_reqs in self.num_reqs_paddings:
1609
+ dummy_hidden = torch.zeros(
1610
+ (num_reqs, hsize), device=self.device, dtype=self._hidden_states_dtype
1611
+ )
1612
+ torch._dynamo.mark_dynamic(dummy_hidden, 0)
1613
+ self.compute_logits(dummy_hidden)
1614
+ logger.info(" -- num_seqs: %d", num_reqs)
1615
+ xm.wait_device_ops()
1616
+ end = time.perf_counter()
1617
+ logger.info("Compilation finished in %.2f [secs].", end - start)
1618
+ self._update_num_xla_graphs("compute_logits")
1619
+
1620
+ def _precompile_structured_decoding(self) -> None:
1621
+ logger.info("Compiling structured_decoding with different input shapes.")
1622
+ start = time.perf_counter()
1623
+ for num_reqs in self.num_reqs_paddings:
1624
+ dummy_logits = torch.zeros(
1625
+ (num_reqs, self.vocab_size),
1626
+ device=self.device,
1627
+ dtype=self._hidden_states_dtype,
1628
+ )
1629
+ dummy_require_struct_decoding = self.require_structured_out_cpu[
1630
+ :num_reqs
1631
+ ].to(self.device)
1632
+ dummy_grammar_bitmask = self.grammar_bitmask_cpu[:num_reqs].to(self.device)
1633
+ # The first dimension of the above 3 dummy tensors cannot be
1634
+ # mark_dynamic because some operations in structured_decode require
1635
+ # them to be static.
1636
+ arange = self.structured_decode_arange.to(self.device)
1637
+ self.structured_decode(
1638
+ dummy_require_struct_decoding,
1639
+ dummy_grammar_bitmask,
1640
+ dummy_logits,
1641
+ arange,
1642
+ )
1643
+ logger.info(" -- num_seqs: %d", num_reqs)
1644
+ xm.wait_device_ops()
1645
+ end = time.perf_counter()
1646
+ logger.info("Compilation finished in %.2f [secs].", end - start)
1647
+ self._update_num_xla_graphs("structured_decoding")
1648
+
1649
+ def _precompile_sample_from_logits(self) -> None:
1650
+ logger.info("Compiling sample_from_logits with different input shapes.")
1651
+ start = time.perf_counter()
1652
+ for num_reqs in self.num_reqs_paddings:
1653
+ dummy_logits = torch.zeros(
1654
+ (num_reqs, self.vocab_size),
1655
+ device=self.device,
1656
+ dtype=self._hidden_states_dtype,
1657
+ )
1658
+ # The first dimension of dummy_logits cannot be mark_dynamic
1659
+ # because some operations in the sampler require it to be static.
1660
+ for all_greedy in [False, True]:
1661
+ generate_params_if_all_greedy = not all_greedy
1662
+ sampling_metadata = TPUSupportedSamplingMetadata.from_input_batch(
1663
+ self.input_batch,
1664
+ num_reqs,
1665
+ self.device,
1666
+ generate_params_if_all_greedy,
1667
+ )
1668
+ sampling_metadata.all_greedy = all_greedy
1669
+ with self.maybe_select_dummy_loras(
1670
+ self.lora_config, np.array([num_reqs], dtype=np.int32)
1671
+ ):
1672
+ self.sample_from_logits_func(dummy_logits, sampling_metadata)
1673
+ logger.info(" -- num_seqs: %d", num_reqs)
1674
+ xm.wait_device_ops()
1675
+ end = time.perf_counter()
1676
+ logger.info("Compilation finished in %.2f [secs].", end - start)
1677
+ self._update_num_xla_graphs("sample_from_logits")
1678
+
1679
+ def _precompile_gather_logprobs(self) -> None:
1680
+ logger.info("Compiling gather_logprobs with different input shapes.")
1681
+ start = time.perf_counter()
1682
+ for num_reqs in self.num_reqs_paddings:
1683
+ dummy_logits = torch.zeros(
1684
+ (num_reqs, self.vocab_size),
1685
+ device=self.device,
1686
+ dtype=self._hidden_states_dtype,
1687
+ )
1688
+ dummy_tokens = torch.zeros((num_reqs, 1), dtype=torch.int64).to(self.device)
1689
+ with self.maybe_select_dummy_loras(
1690
+ self.lora_config, np.array([num_reqs], dtype=np.int32)
1691
+ ):
1692
+ self.gather_logprobs(dummy_logits, dummy_tokens)
1693
+ logger.info(" -- num_seqs: %d", num_reqs)
1694
+ xm.wait_device_ops()
1695
+ end = time.perf_counter()
1696
+ logger.info("Compilation finished in %.2f [secs].", end - start)
1697
+ self._update_num_xla_graphs("gather_logprobs")
1698
+
1699
+ def capture_model(self) -> None:
1700
+ """
1701
+ Precompile all the subgraphs with possible input shapes.
1702
+ """
1703
+ with self.maybe_setup_dummy_loras(self.lora_config):
1704
+ self._precompile_mm_encoder()
1705
+ self._precompile_backbone()
1706
+ self._precompile_select_hidden_states()
1707
+ self._precompile_compute_logits()
1708
+ self._precompile_structured_decoding()
1709
+ self._precompile_sample_from_logits()
1710
+ self._precompile_gather_logprobs()
1711
+
1712
+ def profile_run(
1713
+ self,
1714
+ num_tokens: int,
1715
+ ) -> None:
1716
+ # Profile with multimodal encoder & encoder cache.
1717
+ if self.supports_mm_inputs:
1718
+ mm_config = self.model_config.multimodal_config
1719
+ if mm_config is not None and mm_config.skip_mm_profiling:
1720
+ logger.info(
1721
+ "Skipping memory profiling for multimodal encoder and "
1722
+ "encoder cache."
1723
+ )
1724
+ else:
1725
+ mm_budget = self.mm_budget
1726
+ assert mm_budget is not None
1727
+
1728
+ # TODO: handle encoder-decoder models once we support them.
1729
+ if (encoder_budget := mm_budget.get_encoder_budget()) > 0:
1730
+ # NOTE: Currently model is profiled with a single non-text
1731
+ # modality with the max possible input tokens even when
1732
+ # it supports multiple.
1733
+ dummy_modality = mm_budget.get_modality_with_max_tokens()
1734
+ max_mm_items_per_batch = mm_budget.max_items_per_batch_by_modality[
1735
+ dummy_modality
1736
+ ]
1737
+
1738
+ logger.info(
1739
+ "Encoder cache will be initialized with a budget of "
1740
+ "%s tokens, and profiled with %s %s items of the "
1741
+ "maximum feature size.",
1742
+ encoder_budget,
1743
+ max_mm_items_per_batch,
1744
+ dummy_modality,
1745
+ )
1746
+
1747
+ # Create dummy batch of multimodal inputs.
1748
+ batched_dummy_mm_inputs = self._get_mm_dummy_batch(
1749
+ dummy_modality,
1750
+ max_mm_items_per_batch,
1751
+ )
1752
+
1753
+ # Run multimodal encoder.
1754
+ # Isolate encoder graph from post-processing to minimize
1755
+ # impact of recompilation until it's fixed.
1756
+ start = time.perf_counter()
1757
+ torch_xla.sync(wait=False)
1758
+ dummy_encoder_outputs = self.model.embed_multimodal(
1759
+ **batched_dummy_mm_inputs
1760
+ )
1761
+ torch_xla.sync(wait=False)
1762
+ xm.wait_device_ops()
1763
+ end = time.perf_counter()
1764
+ logger.info(
1765
+ "Multimodal Encoder profiling finished in %.2f [secs].",
1766
+ end - start,
1767
+ )
1768
+
1769
+ sanity_check_mm_encoder_outputs(
1770
+ dummy_encoder_outputs,
1771
+ expected_num_items=max_mm_items_per_batch,
1772
+ )
1773
+
1774
+ # Cache the dummy encoder outputs.
1775
+ self.encoder_cache["tmp"] = dict(enumerate(dummy_encoder_outputs))
1776
+
1777
+ # Trigger compilation for general shape.
1778
+ self._dummy_run(
1779
+ num_tokens, self.num_reqs_max_model_len, self.max_num_blocks_per_req
1780
+ )
1781
+ if self.most_model_len is not None:
1782
+ self._dummy_run(
1783
+ num_tokens,
1784
+ self.num_reqs_most_model_len,
1785
+ self.num_blocks_per_most_len_req,
1786
+ )
1787
+
1788
+ torch_xla.sync(wait=False)
1789
+ xm.wait_device_ops()
1790
+ self.encoder_cache.clear()
1791
+ gc.collect()
1792
+
1793
+ def maybe_setup_cross_layer_kv_sharing(
1794
+ self,
1795
+ kv_caches: dict[str, torch.Tensor],
1796
+ kv_cache_config: KVCacheConfig,
1797
+ ) -> None:
1798
+ """
1799
+ Add layers that re-use KV cache to KV cache group of its target layer.
1800
+ Mapping of KV cache tensors happens in `initialize_kv_cache_tensors()`
1801
+ """
1802
+ if not self.shared_kv_cache_layers:
1803
+ # No cross-layer KV sharing, return
1804
+ return
1805
+
1806
+ add_kv_sharing_layers_to_kv_cache_groups(
1807
+ self.shared_kv_cache_layers,
1808
+ kv_cache_config.kv_cache_groups,
1809
+ )
1810
+
1811
+ for layer_name, target_layer_name in self.shared_kv_cache_layers.items():
1812
+ logger.debug("%s reuses KV cache of %s", layer_name, target_layer_name)
1813
+ kv_caches[layer_name] = kv_caches[target_layer_name]
1814
+
1815
+ def initialize_kv_cache(self, kv_cache_config: KVCacheConfig) -> None:
1816
+ """
1817
+ Initialize KV cache based on `kv_cache_config`.
1818
+ Args:
1819
+ kv_cache_config: Configuration for the KV cache, including the KV
1820
+ cache size of each layer
1821
+ """
1822
+ if len(kv_cache_config.kv_cache_groups) > 1:
1823
+ raise NotImplementedError(
1824
+ "Hybrid models with more than one KV cache type are not supported yet."
1825
+ )
1826
+
1827
+ if (
1828
+ kv_cache_config.kv_cache_groups[0].kv_cache_spec.block_size
1829
+ != self.block_size
1830
+ ):
1831
+ self.input_batch = InputBatch(
1832
+ max_num_reqs=self.max_num_reqs,
1833
+ max_model_len=self.max_model_len,
1834
+ max_num_batched_tokens=self.max_num_tokens,
1835
+ device=self.device,
1836
+ pin_memory=self.pin_memory,
1837
+ vocab_size=self.model_config.get_vocab_size(),
1838
+ block_sizes=[
1839
+ kv_cache_config.kv_cache_groups[0].kv_cache_spec.block_size
1840
+ ],
1841
+ kernel_block_sizes=[
1842
+ kv_cache_config.kv_cache_groups[0].kv_cache_spec.block_size
1843
+ ],
1844
+ )
1845
+ # Verify dtype compatibility between block_table_cpu and input_batch
1846
+ assert (
1847
+ self.block_table_cpu.dtype
1848
+ == self.input_batch.block_table[0].get_cpu_tensor().dtype
1849
+ )
1850
+
1851
+ kv_cache_sizes = {}
1852
+ for kv_cache_tensor in kv_cache_config.kv_cache_tensors:
1853
+ assert len(kv_cache_tensor.shared_by) == 1, (
1854
+ "KV cache tensor shared by multiple layers is not supported in TPU."
1855
+ )
1856
+ kv_cache_sizes[kv_cache_tensor.shared_by[0]] = kv_cache_tensor.size
1857
+
1858
+ kv_caches: dict[str, torch.Tensor] = {}
1859
+ for kv_cache_group in kv_cache_config.kv_cache_groups:
1860
+ kv_cache_spec = kv_cache_group.kv_cache_spec
1861
+ for layer_name in kv_cache_group.layer_names:
1862
+ tensor_size = kv_cache_sizes[layer_name]
1863
+ assert tensor_size % kv_cache_spec.page_size_bytes == 0
1864
+ num_blocks = tensor_size // kv_cache_spec.page_size_bytes # noqa
1865
+ if isinstance(kv_cache_spec, AttentionSpec):
1866
+ if self.use_spmd:
1867
+ num_kv_heads = kv_cache_spec.num_kv_heads
1868
+ assert self.original_parallel_config is not None
1869
+ tp_size = self.original_parallel_config.tensor_parallel_size
1870
+ # TODO: Handle kv cache duplication under SPMD mode.
1871
+ assert num_kv_heads % tp_size == 0, (
1872
+ f"num_kv_heads {num_kv_heads} must be divisible by "
1873
+ f"tp_size {tp_size} under SPMD mode"
1874
+ )
1875
+ kv_cache_shape = PallasAttentionBackend.get_kv_cache_shape(
1876
+ num_blocks,
1877
+ kv_cache_spec.block_size,
1878
+ kv_cache_spec.num_kv_heads,
1879
+ kv_cache_spec.head_size,
1880
+ )
1881
+ dtype = kv_cache_spec.dtype
1882
+
1883
+ tpu_kv_cache = torch.zeros(kv_cache_shape, dtype=dtype).to(
1884
+ self.device
1885
+ )
1886
+
1887
+ kv_caches[layer_name] = tpu_kv_cache
1888
+ else:
1889
+ raise NotImplementedError
1890
+
1891
+ # Set up cross-layer KV cache sharing if needed
1892
+ self.maybe_setup_cross_layer_kv_sharing(kv_caches, kv_cache_config)
1893
+
1894
+ bind_kv_cache(
1895
+ kv_caches,
1896
+ self.vllm_config.compilation_config.static_forward_context,
1897
+ self.kv_caches,
1898
+ )
1899
+
1900
+ if self.use_spmd:
1901
+ # Shard KV Cache
1902
+ for cache in self.kv_caches:
1903
+ xs.mark_sharding(cache, self.mesh, (None, "x", None, None))
1904
+
1905
+ if has_kv_transfer_group():
1906
+ get_kv_transfer_group().register_kv_caches(kv_caches)
1907
+ get_kv_transfer_group().set_host_xfer_buffer_ops(copy_kv_blocks)
1908
+
1909
+ def reset_dynamo_cache(self):
1910
+ # NOTE: We check `is_multimodal_model` instead of `supports_mm_inputs`
1911
+ # since the compiled model object of the language backbone of a
1912
+ # multimodal model needs to be extracted via `get_language_model`.
1913
+ if self.model_config.is_multimodal_model:
1914
+ compiled_model = self.model.get_language_model().model
1915
+ else:
1916
+ compiled_model = self.model.model
1917
+ if isinstance(compiled_model, TorchCompileWithNoGuardsWrapper):
1918
+ logger.info("Clear dynamo cache and cached dynamo bytecode.")
1919
+ torch._dynamo.eval_frame.remove_from_cache(
1920
+ compiled_model.original_code_object()
1921
+ )
1922
+ # Reset the wrapper to re-initialize.
1923
+ compiled_model.compiled = False
1924
+ TorchCompileWithNoGuardsWrapper.__init__(compiled_model)
1925
+
1926
+ @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
1927
+ def select_hidden_states(self, hidden_states, indices_do_sample):
1928
+ return hidden_states[indices_do_sample]
1929
+
1930
+ @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
1931
+ def compute_logits(self, sample_hidden_states: torch.Tensor) -> torch.Tensor:
1932
+ return self.model.compute_logits(sample_hidden_states)
1933
+
1934
+ # TODO: Under SPMD mode, sample_from_logits has correctness issue.
1935
+ # Re-enable the torch.compile once the issue is fixed in torchxla.
1936
+ # @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
1937
+ def sample_from_logits(
1938
+ self, logits: torch.Tensor, sampling_metadata: TPUSupportedSamplingMetadata
1939
+ ) -> torch.Tensor:
1940
+ """
1941
+ Sample with xla-friendly function. This function is to be traced
1942
+ separately from `forward` for lighter compilation overhead.
1943
+ """
1944
+ if sampling_metadata.all_greedy:
1945
+ out_tokens = torch.argmax(logits, dim=-1, keepdim=True)
1946
+ else:
1947
+ out_tokens = self.sampler(logits, sampling_metadata).sampled_token_ids
1948
+ return out_tokens
1949
+
1950
+ @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
1951
+ def gather_logprobs(
1952
+ self, logits: torch.Tensor, sampled_tokens: torch.Tensor
1953
+ ) -> LogprobsTensors:
1954
+ """
1955
+ Gather the top_logprobs with corresponding tokens. Use a fixed number
1956
+ of logprobs as an alternative to having multiple pre-compiled graphs.
1957
+ Select the number of logprobs actually demanded by each request on CPU.
1958
+ """
1959
+ logprobs = self.sampler.compute_logprobs(logits)
1960
+ return self.sampler.gather_logprobs(
1961
+ logprobs,
1962
+ self.model_config.max_logprobs,
1963
+ token_ids=sampled_tokens.squeeze(-1),
1964
+ )
1965
+
1966
+ @torch.compile(backend="openxla", fullgraph=True, dynamic=False)
1967
+ def structured_decode(
1968
+ self,
1969
+ require_struct_decoding: torch.Tensor,
1970
+ grammar_bitmask: torch.Tensor,
1971
+ logits: torch.Tensor,
1972
+ arange: torch.Tensor,
1973
+ ) -> torch.Tensor:
1974
+ return torch.where(
1975
+ require_struct_decoding,
1976
+ self.apply_grammar_bitmask(logits, grammar_bitmask, arange),
1977
+ logits,
1978
+ )
1979
+
1980
+ def apply_grammar_bitmask(
+ self, logits: torch.Tensor, grammar_bitmask: torch.Tensor, arange: torch.Tensor
+ ):
+ assert logits.shape[0] == grammar_bitmask.shape[0]
+ logits_cloned = logits.clone()
+ for i in range(logits.shape[0]):
+ unpacked_bitmask = (
+ torch.bitwise_right_shift(grammar_bitmask[i][:, None], arange[None, :])
+ & 1
+ ) == 0
+ unpacked_bitmask = unpacked_bitmask.reshape(-1)[: self.vocab_size]
+ logits_cloned[i] = logits_cloned[i].masked_fill(
+ unpacked_bitmask, -float("inf")
+ )
+ return logits_cloned
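For reference, a minimal standalone sketch of the bit-unpacking step above, using made-up values (each int32 word packs the allow/deny bits for 32 consecutive vocabulary tokens; the names below are illustrative, not part of the package):

import torch

vocab_size = 8
arange = torch.arange(32, dtype=torch.int32)
packed = torch.tensor([0b1011], dtype=torch.int32)  # bits set for tokens 0, 1 and 3
blocked = ((packed[:, None] >> arange[None, :]) & 1) == 0  # True where the bit is 0
blocked = blocked.reshape(-1)[:vocab_size]
logits = torch.zeros(vocab_size)
print(logits.masked_fill(blocked, float("-inf")))
# tensor([0., 0., -inf, 0., -inf, -inf, -inf, -inf])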
+
+ def embed_multimodal(self, *args, **kwargs):
+ return self.model.embed_multimodal(*args, **kwargs)
+
+ def embed_input_ids(self, *args, **kwargs):
+ return self.model.embed_input_ids(*args, **kwargs)
+
+ def prepare_structured_decoding_input(
+ self, logits: torch.Tensor, grammar_output: "GrammarOutput"
+ ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ grammar_bitmask = grammar_output.grammar_bitmask
+ num_reqs, _ = logits.shape
+
+ # Reset pre-allocated tensors
+ self.grammar_bitmask_cpu.zero_()
+ self.require_structured_out_cpu.zero_()
+
+ cumulative_mask_idx = 0
+ for req_id in grammar_output.structured_output_request_ids:
+ if req_id not in self.input_batch.req_id_to_index:
+ continue
+ batch_index = self.input_batch.req_id_to_index[req_id]
+ self.grammar_bitmask_cpu[batch_index] = torch.from_numpy(
+ grammar_bitmask[cumulative_mask_idx]
+ )
+ # It's not guaranteed that all requests in this batch require
+ # structured output, so create a bool tensor to represent
+ # the requests that need structured output.
+ self.require_structured_out_cpu[batch_index] = True
+ cumulative_mask_idx += 1
+
+ return (
+ self.require_structured_out_cpu[:num_reqs].to(logits.device),
+ self.grammar_bitmask_cpu[:num_reqs].to(logits.device),
+ self.structured_decode_arange.to(logits.device),
+ )
+
+ def _get_mm_dummy_batch(
+ self,
+ modality: str,
+ max_items_per_batch: int,
+ ) -> BatchedTensorInputs:
+ """Dummy data for profiling and precompiling multimodal models."""
+ assert self.mm_budget is not None
+
+ dummy_decoder_data = self.mm_registry.get_decoder_dummy_data(
+ model_config=self.model_config,
+ seq_len=self.max_model_len,
+ mm_counts={modality: 1},
+ cache=self.mm_budget.cache,
+ )
+ dummy_mm_data = dummy_decoder_data.multi_modal_data
+
+ # Results in the maximum GPU memory consumption of the model
+ dummy_mm_item = dummy_mm_data[modality][0]
+ dummy_mm_items = [dummy_mm_item] * max_items_per_batch
+
+ return next(
+ grouped_mm_kwargs
+ for _, _, grouped_mm_kwargs in group_mm_kwargs_by_modality(
+ dummy_mm_items,
+ device=self.device,
+ pin_memory=self.pin_memory,
+ )
+ )
+
+
+ def _get_req_paddings(min_req_size: int, max_req_size: int) -> list[int]:
+ logger.info("Preparing request paddings:")
+ # assert min_req_size is power of 2
+ assert (min_req_size & (min_req_size - 1) == 0) and min_req_size > 0
+ paddings: list = []
+ num = max(MIN_NUM_SEQS, min_req_size)
+ while num <= max_req_size and (len(paddings) == 0 or paddings[-1] != num):
+ paddings.append(num)
+ logger.info(" %d", num)
+ num = _get_padded_num_reqs_with_upper_limit(num + 1, max_req_size)
+ return paddings
+
+
+ def _get_padded_num_reqs_with_upper_limit(x: int, upper_limit: int) -> int:
+ res = MIN_NUM_SEQS if x <= MIN_NUM_SEQS else 1 << (x - 1).bit_length()
+ return min(res, upper_limit)
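A minimal, self-contained sketch of how the two helpers above interact, assuming MIN_NUM_SEQS == 8 (the constant is defined elsewhere in this module; the value here is only for illustration):

MIN_NUM_SEQS = 8  # assumed value, for illustration only

def padded_num_reqs(x: int, upper_limit: int) -> int:
    # Same rounding as _get_padded_num_reqs_with_upper_limit above: round up to
    # the next power of two, floored at MIN_NUM_SEQS and capped at upper_limit.
    res = MIN_NUM_SEQS if x <= MIN_NUM_SEQS else 1 << (x - 1).bit_length()
    return min(res, upper_limit)

assert [padded_num_reqs(x, 40) for x in (1, 9, 17, 33)] == [8, 16, 32, 40]
# _get_req_paddings(8, 40) would therefore yield [8, 16, 32, 40].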
+
+
+ def _get_token_paddings(
+ min_token_size: int, max_token_size: int, padding_gap: int
+ ) -> list[int]:
+ """Generate a list of padding sizes, starting from min_token_size,
+ ending with a number that can cover max_token_size
+
+ If padding_gap == 0 then:
+ increase 2X each time (exponential)
+ else:
+ first double the size until it exceeds padding_gap,
+ then increase the size in steps of padding_gap.
+ """
+ # assert min_token_size is power of 2
+ assert (min_token_size & (min_token_size - 1) == 0) and min_token_size > 0
+ paddings = []
+ num = min_token_size
+
+ if padding_gap == 0:
+ logger.info("Using exponential token paddings:")
+ while True:
+ logger.info(" %d", num)
+ paddings.append(num)
+ if num >= max_token_size:
+ break
+ num *= 2
+ else:
+ logger.info("Using incremental token paddings:")
+ while num <= padding_gap:
+ logger.info(" %d", num)
+ paddings.append(num)
+ num *= 2
+ num //= 2
+ while num < max_token_size:
+ num += padding_gap
+ logger.info(" %d", num)
+ paddings.append(num)
+
+ return paddings
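Two worked examples, traced by hand from the code above (input values chosen only for illustration):

# Exponential mode (padding_gap == 0): double until max_token_size is covered.
# _get_token_paddings(16, 500, padding_gap=0)   -> [16, 32, 64, 128, 256, 512]
# Incremental mode: double up to padding_gap, then add padding_gap per step.
# _get_token_paddings(16, 512, padding_gap=128) -> [16, 32, 64, 128, 256, 384, 512]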
+
+
+ def _get_padded_token_len(paddings: list[int], x: int) -> int:
+ """Return the first element in the paddings list greater than or equal to x."""
+ index = bisect.bisect_left(paddings, x)
+ assert index < len(paddings)
+ return paddings[index]
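For example, traced from the code above with an illustrative paddings list:

# _get_padded_token_len([16, 32, 64], 20) -> 32   (bisect_left returns index 1)
# _get_padded_token_len([16, 32, 64], 64) -> 64
# A value above the largest padding would trip the assert.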
+
+
+ def _get_padded_num_kv_cache_update_slices(
+ num_tokens: int, max_num_reqs: int, page_size: int
+ ) -> int:
+ """Calculates the padded number of KV cache update slices to avoid
+ recompilation."""
+ # NOTE(chengjiyao): let's say R_i is the token num for i-th request,
+ # so it occupies at most 2 + R_i // page_size pages. The total maximum
+ # possible number of pages needed is sum(2 + R_i // page_size), which
+ # is <= 2 * max_num_reqs + sum(R_i) // page_size
+ # = 2 * max_num_reqs + num_tokens // page_size
+ padded_num_slices = 2 * max_num_reqs + num_tokens // page_size
+ padded_num_slices = min(padded_num_slices, num_tokens)
+ return padded_num_slices
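A quick numeric check of the bound above, with illustrative values:

# num_tokens=1024, max_num_reqs=8, page_size=16:
# 2 * 8 + 1024 // 16 = 80 padded slices, well under the num_tokens cap of 1024.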
+
+
+ def _get_num_slices_per_kv_cache_update_block(page_size_bytes: int) -> int:
+ """Find the optimum number of slices to copy per Pallas program instance.
+
+ Increasing the number of slices copied in one instance of the kernel program
+ will increase HBM bandwidth utilization via more in-flight DMAs.
+
+ However, it will also use more VMEM, and experimentally, we observed
+ performance regression at 128 slices on v6e, likely due to running
+ out of scalar registers. Thus this function will limit the number of
+ slices to 64.
+ """
+ # The default vmem_limit_bytes of a pallas kernel is 32MB. Here we
+ # calculate num_slices_per_block based on 16MB in case any register spills.
+ vmem_limit = 16 * 1024 * 1024
+ num_slices_per_block = vmem_limit // page_size_bytes
+ assert num_slices_per_block > 0, "Number of slices should be positive"
+ num_slices_per_block = prev_power_of_2(num_slices_per_block)
+ if num_slices_per_block > 64:
+ num_slices_per_block = 64
+ return num_slices_per_block
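An illustrative calculation, assuming prev_power_of_2 returns the largest power of two not exceeding its argument:

# page_size_bytes = 2 MiB:  16 MiB // 2 MiB  = 8   -> 8 slices per block
# page_size_bytes = 96 KiB: 16 MiB // 96 KiB = 170 -> prev power of two is 128 -> capped to 64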
+
+
+ def replace_set_lora(model):
+ def _tpu_set_lora(
+ self,
+ index: int,
+ lora_a: torch.Tensor,
+ lora_b: torch.Tensor,
+ embeddings_tensor: torch.Tensor | None,
+ ):
+ # TODO: The integer index leads to a recompilation, but converting it
+ # to a tensor doesn't seem to work anymore. This might be fixed with a
+ # later release of torch_xla.
+ self._original_set_lora(index, lora_a, lora_b, embeddings_tensor)
+ torch_xla.sync(wait=False)
+
+ def _tpu_reset_lora(self, index: int):
+ self._original_reset_lora(index)
+ torch_xla.sync(wait=False)
+
+ for _, module in model.named_modules():
+ if isinstance(module, BaseLayerWithLoRA):
+ module._original_set_lora = module.set_lora
+ module._original_reset_lora = module.reset_lora
+ module.set_lora = _tpu_set_lora.__get__( # type: ignore[method-assign]
+ module, module.__class__
+ )
+ module.reset_lora = _tpu_reset_lora.__get__( # type: ignore[method-assign]
+ module, module.__class__
+ )