vllm_cpu-0.11.0.post2-cp312-cp312-manylinux_2_17_x86_64.whl

This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (1398)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +220 -0
  3. vllm/_bc_linter.py +59 -0
  4. vllm/_custom_ops.py +2044 -0
  5. vllm/_ipex_ops.py +393 -0
  6. vllm/_version.py +34 -0
  7. vllm/assets/__init__.py +0 -0
  8. vllm/assets/audio.py +45 -0
  9. vllm/assets/base.py +41 -0
  10. vllm/assets/image.py +50 -0
  11. vllm/assets/video.py +145 -0
  12. vllm/attention/__init__.py +15 -0
  13. vllm/attention/backends/__init__.py +0 -0
  14. vllm/attention/backends/abstract.py +204 -0
  15. vllm/attention/backends/utils.py +33 -0
  16. vllm/attention/layer.py +645 -0
  17. vllm/attention/layers/__init__.py +0 -0
  18. vllm/attention/layers/chunked_local_attention.py +93 -0
  19. vllm/attention/layers/cross_attention.py +162 -0
  20. vllm/attention/layers/encoder_only_attention.py +86 -0
  21. vllm/attention/ops/__init__.py +0 -0
  22. vllm/attention/ops/chunked_prefill_paged_decode.py +405 -0
  23. vllm/attention/ops/common.py +345 -0
  24. vllm/attention/ops/flashmla.py +192 -0
  25. vllm/attention/ops/merge_attn_states.py +43 -0
  26. vllm/attention/ops/paged_attn.py +262 -0
  27. vllm/attention/ops/pallas_kv_cache_update.py +124 -0
  28. vllm/attention/ops/prefix_prefill.py +928 -0
  29. vllm/attention/ops/rocm_aiter_mla.py +104 -0
  30. vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
  31. vllm/attention/ops/triton_decode_attention.py +691 -0
  32. vllm/attention/ops/triton_flash_attention.py +984 -0
  33. vllm/attention/ops/triton_merge_attn_states.py +97 -0
  34. vllm/attention/ops/triton_reshape_and_cache_flash.py +175 -0
  35. vllm/attention/ops/triton_unified_attention.py +894 -0
  36. vllm/attention/selector.py +245 -0
  37. vllm/attention/utils/__init__.py +0 -0
  38. vllm/attention/utils/fa_utils.py +85 -0
  39. vllm/attention/utils/kv_sharing_utils.py +33 -0
  40. vllm/beam_search.py +87 -0
  41. vllm/benchmarks/__init__.py +0 -0
  42. vllm/benchmarks/datasets.py +2723 -0
  43. vllm/benchmarks/latency.py +170 -0
  44. vllm/benchmarks/lib/__init__.py +3 -0
  45. vllm/benchmarks/lib/endpoint_request_func.py +533 -0
  46. vllm/benchmarks/lib/ready_checker.py +73 -0
  47. vllm/benchmarks/lib/utils.py +80 -0
  48. vllm/benchmarks/serve.py +1358 -0
  49. vllm/benchmarks/throughput.py +696 -0
  50. vllm/collect_env.py +823 -0
  51. vllm/compilation/__init__.py +0 -0
  52. vllm/compilation/activation_quant_fusion.py +189 -0
  53. vllm/compilation/backends.py +650 -0
  54. vllm/compilation/base_static_graph.py +56 -0
  55. vllm/compilation/collective_fusion.py +1188 -0
  56. vllm/compilation/compiler_interface.py +573 -0
  57. vllm/compilation/counter.py +47 -0
  58. vllm/compilation/cuda_graph.py +199 -0
  59. vllm/compilation/cuda_piecewise_backend.py +117 -0
  60. vllm/compilation/decorators.py +400 -0
  61. vllm/compilation/fix_functionalization.py +205 -0
  62. vllm/compilation/fusion.py +383 -0
  63. vllm/compilation/fusion_attn.py +295 -0
  64. vllm/compilation/fx_utils.py +84 -0
  65. vllm/compilation/inductor_pass.py +136 -0
  66. vllm/compilation/monitor.py +57 -0
  67. vllm/compilation/noop_elimination.py +158 -0
  68. vllm/compilation/pass_manager.py +125 -0
  69. vllm/compilation/post_cleanup.py +20 -0
  70. vllm/compilation/sequence_parallelism.py +478 -0
  71. vllm/compilation/torch25_custom_graph_pass.py +42 -0
  72. vllm/compilation/vllm_inductor_pass.py +156 -0
  73. vllm/compilation/wrapper.py +136 -0
  74. vllm/config/__init__.py +814 -0
  75. vllm/config/cache.py +220 -0
  76. vllm/config/compilation.py +673 -0
  77. vllm/config/device.py +74 -0
  78. vllm/config/kv_events.py +50 -0
  79. vllm/config/kv_transfer.py +111 -0
  80. vllm/config/load.py +113 -0
  81. vllm/config/lora.py +132 -0
  82. vllm/config/model.py +1912 -0
  83. vllm/config/multimodal.py +129 -0
  84. vllm/config/observability.py +99 -0
  85. vllm/config/parallel.py +524 -0
  86. vllm/config/pooler.py +97 -0
  87. vllm/config/scheduler.py +287 -0
  88. vllm/config/speculative.py +568 -0
  89. vllm/config/speech_to_text.py +39 -0
  90. vllm/config/structured_outputs.py +64 -0
  91. vllm/config/utils.py +145 -0
  92. vllm/connections.py +186 -0
  93. vllm/device_allocator/__init__.py +0 -0
  94. vllm/device_allocator/cumem.py +311 -0
  95. vllm/distributed/__init__.py +6 -0
  96. vllm/distributed/communication_op.py +41 -0
  97. vllm/distributed/device_communicators/__init__.py +0 -0
  98. vllm/distributed/device_communicators/all2all.py +440 -0
  99. vllm/distributed/device_communicators/all_reduce_utils.py +317 -0
  100. vllm/distributed/device_communicators/base_device_communicator.py +295 -0
  101. vllm/distributed/device_communicators/cpu_communicator.py +201 -0
  102. vllm/distributed/device_communicators/cuda_communicator.py +323 -0
  103. vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
  104. vllm/distributed/device_communicators/custom_all_reduce.py +311 -0
  105. vllm/distributed/device_communicators/mnnvl_compat.py +28 -0
  106. vllm/distributed/device_communicators/pynccl.py +340 -0
  107. vllm/distributed/device_communicators/pynccl_allocator.py +186 -0
  108. vllm/distributed/device_communicators/pynccl_wrapper.py +416 -0
  109. vllm/distributed/device_communicators/quick_all_reduce.py +278 -0
  110. vllm/distributed/device_communicators/ray_communicator.py +258 -0
  111. vllm/distributed/device_communicators/shm_broadcast.py +589 -0
  112. vllm/distributed/device_communicators/shm_object_storage.py +635 -0
  113. vllm/distributed/device_communicators/symm_mem.py +136 -0
  114. vllm/distributed/device_communicators/tpu_communicator.py +102 -0
  115. vllm/distributed/device_communicators/xpu_communicator.py +94 -0
  116. vllm/distributed/eplb/__init__.py +8 -0
  117. vllm/distributed/eplb/eplb_state.py +620 -0
  118. vllm/distributed/eplb/rebalance_algo.py +239 -0
  119. vllm/distributed/eplb/rebalance_execute.py +424 -0
  120. vllm/distributed/kv_events.py +362 -0
  121. vllm/distributed/kv_transfer/README.md +29 -0
  122. vllm/distributed/kv_transfer/__init__.py +13 -0
  123. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  124. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  125. vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
  126. vllm/distributed/kv_transfer/kv_connector/factory.py +113 -0
  127. vllm/distributed/kv_transfer/kv_connector/utils.py +261 -0
  128. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
  129. vllm/distributed/kv_transfer/kv_connector/v1/base.py +388 -0
  130. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +168 -0
  131. vllm/distributed/kv_transfer/kv_connector/v1/metrics.py +100 -0
  132. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +328 -0
  133. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1473 -0
  134. vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py +485 -0
  135. vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
  136. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +488 -0
  137. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +550 -0
  138. vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +267 -0
  139. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +418 -0
  140. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  141. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
  142. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
  143. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
  144. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  145. vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
  146. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +290 -0
  147. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
  148. vllm/distributed/kv_transfer/kv_transfer_state.py +73 -0
  149. vllm/distributed/parallel_state.py +1532 -0
  150. vllm/distributed/tpu_distributed_utils.py +178 -0
  151. vllm/distributed/utils.py +536 -0
  152. vllm/engine/__init__.py +0 -0
  153. vllm/engine/arg_utils.py +1778 -0
  154. vllm/engine/async_llm_engine.py +6 -0
  155. vllm/engine/llm_engine.py +6 -0
  156. vllm/engine/metrics.py +577 -0
  157. vllm/engine/metrics_types.py +84 -0
  158. vllm/engine/protocol.py +333 -0
  159. vllm/entrypoints/__init__.py +0 -0
  160. vllm/entrypoints/api_server.py +178 -0
  161. vllm/entrypoints/chat_utils.py +1705 -0
  162. vllm/entrypoints/cli/__init__.py +12 -0
  163. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  164. vllm/entrypoints/cli/benchmark/base.py +25 -0
  165. vllm/entrypoints/cli/benchmark/latency.py +21 -0
  166. vllm/entrypoints/cli/benchmark/main.py +55 -0
  167. vllm/entrypoints/cli/benchmark/serve.py +21 -0
  168. vllm/entrypoints/cli/benchmark/throughput.py +21 -0
  169. vllm/entrypoints/cli/collect_env.py +36 -0
  170. vllm/entrypoints/cli/main.py +60 -0
  171. vllm/entrypoints/cli/openai.py +233 -0
  172. vllm/entrypoints/cli/run_batch.py +67 -0
  173. vllm/entrypoints/cli/serve.py +232 -0
  174. vllm/entrypoints/cli/types.py +29 -0
  175. vllm/entrypoints/constants.py +10 -0
  176. vllm/entrypoints/context.py +481 -0
  177. vllm/entrypoints/harmony_utils.py +436 -0
  178. vllm/entrypoints/launcher.py +164 -0
  179. vllm/entrypoints/llm.py +1629 -0
  180. vllm/entrypoints/logger.py +79 -0
  181. vllm/entrypoints/openai/__init__.py +0 -0
  182. vllm/entrypoints/openai/api_server.py +1953 -0
  183. vllm/entrypoints/openai/cli_args.py +288 -0
  184. vllm/entrypoints/openai/logits_processors.py +90 -0
  185. vllm/entrypoints/openai/protocol.py +2757 -0
  186. vllm/entrypoints/openai/run_batch.py +491 -0
  187. vllm/entrypoints/openai/serving_chat.py +1597 -0
  188. vllm/entrypoints/openai/serving_classification.py +173 -0
  189. vllm/entrypoints/openai/serving_completion.py +692 -0
  190. vllm/entrypoints/openai/serving_embedding.py +631 -0
  191. vllm/entrypoints/openai/serving_engine.py +992 -0
  192. vllm/entrypoints/openai/serving_models.py +288 -0
  193. vllm/entrypoints/openai/serving_pooling.py +276 -0
  194. vllm/entrypoints/openai/serving_responses.py +1709 -0
  195. vllm/entrypoints/openai/serving_score.py +479 -0
  196. vllm/entrypoints/openai/serving_tokenization.py +196 -0
  197. vllm/entrypoints/openai/serving_transcription.py +136 -0
  198. vllm/entrypoints/openai/speech_to_text.py +388 -0
  199. vllm/entrypoints/openai/tool_parsers/__init__.py +55 -0
  200. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
  201. vllm/entrypoints/openai/tool_parsers/deepseekv31_tool_parser.py +367 -0
  202. vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
  203. vllm/entrypoints/openai/tool_parsers/glm4_moe_tool_parser.py +185 -0
  204. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
  205. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
  206. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +455 -0
  207. vllm/entrypoints/openai/tool_parsers/hunyuan_a13b_tool_parser.py +372 -0
  208. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
  209. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
  210. vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py +377 -0
  211. vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
  212. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +269 -0
  213. vllm/entrypoints/openai/tool_parsers/longcat_tool_parser.py +39 -0
  214. vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +816 -0
  215. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
  216. vllm/entrypoints/openai/tool_parsers/openai_tool_parser.py +93 -0
  217. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
  218. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
  219. vllm/entrypoints/openai/tool_parsers/qwen3coder_tool_parser.py +707 -0
  220. vllm/entrypoints/openai/tool_parsers/qwen3xml_tool_parser.py +1137 -0
  221. vllm/entrypoints/openai/tool_parsers/seed_oss_tool_parser.py +679 -0
  222. vllm/entrypoints/openai/tool_parsers/step3_tool_parser.py +296 -0
  223. vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
  224. vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +524 -0
  225. vllm/entrypoints/renderer.py +395 -0
  226. vllm/entrypoints/score_utils.py +232 -0
  227. vllm/entrypoints/ssl.py +75 -0
  228. vllm/entrypoints/tool.py +139 -0
  229. vllm/entrypoints/tool_server.py +206 -0
  230. vllm/entrypoints/utils.py +233 -0
  231. vllm/env_override.py +23 -0
  232. vllm/envs.py +1590 -0
  233. vllm/executor/__init__.py +0 -0
  234. vllm/executor/executor_base.py +381 -0
  235. vllm/executor/msgspec_utils.py +35 -0
  236. vllm/executor/ray_distributed_executor.py +699 -0
  237. vllm/executor/ray_utils.py +410 -0
  238. vllm/executor/uniproc_executor.py +176 -0
  239. vllm/forward_context.py +402 -0
  240. vllm/inputs/__init__.py +30 -0
  241. vllm/inputs/data.py +356 -0
  242. vllm/inputs/parse.py +151 -0
  243. vllm/inputs/preprocess.py +664 -0
  244. vllm/logger.py +229 -0
  245. vllm/logging_utils/__init__.py +10 -0
  246. vllm/logging_utils/dump_input.py +81 -0
  247. vllm/logging_utils/formatter.py +79 -0
  248. vllm/logging_utils/log_time.py +32 -0
  249. vllm/logits_process.py +119 -0
  250. vllm/logprobs.py +28 -0
  251. vllm/lora/__init__.py +0 -0
  252. vllm/lora/layers/__init__.py +34 -0
  253. vllm/lora/layers/base.py +69 -0
  254. vllm/lora/layers/base_linear.py +185 -0
  255. vllm/lora/layers/column_parallel_linear.py +609 -0
  256. vllm/lora/layers/logits_processor.py +247 -0
  257. vllm/lora/layers/qkv_x_parallel_linear.py +8 -0
  258. vllm/lora/layers/replicated_linear.py +60 -0
  259. vllm/lora/layers/row_parallel_linear.py +196 -0
  260. vllm/lora/layers/utils.py +65 -0
  261. vllm/lora/layers/vocal_parallel_embedding.py +174 -0
  262. vllm/lora/lora_weights.py +199 -0
  263. vllm/lora/models.py +816 -0
  264. vllm/lora/ops/__init__.py +0 -0
  265. vllm/lora/ops/ipex_ops/__init__.py +7 -0
  266. vllm/lora/ops/ipex_ops/lora_ops.py +44 -0
  267. vllm/lora/ops/torch_ops/__init__.py +16 -0
  268. vllm/lora/ops/torch_ops/lora_ops.py +119 -0
  269. vllm/lora/ops/triton_ops/__init__.py +12 -0
  270. vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
  271. vllm/lora/ops/triton_ops/lora_expand_op.py +289 -0
  272. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
  273. vllm/lora/ops/triton_ops/lora_shrink_op.py +243 -0
  274. vllm/lora/ops/triton_ops/utils.py +126 -0
  275. vllm/lora/ops/xla_ops/__init__.py +7 -0
  276. vllm/lora/ops/xla_ops/lora_ops.py +144 -0
  277. vllm/lora/peft_helper.py +127 -0
  278. vllm/lora/punica_wrapper/__init__.py +10 -0
  279. vllm/lora/punica_wrapper/punica_base.py +458 -0
  280. vllm/lora/punica_wrapper/punica_cpu.py +349 -0
  281. vllm/lora/punica_wrapper/punica_gpu.py +272 -0
  282. vllm/lora/punica_wrapper/punica_selector.py +20 -0
  283. vllm/lora/punica_wrapper/punica_tpu.py +391 -0
  284. vllm/lora/punica_wrapper/punica_xpu.py +276 -0
  285. vllm/lora/punica_wrapper/utils.py +136 -0
  286. vllm/lora/request.py +97 -0
  287. vllm/lora/resolver.py +85 -0
  288. vllm/lora/utils.py +246 -0
  289. vllm/lora/worker_manager.py +267 -0
  290. vllm/model_executor/__init__.py +12 -0
  291. vllm/model_executor/custom_op.py +194 -0
  292. vllm/model_executor/layers/__init__.py +0 -0
  293. vllm/model_executor/layers/activation.py +575 -0
  294. vllm/model_executor/layers/attention_layer_base.py +23 -0
  295. vllm/model_executor/layers/fla/__init__.py +8 -0
  296. vllm/model_executor/layers/fla/ops/__init__.py +17 -0
  297. vllm/model_executor/layers/fla/ops/chunk.py +225 -0
  298. vllm/model_executor/layers/fla/ops/chunk_delta_h.py +290 -0
  299. vllm/model_executor/layers/fla/ops/chunk_o.py +177 -0
  300. vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +140 -0
  301. vllm/model_executor/layers/fla/ops/cumsum.py +226 -0
  302. vllm/model_executor/layers/fla/ops/fused_recurrent.py +366 -0
  303. vllm/model_executor/layers/fla/ops/index.py +39 -0
  304. vllm/model_executor/layers/fla/ops/l2norm.py +143 -0
  305. vllm/model_executor/layers/fla/ops/layernorm_guard.py +337 -0
  306. vllm/model_executor/layers/fla/ops/op.py +39 -0
  307. vllm/model_executor/layers/fla/ops/solve_tril.py +365 -0
  308. vllm/model_executor/layers/fla/ops/utils.py +180 -0
  309. vllm/model_executor/layers/fla/ops/wy_fast.py +114 -0
  310. vllm/model_executor/layers/fused_moe/__init__.py +89 -0
  311. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +322 -0
  312. vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +141 -0
  313. vllm/model_executor/layers/fused_moe/config.py +804 -0
  314. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  315. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  316. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  317. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  318. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  319. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  320. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  321. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  322. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  323. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  324. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  325. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  326. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  327. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H100,dtype=fp8_w8a8.json +123 -0
  328. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  329. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  330. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  331. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  332. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  333. vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
  334. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  335. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  336. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  337. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  338. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  339. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +147 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=62,N=128,device_name=AMD_Instinct_MI300X.json +200 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=AMD_Instinct_MI300X.json +200 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=AMD_Instinct_MI300X.json +200 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  454. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  455. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  456. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
  457. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  458. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  459. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  460. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  461. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  462. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
  463. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  464. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  465. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  466. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  467. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  468. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  469. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  470. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  471. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
  472. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  473. vllm/model_executor/layers/fused_moe/configs/E=72,N=192,device_name=AMD_Instinct_MI300X.json +200 -0
  474. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=AMD_Instinct_MI300X.json +200 -0
  475. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  476. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=AMD_Instinct_MI300X.json +200 -0
  477. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  478. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  479. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  480. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  481. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  482. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  483. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  484. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  485. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  486. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  487. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  488. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  489. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  490. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  491. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  492. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  493. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  494. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  495. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  496. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  497. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  498. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  499. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  500. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  501. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  502. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  503. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  504. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  505. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
  506. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  507. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  508. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  509. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  510. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  511. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  512. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  513. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  514. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  515. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  516. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  517. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  518. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  519. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  520. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  521. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  522. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  523. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  524. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  525. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  526. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  527. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  528. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  529. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  530. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  531. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  532. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  533. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  534. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  535. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  536. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  537. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  538. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  539. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  540. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  541. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  542. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  543. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  544. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  545. vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +300 -0
  546. vllm/model_executor/layers/fused_moe/cutlass_moe.py +957 -0
  547. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +362 -0
  548. vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +413 -0
  549. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +361 -0
  550. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +274 -0
  551. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +268 -0
  552. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +300 -0
  553. vllm/model_executor/layers/fused_moe/flashinfer_trtllm_moe.py +184 -0
  554. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +993 -0
  555. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +239 -0
  556. vllm/model_executor/layers/fused_moe/fused_moe.py +1890 -0
  557. vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +307 -0
  558. vllm/model_executor/layers/fused_moe/layer.py +2195 -0
  559. vllm/model_executor/layers/fused_moe/modular_kernel.py +1038 -0
  560. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +87 -0
  561. vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
  562. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +205 -0
  563. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  564. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +341 -0
  565. vllm/model_executor/layers/fused_moe/prepare_finalize.py +70 -0
  566. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +424 -0
  567. vllm/model_executor/layers/fused_moe/routing_simulator.py +291 -0
  568. vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +146 -0
  569. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +143 -0
  570. vllm/model_executor/layers/fused_moe/trtllm_moe.py +191 -0
  571. vllm/model_executor/layers/fused_moe/utils.py +274 -0
  572. vllm/model_executor/layers/layernorm.py +395 -0
  573. vllm/model_executor/layers/lightning_attn.py +661 -0
  574. vllm/model_executor/layers/linear.py +1603 -0
  575. vllm/model_executor/layers/logits_processor.py +106 -0
  576. vllm/model_executor/layers/mamba/__init__.py +0 -0
  577. vllm/model_executor/layers/mamba/abstract.py +42 -0
  578. vllm/model_executor/layers/mamba/linear_attn.py +403 -0
  579. vllm/model_executor/layers/mamba/mamba_mixer.py +466 -0
  580. vllm/model_executor/layers/mamba/mamba_mixer2.py +764 -0
  581. vllm/model_executor/layers/mamba/mamba_utils.py +186 -0
  582. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  583. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +1092 -0
  584. vllm/model_executor/layers/mamba/ops/layernorm_gated.py +168 -0
  585. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
  586. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +242 -0
  587. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +527 -0
  588. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +724 -0
  589. vllm/model_executor/layers/mamba/ops/ssd_combined.py +238 -0
  590. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +200 -0
  591. vllm/model_executor/layers/mamba/short_conv.py +253 -0
  592. vllm/model_executor/layers/mla.py +173 -0
  593. vllm/model_executor/layers/pooler.py +719 -0
  594. vllm/model_executor/layers/quantization/__init__.py +157 -0
  595. vllm/model_executor/layers/quantization/auto_round.py +388 -0
  596. vllm/model_executor/layers/quantization/awq.py +228 -0
  597. vllm/model_executor/layers/quantization/awq_marlin.py +554 -0
  598. vllm/model_executor/layers/quantization/awq_triton.py +320 -0
  599. vllm/model_executor/layers/quantization/base_config.py +170 -0
  600. vllm/model_executor/layers/quantization/bitblas.py +464 -0
  601. vllm/model_executor/layers/quantization/bitsandbytes.py +627 -0
  602. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  603. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +797 -0
  604. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +2074 -0
  605. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +27 -0
  606. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +366 -0
  607. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  608. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
  609. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +105 -0
  610. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +185 -0
  611. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +169 -0
  612. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +135 -0
  613. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
  614. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +157 -0
  615. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
  616. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
  617. vllm/model_executor/layers/quantization/compressed_tensors/transform/__init__.py +0 -0
  618. vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +238 -0
  619. vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +153 -0
  620. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/__init__.py +0 -0
  621. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +46 -0
  622. vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
  623. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
  624. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  625. vllm/model_executor/layers/quantization/deepspeedfp.py +196 -0
  626. vllm/model_executor/layers/quantization/experts_int8.py +223 -0
  627. vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
  628. vllm/model_executor/layers/quantization/fp8.py +1098 -0
  629. vllm/model_executor/layers/quantization/gguf.py +599 -0
  630. vllm/model_executor/layers/quantization/gptq.py +340 -0
  631. vllm/model_executor/layers/quantization/gptq_bitblas.py +448 -0
  632. vllm/model_executor/layers/quantization/gptq_marlin.py +751 -0
  633. vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
  634. vllm/model_executor/layers/quantization/hqq_marlin.py +333 -0
  635. vllm/model_executor/layers/quantization/inc.py +61 -0
  636. vllm/model_executor/layers/quantization/input_quant_fp8.py +156 -0
  637. vllm/model_executor/layers/quantization/ipex_quant.py +415 -0
  638. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  639. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +91 -0
  640. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +93 -0
  641. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
  642. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +302 -0
  643. vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +92 -0
  644. vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +117 -0
  645. vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +92 -0
  646. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
  647. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +144 -0
  648. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +139 -0
  649. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
  650. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +89 -0
  651. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +161 -0
  652. vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +206 -0
  653. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
  654. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
  655. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +104 -0
  656. vllm/model_executor/layers/quantization/kv_cache.py +143 -0
  657. vllm/model_executor/layers/quantization/modelopt.py +1596 -0
  658. vllm/model_executor/layers/quantization/moe_wna16.py +484 -0
  659. vllm/model_executor/layers/quantization/mxfp4.py +988 -0
  660. vllm/model_executor/layers/quantization/petit.py +306 -0
  661. vllm/model_executor/layers/quantization/ptpc_fp8.py +129 -0
  662. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  663. vllm/model_executor/layers/quantization/quark/quark.py +432 -0
  664. vllm/model_executor/layers/quantization/quark/quark_moe.py +561 -0
  665. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  666. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  667. vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +239 -0
  668. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +163 -0
  669. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
  670. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  671. vllm/model_executor/layers/quantization/rtn.py +466 -0
  672. vllm/model_executor/layers/quantization/schema.py +86 -0
  673. vllm/model_executor/layers/quantization/torchao.py +214 -0
  674. vllm/model_executor/layers/quantization/tpu_int8.py +125 -0
  675. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  676. vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
  677. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +210 -0
  678. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  679. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  680. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  681. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  682. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  683. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  684. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  685. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  686. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  687. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  688. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  689. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  690. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  691. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  722. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  723. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  724. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  725. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  726. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  727. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  728. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  729. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  730. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  731. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  732. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  733. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  734. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  735. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  736. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  737. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  738. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  739. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  740. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  741. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  742. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  743. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  744. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  745. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  746. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  747. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  748. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  749. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  750. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  751. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  752. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  753. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  754. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  755. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  756. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  757. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  758. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  759. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  760. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  761. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  762. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  763. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  764. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  765. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  766. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  767. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  768. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  769. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  770. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  771. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  772. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  773. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  774. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  775. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  776. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  777. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  778. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  779. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  780. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  781. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  782. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  783. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  784. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  785. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  786. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  787. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  788. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  789. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  790. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  791. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  792. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  793. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  794. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  795. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  796. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  797. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  798. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  799. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  800. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  801. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  802. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  803. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  804. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  805. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  806. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  807. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  808. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  809. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  810. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  811. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  812. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  813. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  814. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  815. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  816. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  817. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  818. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  819. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  820. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  821. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  822. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  823. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  824. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  825. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  826. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  827. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  828. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  829. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  830. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  831. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  832. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  833. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  834. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  835. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  836. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  837. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  838. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  839. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  840. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  841. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  842. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  843. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  844. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  845. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  846. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  847. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  848. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  849. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  850. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  851. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  852. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  853. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  854. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  855. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  856. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  857. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  858. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  859. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  860. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  861. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  862. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  863. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  864. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  865. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  866. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  867. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  868. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  869. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  870. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  871. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  872. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  873. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  874. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  875. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  876. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  877. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  878. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  879. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  880. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  881. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  882. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  883. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  884. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  885. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  886. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  887. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  888. vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
  889. vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +79 -0
  890. vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +248 -0
  891. vllm/model_executor/layers/quantization/utils/fp8_utils.py +949 -0
  892. vllm/model_executor/layers/quantization/utils/gptq_utils.py +146 -0
  893. vllm/model_executor/layers/quantization/utils/int8_utils.py +492 -0
  894. vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
  895. vllm/model_executor/layers/quantization/utils/machete_utils.py +50 -0
  896. vllm/model_executor/layers/quantization/utils/marlin_utils.py +479 -0
  897. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +396 -0
  898. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +345 -0
  899. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
  900. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
  901. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +141 -0
  902. vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +20 -0
  903. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +137 -0
  904. vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +59 -0
  905. vllm/model_executor/layers/quantization/utils/petit_utils.py +122 -0
  906. vllm/model_executor/layers/quantization/utils/quant_utils.py +641 -0
  907. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +458 -0
  908. vllm/model_executor/layers/resampler.py +270 -0
  909. vllm/model_executor/layers/rotary_embedding/__init__.py +204 -0
  910. vllm/model_executor/layers/rotary_embedding/base.py +177 -0
  911. vllm/model_executor/layers/rotary_embedding/common.py +150 -0
  912. vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +138 -0
  913. vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +197 -0
  914. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +41 -0
  915. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +67 -0
  916. vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +80 -0
  917. vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
  918. vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
  919. vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +81 -0
  920. vllm/model_executor/layers/rotary_embedding/mrope.py +1321 -0
  921. vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +42 -0
  922. vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +129 -0
  923. vllm/model_executor/layers/rotary_embedding/rocm_aiter_rope_ops.py +86 -0
  924. vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +68 -0
  925. vllm/model_executor/layers/shared_fused_moe/__init__.py +6 -0
  926. vllm/model_executor/layers/shared_fused_moe/shared_fused_moe.py +56 -0
  927. vllm/model_executor/layers/utils.py +195 -0
  928. vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
  929. vllm/model_executor/model_loader/__init__.py +138 -0
  930. vllm/model_executor/model_loader/base_loader.py +52 -0
  931. vllm/model_executor/model_loader/bitsandbytes_loader.py +788 -0
  932. vllm/model_executor/model_loader/default_loader.py +277 -0
  933. vllm/model_executor/model_loader/dummy_loader.py +28 -0
  934. vllm/model_executor/model_loader/gguf_loader.py +155 -0
  935. vllm/model_executor/model_loader/runai_streamer_loader.py +104 -0
  936. vllm/model_executor/model_loader/sharded_state_loader.py +199 -0
  937. vllm/model_executor/model_loader/tensorizer.py +738 -0
  938. vllm/model_executor/model_loader/tensorizer_loader.py +143 -0
  939. vllm/model_executor/model_loader/tpu.py +114 -0
  940. vllm/model_executor/model_loader/utils.py +292 -0
  941. vllm/model_executor/model_loader/weight_utils.py +990 -0
  942. vllm/model_executor/models/__init__.py +33 -0
  943. vllm/model_executor/models/adapters.py +542 -0
  944. vllm/model_executor/models/aimv2.py +246 -0
  945. vllm/model_executor/models/apertus.py +579 -0
  946. vllm/model_executor/models/arcee.py +422 -0
  947. vllm/model_executor/models/arctic.py +558 -0
  948. vllm/model_executor/models/aria.py +650 -0
  949. vllm/model_executor/models/aya_vision.py +468 -0
  950. vllm/model_executor/models/baichuan.py +474 -0
  951. vllm/model_executor/models/bailing_moe.py +642 -0
  952. vllm/model_executor/models/bamba.py +514 -0
  953. vllm/model_executor/models/bert.py +665 -0
  954. vllm/model_executor/models/bert_with_rope.py +687 -0
  955. vllm/model_executor/models/blip.py +339 -0
  956. vllm/model_executor/models/blip2.py +712 -0
  957. vllm/model_executor/models/bloom.py +374 -0
  958. vllm/model_executor/models/chameleon.py +1139 -0
  959. vllm/model_executor/models/chatglm.py +476 -0
  960. vllm/model_executor/models/clip.py +407 -0
  961. vllm/model_executor/models/cohere2_vision.py +481 -0
  962. vllm/model_executor/models/commandr.py +465 -0
  963. vllm/model_executor/models/config.py +445 -0
  964. vllm/model_executor/models/dbrx.py +471 -0
  965. vllm/model_executor/models/deepseek.py +497 -0
  966. vllm/model_executor/models/deepseek_eagle.py +240 -0
  967. vllm/model_executor/models/deepseek_mtp.py +289 -0
  968. vllm/model_executor/models/deepseek_v2.py +1444 -0
  969. vllm/model_executor/models/deepseek_vl2.py +658 -0
  970. vllm/model_executor/models/dots1.py +546 -0
  971. vllm/model_executor/models/dots_ocr.py +873 -0
  972. vllm/model_executor/models/ernie45.py +43 -0
  973. vllm/model_executor/models/ernie45_moe.py +607 -0
  974. vllm/model_executor/models/ernie45_vl.py +1527 -0
  975. vllm/model_executor/models/ernie45_vl_moe.py +727 -0
  976. vllm/model_executor/models/ernie_mtp.py +268 -0
  977. vllm/model_executor/models/exaone.py +550 -0
  978. vllm/model_executor/models/exaone4.py +533 -0
  979. vllm/model_executor/models/fairseq2_llama.py +154 -0
  980. vllm/model_executor/models/falcon.py +509 -0
  981. vllm/model_executor/models/falcon_h1.py +674 -0
  982. vllm/model_executor/models/fuyu.py +399 -0
  983. vllm/model_executor/models/gemma.py +425 -0
  984. vllm/model_executor/models/gemma2.py +422 -0
  985. vllm/model_executor/models/gemma3.py +555 -0
  986. vllm/model_executor/models/gemma3_mm.py +721 -0
  987. vllm/model_executor/models/gemma3n.py +1113 -0
  988. vllm/model_executor/models/gemma3n_mm.py +761 -0
  989. vllm/model_executor/models/glm.py +23 -0
  990. vllm/model_executor/models/glm4.py +304 -0
  991. vllm/model_executor/models/glm4_1v.py +1690 -0
  992. vllm/model_executor/models/glm4_moe.py +727 -0
  993. vllm/model_executor/models/glm4_moe_mtp.py +301 -0
  994. vllm/model_executor/models/glm4v.py +654 -0
  995. vllm/model_executor/models/gpt2.py +380 -0
  996. vllm/model_executor/models/gpt_bigcode.py +344 -0
  997. vllm/model_executor/models/gpt_j.py +339 -0
  998. vllm/model_executor/models/gpt_neox.py +330 -0
  999. vllm/model_executor/models/gpt_oss.py +712 -0
  1000. vllm/model_executor/models/granite.py +489 -0
  1001. vllm/model_executor/models/granite_speech.py +794 -0
  1002. vllm/model_executor/models/granitemoe.py +550 -0
  1003. vllm/model_executor/models/granitemoehybrid.py +614 -0
  1004. vllm/model_executor/models/granitemoeshared.py +332 -0
  1005. vllm/model_executor/models/gritlm.py +262 -0
  1006. vllm/model_executor/models/grok1.py +547 -0
  1007. vllm/model_executor/models/h2ovl.py +536 -0
  1008. vllm/model_executor/models/hunyuan_v1.py +1042 -0
  1009. vllm/model_executor/models/hyperclovax_vision.py +1192 -0
  1010. vllm/model_executor/models/idefics2_vision_model.py +417 -0
  1011. vllm/model_executor/models/idefics3.py +756 -0
  1012. vllm/model_executor/models/interfaces.py +959 -0
  1013. vllm/model_executor/models/interfaces_base.py +192 -0
  1014. vllm/model_executor/models/intern_vit.py +441 -0
  1015. vllm/model_executor/models/internlm2.py +450 -0
  1016. vllm/model_executor/models/internlm2_ve.py +148 -0
  1017. vllm/model_executor/models/interns1.py +838 -0
  1018. vllm/model_executor/models/interns1_vit.py +418 -0
  1019. vllm/model_executor/models/internvl.py +1423 -0
  1020. vllm/model_executor/models/jais.py +373 -0
  1021. vllm/model_executor/models/jamba.py +591 -0
  1022. vllm/model_executor/models/jina_vl.py +144 -0
  1023. vllm/model_executor/models/keye.py +1680 -0
  1024. vllm/model_executor/models/keye_vl1_5.py +602 -0
  1025. vllm/model_executor/models/kimi_vl.py +618 -0
  1026. vllm/model_executor/models/lfm2.py +548 -0
  1027. vllm/model_executor/models/llama.py +669 -0
  1028. vllm/model_executor/models/llama4.py +746 -0
  1029. vllm/model_executor/models/llama4_eagle.py +239 -0
  1030. vllm/model_executor/models/llama_eagle.py +179 -0
  1031. vllm/model_executor/models/llama_eagle3.py +296 -0
  1032. vllm/model_executor/models/llava.py +870 -0
  1033. vllm/model_executor/models/llava_next.py +571 -0
  1034. vllm/model_executor/models/llava_next_video.py +476 -0
  1035. vllm/model_executor/models/llava_onevision.py +942 -0
  1036. vllm/model_executor/models/longcat_flash.py +715 -0
  1037. vllm/model_executor/models/longcat_flash_mtp.py +352 -0
  1038. vllm/model_executor/models/mamba.py +275 -0
  1039. vllm/model_executor/models/mamba2.py +291 -0
  1040. vllm/model_executor/models/medusa.py +169 -0
  1041. vllm/model_executor/models/midashenglm.py +792 -0
  1042. vllm/model_executor/models/mimo.py +188 -0
  1043. vllm/model_executor/models/mimo_mtp.py +280 -0
  1044. vllm/model_executor/models/minicpm.py +631 -0
  1045. vllm/model_executor/models/minicpm3.py +230 -0
  1046. vllm/model_executor/models/minicpm_eagle.py +389 -0
  1047. vllm/model_executor/models/minicpmo.py +770 -0
  1048. vllm/model_executor/models/minicpmv.py +1784 -0
  1049. vllm/model_executor/models/minimax_text_01.py +986 -0
  1050. vllm/model_executor/models/minimax_vl_01.py +426 -0
  1051. vllm/model_executor/models/mistral3.py +628 -0
  1052. vllm/model_executor/models/mixtral.py +606 -0
  1053. vllm/model_executor/models/mllama4.py +1076 -0
  1054. vllm/model_executor/models/mlp_speculator.py +206 -0
  1055. vllm/model_executor/models/modernbert.py +374 -0
  1056. vllm/model_executor/models/module_mapping.py +72 -0
  1057. vllm/model_executor/models/molmo.py +1567 -0
  1058. vllm/model_executor/models/moonvit.py +673 -0
  1059. vllm/model_executor/models/motif.py +345 -0
  1060. vllm/model_executor/models/mpt.py +329 -0
  1061. vllm/model_executor/models/nano_nemotron_vl.py +1394 -0
  1062. vllm/model_executor/models/nemotron.py +507 -0
  1063. vllm/model_executor/models/nemotron_h.py +565 -0
  1064. vllm/model_executor/models/nemotron_nas.py +481 -0
  1065. vllm/model_executor/models/nemotron_vl.py +652 -0
  1066. vllm/model_executor/models/nvlm_d.py +203 -0
  1067. vllm/model_executor/models/olmo.py +404 -0
  1068. vllm/model_executor/models/olmo2.py +439 -0
  1069. vllm/model_executor/models/olmoe.py +483 -0
  1070. vllm/model_executor/models/opt.py +412 -0
  1071. vllm/model_executor/models/orion.py +348 -0
  1072. vllm/model_executor/models/ovis.py +559 -0
  1073. vllm/model_executor/models/ovis2_5.py +642 -0
  1074. vllm/model_executor/models/paligemma.py +411 -0
  1075. vllm/model_executor/models/persimmon.py +343 -0
  1076. vllm/model_executor/models/phi.py +356 -0
  1077. vllm/model_executor/models/phi3.py +19 -0
  1078. vllm/model_executor/models/phi3v.py +698 -0
  1079. vllm/model_executor/models/phi4_multimodal.py +1475 -0
  1080. vllm/model_executor/models/phi4mm.py +1279 -0
  1081. vllm/model_executor/models/phi4mm_audio.py +1254 -0
  1082. vllm/model_executor/models/phi4mm_utils.py +1875 -0
  1083. vllm/model_executor/models/phimoe.py +679 -0
  1084. vllm/model_executor/models/pixtral.py +1345 -0
  1085. vllm/model_executor/models/plamo2.py +978 -0
  1086. vllm/model_executor/models/qwen.py +361 -0
  1087. vllm/model_executor/models/qwen2.py +523 -0
  1088. vllm/model_executor/models/qwen2_5_omni_thinker.py +984 -0
  1089. vllm/model_executor/models/qwen2_5_vl.py +1481 -0
  1090. vllm/model_executor/models/qwen2_audio.py +489 -0
  1091. vllm/model_executor/models/qwen2_moe.py +558 -0
  1092. vllm/model_executor/models/qwen2_rm.py +122 -0
  1093. vllm/model_executor/models/qwen2_vl.py +1670 -0
  1094. vllm/model_executor/models/qwen3.py +341 -0
  1095. vllm/model_executor/models/qwen3_moe.py +692 -0
  1096. vllm/model_executor/models/qwen3_next.py +1266 -0
  1097. vllm/model_executor/models/qwen3_next_mtp.py +281 -0
  1098. vllm/model_executor/models/qwen3_vl.py +1613 -0
  1099. vllm/model_executor/models/qwen3_vl_moe.py +358 -0
  1100. vllm/model_executor/models/qwen_vl.py +795 -0
  1101. vllm/model_executor/models/radio.py +576 -0
  1102. vllm/model_executor/models/registry.py +990 -0
  1103. vllm/model_executor/models/roberta.py +252 -0
  1104. vllm/model_executor/models/rvl.py +103 -0
  1105. vllm/model_executor/models/seed_oss.py +485 -0
  1106. vllm/model_executor/models/siglip.py +540 -0
  1107. vllm/model_executor/models/siglip2navit.py +689 -0
  1108. vllm/model_executor/models/skyworkr1v.py +911 -0
  1109. vllm/model_executor/models/smolvlm.py +44 -0
  1110. vllm/model_executor/models/solar.py +504 -0
  1111. vllm/model_executor/models/stablelm.py +341 -0
  1112. vllm/model_executor/models/starcoder2.py +354 -0
  1113. vllm/model_executor/models/step3_text.py +510 -0
  1114. vllm/model_executor/models/step3_vl.py +1072 -0
  1115. vllm/model_executor/models/swin.py +475 -0
  1116. vllm/model_executor/models/tarsier.py +639 -0
  1117. vllm/model_executor/models/telechat2.py +151 -0
  1118. vllm/model_executor/models/teleflm.py +79 -0
  1119. vllm/model_executor/models/terratorch.py +294 -0
  1120. vllm/model_executor/models/transformers.py +948 -0
  1121. vllm/model_executor/models/ultravox.py +654 -0
  1122. vllm/model_executor/models/utils.py +808 -0
  1123. vllm/model_executor/models/vision.py +404 -0
  1124. vllm/model_executor/models/voxtral.py +786 -0
  1125. vllm/model_executor/models/whisper.py +963 -0
  1126. vllm/model_executor/models/zamba2.py +960 -0
  1127. vllm/model_executor/parameter.py +620 -0
  1128. vllm/model_executor/utils.py +86 -0
  1129. vllm/model_executor/warmup/__init__.py +0 -0
  1130. vllm/model_executor/warmup/deep_gemm_warmup.py +230 -0
  1131. vllm/model_executor/warmup/kernel_warmup.py +83 -0
  1132. vllm/multimodal/__init__.py +33 -0
  1133. vllm/multimodal/audio.py +116 -0
  1134. vllm/multimodal/base.py +27 -0
  1135. vllm/multimodal/cache.py +697 -0
  1136. vllm/multimodal/evs.py +273 -0
  1137. vllm/multimodal/hasher.py +102 -0
  1138. vllm/multimodal/image.py +130 -0
  1139. vllm/multimodal/inputs.py +987 -0
  1140. vllm/multimodal/parse.py +511 -0
  1141. vllm/multimodal/processing.py +2148 -0
  1142. vllm/multimodal/profiling.py +284 -0
  1143. vllm/multimodal/registry.py +345 -0
  1144. vllm/multimodal/utils.py +503 -0
  1145. vllm/multimodal/video.py +319 -0
  1146. vllm/outputs.py +324 -0
  1147. vllm/platforms/__init__.py +263 -0
  1148. vllm/platforms/cpu.py +340 -0
  1149. vllm/platforms/cuda.py +668 -0
  1150. vllm/platforms/interface.py +620 -0
  1151. vllm/platforms/rocm.py +497 -0
  1152. vllm/platforms/tpu.py +233 -0
  1153. vllm/platforms/xpu.py +243 -0
  1154. vllm/plugins/__init__.py +72 -0
  1155. vllm/plugins/io_processors/__init__.py +68 -0
  1156. vllm/plugins/io_processors/interface.py +67 -0
  1157. vllm/plugins/lora_resolvers/README.md +16 -0
  1158. vllm/plugins/lora_resolvers/__init__.py +0 -0
  1159. vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
  1160. vllm/pooling_params.py +191 -0
  1161. vllm/profiler/__init__.py +0 -0
  1162. vllm/profiler/layerwise_profile.py +375 -0
  1163. vllm/profiler/utils.py +148 -0
  1164. vllm/py.typed +2 -0
  1165. vllm/ray/__init__.py +0 -0
  1166. vllm/ray/lazy_utils.py +22 -0
  1167. vllm/ray/ray_env.py +72 -0
  1168. vllm/reasoning/__init__.py +29 -0
  1169. vllm/reasoning/abs_reasoning_parsers.py +202 -0
  1170. vllm/reasoning/basic_parsers.py +156 -0
  1171. vllm/reasoning/deepseek_r1_reasoning_parser.py +67 -0
  1172. vllm/reasoning/glm4_moe_reasoning_parser.py +151 -0
  1173. vllm/reasoning/gptoss_reasoning_parser.py +87 -0
  1174. vllm/reasoning/granite_reasoning_parser.py +363 -0
  1175. vllm/reasoning/hunyuan_a13b_reasoning_parser.py +245 -0
  1176. vllm/reasoning/mistral_reasoning_parser.py +56 -0
  1177. vllm/reasoning/qwen3_reasoning_parser.py +72 -0
  1178. vllm/reasoning/seedoss_reasoning_parser.py +28 -0
  1179. vllm/reasoning/step3_reasoning_parser.py +109 -0
  1180. vllm/sampling_params.py +593 -0
  1181. vllm/scalar_type.py +349 -0
  1182. vllm/scripts.py +15 -0
  1183. vllm/sequence.py +103 -0
  1184. vllm/tasks.py +11 -0
  1185. vllm/test_utils.py +129 -0
  1186. vllm/third_party/__init__.py +0 -0
  1187. vllm/third_party/pynvml.py +6140 -0
  1188. vllm/tracing.py +136 -0
  1189. vllm/transformers_utils/__init__.py +24 -0
  1190. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1191. vllm/transformers_utils/chat_templates/registry.py +70 -0
  1192. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1193. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1194. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1195. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1196. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1197. vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
  1198. vllm/transformers_utils/config.py +1102 -0
  1199. vllm/transformers_utils/config_parser_base.py +20 -0
  1200. vllm/transformers_utils/configs/__init__.py +63 -0
  1201. vllm/transformers_utils/configs/arctic.py +207 -0
  1202. vllm/transformers_utils/configs/chatglm.py +72 -0
  1203. vllm/transformers_utils/configs/deepseek_v3.py +101 -0
  1204. vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
  1205. vllm/transformers_utils/configs/dotsocr.py +69 -0
  1206. vllm/transformers_utils/configs/eagle.py +84 -0
  1207. vllm/transformers_utils/configs/falcon.py +90 -0
  1208. vllm/transformers_utils/configs/jais.py +237 -0
  1209. vllm/transformers_utils/configs/kimi_vl.py +37 -0
  1210. vllm/transformers_utils/configs/medusa.py +63 -0
  1211. vllm/transformers_utils/configs/midashenglm.py +101 -0
  1212. vllm/transformers_utils/configs/mistral.py +165 -0
  1213. vllm/transformers_utils/configs/mlp_speculator.py +68 -0
  1214. vllm/transformers_utils/configs/moonvit.py +33 -0
  1215. vllm/transformers_utils/configs/nemotron.py +205 -0
  1216. vllm/transformers_utils/configs/nemotron_h.py +259 -0
  1217. vllm/transformers_utils/configs/nemotron_vl.py +56 -0
  1218. vllm/transformers_utils/configs/olmo3.py +80 -0
  1219. vllm/transformers_utils/configs/ovis.py +176 -0
  1220. vllm/transformers_utils/configs/qwen3_next.py +275 -0
  1221. vllm/transformers_utils/configs/radio.py +91 -0
  1222. vllm/transformers_utils/configs/speculators/__init__.py +2 -0
  1223. vllm/transformers_utils/configs/speculators/algos.py +32 -0
  1224. vllm/transformers_utils/configs/speculators/base.py +111 -0
  1225. vllm/transformers_utils/configs/step3_vl.py +123 -0
  1226. vllm/transformers_utils/configs/ultravox.py +116 -0
  1227. vllm/transformers_utils/detokenizer_utils.py +199 -0
  1228. vllm/transformers_utils/dynamic_module.py +60 -0
  1229. vllm/transformers_utils/processor.py +299 -0
  1230. vllm/transformers_utils/processors/__init__.py +16 -0
  1231. vllm/transformers_utils/processors/deepseek_vl2.py +362 -0
  1232. vllm/transformers_utils/processors/ovis.py +420 -0
  1233. vllm/transformers_utils/processors/ovis2_5.py +458 -0
  1234. vllm/transformers_utils/runai_utils.py +104 -0
  1235. vllm/transformers_utils/s3_utils.py +93 -0
  1236. vllm/transformers_utils/tokenizer.py +292 -0
  1237. vllm/transformers_utils/tokenizer_base.py +154 -0
  1238. vllm/transformers_utils/tokenizers/__init__.py +10 -0
  1239. vllm/transformers_utils/tokenizers/mistral.py +521 -0
  1240. vllm/transformers_utils/utils.py +108 -0
  1241. vllm/triton_utils/__init__.py +16 -0
  1242. vllm/triton_utils/importing.py +96 -0
  1243. vllm/usage/__init__.py +0 -0
  1244. vllm/usage/usage_lib.py +259 -0
  1245. vllm/utils/__init__.py +3566 -0
  1246. vllm/utils/deep_gemm.py +319 -0
  1247. vllm/utils/flashinfer.py +443 -0
  1248. vllm/utils/jsontree.py +178 -0
  1249. vllm/utils/tensor_schema.py +235 -0
  1250. vllm/v1/__init__.py +0 -0
  1251. vllm/v1/attention/__init__.py +0 -0
  1252. vllm/v1/attention/backends/__init__.py +0 -0
  1253. vllm/v1/attention/backends/cpu_attn.py +919 -0
  1254. vllm/v1/attention/backends/flash_attn.py +795 -0
  1255. vllm/v1/attention/backends/flashinfer.py +1181 -0
  1256. vllm/v1/attention/backends/flex_attention.py +861 -0
  1257. vllm/v1/attention/backends/gdn_attn.py +332 -0
  1258. vllm/v1/attention/backends/linear_attn.py +67 -0
  1259. vllm/v1/attention/backends/mamba1_attn.py +81 -0
  1260. vllm/v1/attention/backends/mamba2_attn.py +232 -0
  1261. vllm/v1/attention/backends/mamba_attn.py +52 -0
  1262. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1263. vllm/v1/attention/backends/mla/common.py +1783 -0
  1264. vllm/v1/attention/backends/mla/cutlass_mla.py +248 -0
  1265. vllm/v1/attention/backends/mla/flashattn_mla.py +271 -0
  1266. vllm/v1/attention/backends/mla/flashinfer_mla.py +114 -0
  1267. vllm/v1/attention/backends/mla/flashmla.py +203 -0
  1268. vllm/v1/attention/backends/mla/flashmla_sparse.py +544 -0
  1269. vllm/v1/attention/backends/mla/indexer.py +342 -0
  1270. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +255 -0
  1271. vllm/v1/attention/backends/mla/triton_mla.py +177 -0
  1272. vllm/v1/attention/backends/pallas.py +409 -0
  1273. vllm/v1/attention/backends/rocm_aiter_fa.py +549 -0
  1274. vllm/v1/attention/backends/rocm_attn.py +426 -0
  1275. vllm/v1/attention/backends/short_conv_attn.py +94 -0
  1276. vllm/v1/attention/backends/tree_attn.py +451 -0
  1277. vllm/v1/attention/backends/triton_attn.py +361 -0
  1278. vllm/v1/attention/backends/utils.py +990 -0
  1279. vllm/v1/attention/backends/xformers.py +438 -0
  1280. vllm/v1/core/__init__.py +0 -0
  1281. vllm/v1/core/block_pool.py +416 -0
  1282. vllm/v1/core/encoder_cache_manager.py +333 -0
  1283. vllm/v1/core/kv_cache_coordinator.py +440 -0
  1284. vllm/v1/core/kv_cache_manager.py +399 -0
  1285. vllm/v1/core/kv_cache_utils.py +1291 -0
  1286. vllm/v1/core/sched/__init__.py +0 -0
  1287. vllm/v1/core/sched/async_scheduler.py +47 -0
  1288. vllm/v1/core/sched/interface.py +158 -0
  1289. vllm/v1/core/sched/output.py +166 -0
  1290. vllm/v1/core/sched/request_queue.py +224 -0
  1291. vllm/v1/core/sched/scheduler.py +1296 -0
  1292. vllm/v1/core/sched/utils.py +69 -0
  1293. vllm/v1/core/single_type_kv_cache_manager.py +671 -0
  1294. vllm/v1/cudagraph_dispatcher.py +125 -0
  1295. vllm/v1/engine/__init__.py +203 -0
  1296. vllm/v1/engine/async_llm.py +742 -0
  1297. vllm/v1/engine/coordinator.py +357 -0
  1298. vllm/v1/engine/core.py +1235 -0
  1299. vllm/v1/engine/core_client.py +1334 -0
  1300. vllm/v1/engine/detokenizer.py +349 -0
  1301. vllm/v1/engine/exceptions.py +17 -0
  1302. vllm/v1/engine/llm_engine.py +370 -0
  1303. vllm/v1/engine/logprobs.py +201 -0
  1304. vllm/v1/engine/output_processor.py +576 -0
  1305. vllm/v1/engine/parallel_sampling.py +133 -0
  1306. vllm/v1/engine/processor.py +545 -0
  1307. vllm/v1/engine/utils.py +860 -0
  1308. vllm/v1/executor/__init__.py +0 -0
  1309. vllm/v1/executor/abstract.py +137 -0
  1310. vllm/v1/executor/multiproc_executor.py +726 -0
  1311. vllm/v1/executor/ray_distributed_executor.py +108 -0
  1312. vllm/v1/executor/utils.py +23 -0
  1313. vllm/v1/kv_cache_interface.py +375 -0
  1314. vllm/v1/kv_offload/__init__.py +0 -0
  1315. vllm/v1/kv_offload/abstract.py +165 -0
  1316. vllm/v1/kv_offload/backend.py +96 -0
  1317. vllm/v1/kv_offload/backends/__init__.py +0 -0
  1318. vllm/v1/kv_offload/backends/cpu.py +61 -0
  1319. vllm/v1/kv_offload/cpu.py +75 -0
  1320. vllm/v1/kv_offload/factory.py +56 -0
  1321. vllm/v1/kv_offload/lru_manager.py +132 -0
  1322. vllm/v1/kv_offload/mediums.py +39 -0
  1323. vllm/v1/kv_offload/spec.py +61 -0
  1324. vllm/v1/kv_offload/worker/__init__.py +0 -0
  1325. vllm/v1/kv_offload/worker/cpu_gpu.py +171 -0
  1326. vllm/v1/kv_offload/worker/worker.py +142 -0
  1327. vllm/v1/metrics/__init__.py +0 -0
  1328. vllm/v1/metrics/loggers.py +741 -0
  1329. vllm/v1/metrics/prometheus.py +82 -0
  1330. vllm/v1/metrics/ray_wrappers.py +152 -0
  1331. vllm/v1/metrics/reader.py +246 -0
  1332. vllm/v1/metrics/stats.py +257 -0
  1333. vllm/v1/outputs.py +161 -0
  1334. vllm/v1/pool/__init__.py +0 -0
  1335. vllm/v1/pool/metadata.py +77 -0
  1336. vllm/v1/request.py +241 -0
  1337. vllm/v1/sample/__init__.py +0 -0
  1338. vllm/v1/sample/logits_processor/__init__.py +294 -0
  1339. vllm/v1/sample/logits_processor/builtin.py +275 -0
  1340. vllm/v1/sample/logits_processor/interface.py +97 -0
  1341. vllm/v1/sample/logits_processor/state.py +161 -0
  1342. vllm/v1/sample/metadata.py +43 -0
  1343. vllm/v1/sample/ops/__init__.py +0 -0
  1344. vllm/v1/sample/ops/bad_words.py +39 -0
  1345. vllm/v1/sample/ops/logprobs.py +26 -0
  1346. vllm/v1/sample/ops/penalties.py +43 -0
  1347. vllm/v1/sample/ops/topk_topp_sampler.py +292 -0
  1348. vllm/v1/sample/rejection_sampler.py +623 -0
  1349. vllm/v1/sample/sampler.py +285 -0
  1350. vllm/v1/sample/tpu/__init__.py +0 -0
  1351. vllm/v1/sample/tpu/metadata.py +124 -0
  1352. vllm/v1/sample/tpu/sampler.py +213 -0
  1353. vllm/v1/serial_utils.py +423 -0
  1354. vllm/v1/spec_decode/__init__.py +0 -0
  1355. vllm/v1/spec_decode/eagle.py +1011 -0
  1356. vllm/v1/spec_decode/medusa.py +66 -0
  1357. vllm/v1/spec_decode/metadata.py +62 -0
  1358. vllm/v1/spec_decode/metrics.py +211 -0
  1359. vllm/v1/spec_decode/ngram_proposer.py +276 -0
  1360. vllm/v1/spec_decode/utils.py +14 -0
  1361. vllm/v1/structured_output/__init__.py +295 -0
  1362. vllm/v1/structured_output/backend_guidance.py +245 -0
  1363. vllm/v1/structured_output/backend_lm_format_enforcer.py +167 -0
  1364. vllm/v1/structured_output/backend_outlines.py +320 -0
  1365. vllm/v1/structured_output/backend_types.py +134 -0
  1366. vllm/v1/structured_output/backend_xgrammar.py +327 -0
  1367. vllm/v1/structured_output/request.py +86 -0
  1368. vllm/v1/structured_output/utils.py +454 -0
  1369. vllm/v1/utils.py +396 -0
  1370. vllm/v1/worker/__init__.py +0 -0
  1371. vllm/v1/worker/block_table.py +210 -0
  1372. vllm/v1/worker/cpu_model_runner.py +175 -0
  1373. vllm/v1/worker/cpu_worker.py +156 -0
  1374. vllm/v1/worker/gpu_input_batch.py +863 -0
  1375. vllm/v1/worker/gpu_model_runner.py +4160 -0
  1376. vllm/v1/worker/gpu_ubatch_wrapper.py +399 -0
  1377. vllm/v1/worker/gpu_worker.py +710 -0
  1378. vllm/v1/worker/kv_connector_model_runner_mixin.py +132 -0
  1379. vllm/v1/worker/lora_model_runner_mixin.py +183 -0
  1380. vllm/v1/worker/tpu_input_batch.py +587 -0
  1381. vllm/v1/worker/tpu_model_runner.py +1946 -0
  1382. vllm/v1/worker/tpu_worker.py +346 -0
  1383. vllm/v1/worker/ubatch_splitting.py +192 -0
  1384. vllm/v1/worker/ubatch_utils.py +27 -0
  1385. vllm/v1/worker/ubatching.py +224 -0
  1386. vllm/v1/worker/utils.py +344 -0
  1387. vllm/v1/worker/worker_base.py +65 -0
  1388. vllm/v1/worker/xpu_model_runner.py +57 -0
  1389. vllm/v1/worker/xpu_worker.py +179 -0
  1390. vllm/version.py +41 -0
  1391. vllm/vllm_flash_attn/.gitkeep +0 -0
  1392. vllm/worker/__init__.py +0 -0
  1393. vllm/worker/worker_base.py +279 -0
  1394. vllm_cpu-0.11.0.post2.dist-info/METADATA +348 -0
  1395. vllm_cpu-0.11.0.post2.dist-info/RECORD +1398 -0
  1396. vllm_cpu-0.11.0.post2.dist-info/WHEEL +5 -0
  1397. vllm_cpu-0.11.0.post2.dist-info/entry_points.txt +5 -0
  1398. vllm_cpu-0.11.0.post2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1603 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+
4
+ import itertools
5
+ from abc import abstractmethod
6
+ from typing import Any, Literal, Optional, Union
7
+
8
+ import torch
9
+ import torch.nn as nn
10
+ from torch.nn.parameter import Parameter, UninitializedParameter
11
+
12
+ from vllm.distributed import (divide, get_tensor_model_parallel_rank,
13
+ get_tensor_model_parallel_world_size,
14
+ split_tensor_along_last_dim,
15
+ tensor_model_parallel_all_gather,
16
+ tensor_model_parallel_all_reduce)
17
+ from vllm.logger import init_logger
18
+ from vllm.model_executor.custom_op import CustomOp
19
+ from vllm.model_executor.layers.quantization.base_config import (
20
+ QuantizationConfig, QuantizeMethodBase)
21
+ from vllm.model_executor.layers.utils import dispatch_unquantized_gemm
22
+ # yapf: disable
23
+ from vllm.model_executor.parameter import (BasevLLMParameter,
24
+ BlockQuantScaleParameter,
25
+ ModelWeightParameter,
26
+ PackedColumnParameter,
27
+ PackedvLLMParameter,
28
+ PerTensorScaleParameter,
29
+ RowvLLMParameter)
30
+ # yapf: enable
31
+ from vllm.model_executor.utils import set_weight_attrs
32
+ from vllm.platforms import current_platform
33
+ from vllm.utils import GiB_bytes
34
+
35
+ logger = init_logger(__name__)
36
+
37
+ WEIGHT_LOADER_V2_SUPPORTED = [
38
+ "UnquantizedLinearMethod",
39
+ "CompressedTensorsLinearMethod",
40
+ "CompressedTensorsLinearTransformMethod",
41
+ "BitBLASLinearMethod",
42
+ "GPTQBitBLASLinearMethod",
43
+ "AWQMarlinLinearMethod",
44
+ "AWQLinearMethod",
45
+ "GPTQMarlinLinearMethod",
46
+ "Fp8LinearMethod",
47
+ "MarlinLinearMethod",
48
+ "GPTQMarlin24LinearMethod",
49
+ "TPUInt8LinearMethod",
50
+ "GPTQLinearMethod",
51
+ "FBGEMMFp8LinearMethod",
52
+ "ModelOptFp8LinearMethod",
53
+ "IPEXAWQLinearMethod",
54
+ "IPEXGPTQLinearMethod",
55
+ "HQQMarlinMethod",
56
+ "QuarkLinearMethod",
57
+ "ModelOptNvFp4LinearMethod",
58
+ "PetitNvFp4LinearMethod",
59
+ ]
60
+
61
+
62
+ def adjust_bitblas_shard(param, shard_size, shard_offset):
63
+ bitblas_tile_size = getattr(param, "bitblas_tile_size", None)
64
+ if bitblas_tile_size is not None:
65
+ return (shard_size // bitblas_tile_size,
66
+ shard_offset // bitblas_tile_size)
67
+
68
+ return shard_size, shard_offset
69
+
70
+
71
+ def adjust_marlin_shard(param, shard_size, shard_offset):
72
+ marlin_tile_size = getattr(param, "marlin_tile_size", None)
73
+ if marlin_tile_size is None:
74
+ return shard_size, shard_offset
75
+
76
+ return shard_size * marlin_tile_size, shard_offset * marlin_tile_size
77
+
78
+
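# --- Editorial note (not part of the packaged file) -----------------------
# A small runnable sketch of the two helpers above, using a hypothetical
# parameter stand-in. Marlin offsets are scaled *up* by the tile size,
# BitBLAS offsets are scaled *down* to tile units, and a parameter without
# the corresponding attribute is passed through unchanged.
class _TileParam:  # hypothetical stand-in, only for illustration
    marlin_tile_size = 16
    bitblas_tile_size = 8

assert adjust_marlin_shard(_TileParam(), 4, 2) == (64, 32)
assert adjust_bitblas_shard(_TileParam(), 64, 32) == (8, 4)
assert adjust_marlin_shard(object(), 4, 2) == (4, 2)  # no tile attribute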
79
+ def adjust_bitsandbytes_4bit_shard(param: Parameter,
80
+ shard_offsets: dict[str, tuple[int, int]],
81
+ loaded_shard_id: str) -> tuple[int, int]:
82
+ """Adjust the quantization offsets and sizes for BitsAndBytes sharding."""
83
+
84
+ total, _ = shard_offsets["total"]
85
+ orig_offset, orig_size = shard_offsets[loaded_shard_id]
86
+
87
+ quantized_total = param.data.shape[0]
88
+ quantized_offset = orig_offset * quantized_total // total
89
+ quantized_size = orig_size * quantized_total // total
90
+
91
+ return quantized_size, quantized_offset
92
+
93
+
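# --- Editorial note (not part of the packaged file) -----------------------
# Worked example of the proportional rescaling above, with made-up sizes:
# if the fused logical weight has 1024 output rows but the packed 4-bit
# buffer only has 512, a shard covering logical rows [256, 512) maps to
# packed rows [128, 256). The dict layout (shard_id -> (offset, size) plus
# a "total" entry) mirrors the call sites later in this file.
_packed = Parameter(torch.empty(512, 1), requires_grad=False)
_offsets = {"total": (1024, 0), "1": (256, 256)}
assert adjust_bitsandbytes_4bit_shard(_packed, _offsets, "1") == (128, 128)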
94
+ def adjust_scalar_to_fused_array(param, loaded_weight, shard_id):
95
+ """For fused modules (QKV and MLP) we have an array of length
96
+ N that holds 1 scale for each "logical" matrix. So the param
97
+ is an array of length N. The loaded_weight corresponds to
98
+ one of the shards on disk. Here, we slice the param based on
99
+ the shard_id for loading.
100
+ """
101
+ qkv_idxs = {"q": 0, "k": 1, "v": 2}
102
+
103
+ if isinstance(shard_id, str):
104
+ shard_id = qkv_idxs[shard_id]
105
+ elif not isinstance(shard_id, int):
106
+ raise ValueError(f"Unknown Shard Id {shard_id}")
107
+
108
+ # AutoFP8 scales do not have a shape
109
+ # compressed-tensors scales do have a shape
110
+ if len(loaded_weight.shape) != 0:
111
+ assert loaded_weight.shape[0] == 1
112
+ loaded_weight = loaded_weight[0]
113
+
114
+ return param[shard_id], loaded_weight
115
+
116
+
117
+ # TODO(Isotr0py): We might need a more flexible structure to handle
118
+ # bitsandbytes shard offsets.
119
+ def left_shift_bitsandbytes_4bit_shard(bnb_weight_attrs: dict[str, Any]):
120
+ """
121
+ Separate the BitsAndBytes 4-bit shard.
122
+
123
+ For example, given bnb weight attributes as below:
124
+ {
125
+ 'bnb_shard_offsets': array([0, 4, 8, 16]),
126
+ 'bnb_quant_state': {0: ..., 1: ..., 2: ...},
127
+ }
128
+
129
+ The function will return:
130
+ {
131
+ 'bnb_shard_offsets': array([0, 4]),
132
+ 'bnb_quant_state': {0: ...},
133
+ }
134
+ and
135
+ {
136
+ 'bnb_shard_offsets': array([0, 4, 12]),
137
+ 'bnb_quant_state': {0: ..., 1: ...},
138
+ }
139
+ """
140
+ shard_offsets = bnb_weight_attrs["bnb_shard_offsets"]
141
+ offset_l = shard_offsets[:2]
142
+ offset_r = shard_offsets[1:] - shard_offsets[1]
143
+ quant_state_l = {0: bnb_weight_attrs["bnb_quant_state"][0]}
144
+ quant_state_r = {
145
+ i - 1: bnb_weight_attrs["bnb_quant_state"][i]
146
+ for i in range(1,
147
+ len(shard_offsets) - 1)
148
+ }
149
+ left = dict(bnb_shard_offsets=offset_l, bnb_quant_state=quant_state_l)
150
+ right = dict(bnb_shard_offsets=offset_r, bnb_quant_state=quant_state_r)
151
+ return left, right
152
+
153
+
154
+ class LinearMethodBase(QuantizeMethodBase):
155
+ """Base class for different (maybe quantized) linear methods."""
156
+
157
+ @abstractmethod
158
+ def create_weights(self, layer: torch.nn.Module,
159
+ input_size_per_partition: int,
160
+ output_partition_sizes: list[int], input_size: int,
161
+ output_size: int, params_dtype: torch.dtype,
162
+ **extra_weight_attrs):
163
+ """Create weights for a linear layer.
164
+ The weights will be set as attributes of the layer.
165
+
166
+ Args:
167
+ layer: The layer that is using the LinearMethodBase factory.
168
+ input_size_per_partition: Size of the weight input dim on rank X.
169
+ output_partition_sizes: Sizes of the output dim of each logical
170
+ weight on rank X. E.g., output_partition_sizes for QKVLinear
171
+ is a list containing the widths of Wq, Wk, Wv on rank X.
172
+ input_size: Size of the input dim of the weight across all ranks.
173
+ output_size: Size of the output dim of the weight across all ranks.
174
+ params_dtype: Datatype of the parameters.
175
+ """
176
+ raise NotImplementedError
177
+
178
+ @abstractmethod
179
+ def apply(self,
180
+ layer: torch.nn.Module,
181
+ x: torch.Tensor,
182
+ bias: Optional[torch.Tensor] = None) -> torch.Tensor:
183
+ """Apply the weights in layer to the input tensor.
184
+ Expects create_weights to have been called before on the layer."""
185
+ raise NotImplementedError
186
+
187
+
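# --- Editorial note (not part of the packaged file) -----------------------
# A minimal, hypothetical LinearMethodBase implementation, included only to
# make the contract above concrete: create_weights() registers one dense
# weight covering all fused output partitions, and apply() runs a plain
# F.linear. Real methods (UnquantizedLinearMethod below and the quantized
# methods listed in WEIGHT_LOADER_V2_SUPPORTED) do considerably more work.
class _PlainLinearMethod(LinearMethodBase):

    def create_weights(self, layer, input_size_per_partition,
                       output_partition_sizes, input_size, output_size,
                       params_dtype, **extra_weight_attrs):
        weight = Parameter(torch.empty(sum(output_partition_sizes),
                                       input_size_per_partition,
                                       dtype=params_dtype),
                           requires_grad=False)
        layer.register_parameter("weight", weight)
        set_weight_attrs(weight, extra_weight_attrs)

    def apply(self, layer, x, bias=None):
        return torch.nn.functional.linear(x, layer.weight, bias)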
188
+ class UnquantizedLinearMethod(LinearMethodBase):
189
+ """Linear method without quantization."""
190
+
191
+ def create_weights(self, layer: torch.nn.Module,
192
+ input_size_per_partition: int,
193
+ output_partition_sizes: list[int], input_size: int,
194
+ output_size: int, params_dtype: torch.dtype,
195
+ **extra_weight_attrs):
196
+ # This method creates unquantized linear weights.
197
+ # The weights are not quantized, and they are not sharded.
198
+ # The amount of memory allocated for the weights is
199
+ # sum(output_partition_sizes) * input_size_per_partition.
200
+ try:
201
+ weight_loader = extra_weight_attrs.pop("weight_loader")
202
+ weight = ModelWeightParameter(data=torch.empty(
203
+ sum(output_partition_sizes),
204
+ input_size_per_partition,
205
+ dtype=params_dtype),
206
+ input_dim=1,
207
+ output_dim=0,
208
+ weight_loader=weight_loader)
209
+ except torch.cuda.OutOfMemoryError as e:
210
+ logger.error("Failed to create unquantized linear weights: %s", e)
211
+ if torch.cuda.is_available():
212
+ logger.debug("CUDA device: %s", torch.cuda.current_device())
213
+ logger.debug("Allocated: %.2f GiB",
214
+ torch.cuda.memory_allocated() / GiB_bytes)
215
+ logger.debug("Reserved: %.2f GiB",
216
+ torch.cuda.memory_reserved() / GiB_bytes)
217
+ raise RuntimeError(
218
+ "Failed to create unquantized linear weights. "
219
+ "This may be caused by insufficient memory to allocate "
220
+ "the weight.") from e
221
+
222
+ layer.register_parameter("weight", weight)
223
+ set_weight_attrs(weight, extra_weight_attrs)
224
+
225
+ def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
226
+ if current_platform.is_cpu():
227
+ from vllm.model_executor.layers.utils import (
228
+ dispatch_cpu_unquantized_gemm)
229
+ dispatch_cpu_unquantized_gemm(layer, remove_weight=True)
230
+
231
+ def apply(self,
232
+ layer: torch.nn.Module,
233
+ x: torch.Tensor,
234
+ bias: Optional[torch.Tensor] = None) -> torch.Tensor:
235
+
236
+ return dispatch_unquantized_gemm()(layer, x, layer.weight, bias)
237
+
238
+
239
+ class LinearBase(CustomOp):
240
+ """Base linear layer.
241
+
242
+ Args:
243
+ input_size: input dimension of the linear layer.
244
+ output_size: output dimension of the linear layer.
245
+ skip_bias_add: If true, skip adding bias but instead return it.
246
+ params_dtype: Data type for the parameters.
247
+ quant_config: Quantization configuration.
248
+ prefix: Prefix for parameter names.
249
+ return_bias: If true, return bias together with outputs in forward pass.
250
+ disable_tp: If true, tensor parallelism will be disabled for this layer.
251
+ """
252
+
253
+ def __init__(
254
+ self,
255
+ input_size: int,
256
+ output_size: int,
257
+ skip_bias_add: bool = False,
258
+ params_dtype: Optional[torch.dtype] = None,
259
+ quant_config: Optional[QuantizationConfig] = None,
260
+ prefix: str = "",
261
+ *,
262
+ return_bias: bool = True,
263
+ disable_tp: bool = False,
264
+ ):
265
+ super().__init__()
266
+
267
+ # Keep input parameters
268
+ self.input_size = input_size
269
+ self.output_size = output_size
270
+ self.skip_bias_add = skip_bias_add
271
+ if params_dtype is None:
272
+ params_dtype = torch.get_default_dtype()
273
+ self.params_dtype = params_dtype
274
+ self.quant_config = quant_config
275
+ self.prefix = prefix
276
+ if quant_config is None:
277
+ self.quant_method: Optional[
278
+ QuantizeMethodBase] = UnquantizedLinearMethod()
279
+ else:
280
+ self.quant_method = quant_config.get_quant_method(self,
281
+ prefix=prefix)
282
+ self.return_bias = return_bias
283
+ self.disable_tp = disable_tp
284
+ self.tp_rank = (get_tensor_model_parallel_rank()
285
+ if not disable_tp else 0)
286
+ self.tp_size = (get_tensor_model_parallel_world_size()
287
+ if not disable_tp else 1)
288
+
289
+ def update_param_tp_status(self):
290
+ for param in self.parameters():
291
+ if isinstance(param, BasevLLMParameter):
292
+ param.tp_rank = self.tp_rank
293
+ param.tp_size = self.tp_size
294
+
295
+
296
+ @CustomOp.register("replicated_linear")
297
+ class ReplicatedLinear(LinearBase):
298
+ """Replicated linear layer.
299
+
300
+ Args:
301
+ input_size: input dimension of the linear layer.
302
+ output_size: output dimension of the linear layer.
303
+ bias: If true, add bias.
304
+ skip_bias_add: If true, skip adding bias but instead return it.
305
+ params_dtype: Data type for the parameters.
306
+ quant_config: Quantization configuration.
307
+ prefix: The name of the layer in the state dict, including all parents
308
+ (e.g. model.layers.0.qkv_proj)
309
+ return_bias: If true, return bias together with outputs in forward pass.
310
+ disable_tp: Has no effect for replicated linear layers.
311
+ """
312
+
313
+ def __init__(
314
+ self,
315
+ input_size: int,
316
+ output_size: int,
317
+ bias: bool = True,
318
+ skip_bias_add: bool = False,
319
+ params_dtype: Optional[torch.dtype] = None,
320
+ quant_config: Optional[QuantizationConfig] = None,
321
+ prefix: str = "",
322
+ *,
323
+ return_bias: bool = True,
324
+ disable_tp: bool = False,
325
+ ):
326
+ # If MergedReplicatedLinear, use output size of each partition.
327
+ if hasattr(self, "output_sizes"):
328
+ self.output_partition_sizes = self.output_sizes
329
+ else:
330
+ self.output_partition_sizes = [output_size]
331
+
332
+ super().__init__(input_size,
333
+ output_size,
334
+ skip_bias_add,
335
+ params_dtype,
336
+ quant_config,
337
+ prefix=prefix,
338
+ return_bias=return_bias,
339
+ disable_tp=disable_tp)
340
+
341
+ # All linear layers support a quant method.
342
+ assert self.quant_method is not None
343
+ self.quant_method.create_weights(self,
344
+ self.input_size,
345
+ self.output_partition_sizes,
346
+ self.input_size,
347
+ self.output_size,
348
+ self.params_dtype,
349
+ weight_loader=self.weight_loader)
350
+
351
+ if bias:
352
+ self.bias = Parameter(
353
+ torch.empty(self.output_size, dtype=self.params_dtype))
354
+ set_weight_attrs(self.bias, {
355
+ "output_dim": 0,
356
+ "weight_loader": self.weight_loader,
357
+ })
358
+ else:
359
+ self.register_parameter("bias", None)
360
+
361
+ def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
362
+ # If the weight on disk does not have a shape, give it one
363
+ # (such as scales for AutoFP8).
364
+ # Special case for GGUF
365
+
366
+ is_gguf_weight = getattr(param, "is_gguf_weight", False)
367
+ is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
368
+ if is_gguf_weight_type:
369
+ param.weight_type = loaded_weight.item()
370
+
371
+ # Materialize GGUF UninitializedParameter
372
+ if is_gguf_weight and isinstance(param, UninitializedParameter):
373
+ param.materialize(loaded_weight.shape, dtype=loaded_weight.dtype)
374
+
375
+ if len(loaded_weight.shape) == 0:
376
+ loaded_weight = loaded_weight.reshape(1)
377
+
378
+ assert param.size() == loaded_weight.size(), (
379
+ f"Tried to load weights of size {loaded_weight.size()}"
380
+ f"to a parameter of size {param.size()}")
381
+ param.data.copy_(loaded_weight)
382
+
383
+ def forward(
384
+ self,
385
+ x: torch.Tensor,
386
+ ) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
387
+ bias = self.bias if not self.skip_bias_add else None
388
+ assert self.quant_method is not None
389
+
390
+ output = self.quant_method.apply(self, x, bias)
391
+ output_bias = self.bias if self.skip_bias_add else None
392
+
393
+ if not self.return_bias:
394
+ return output
395
+ return output, output_bias
396
+
397
+ def extra_repr(self) -> str:
398
+ s = f"in_features={self.input_size}"
399
+ s += f", output_features={self.output_size}"
400
+ s += f", bias={self.bias is not None}"
401
+ return s
402
+
403
+
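# --- Editorial note (not part of the packaged file) -----------------------
# Construction sketch for ReplicatedLinear based on the signature above.
# The sizes and prefix are made-up examples, and a real call site runs
# inside an initialized vLLM config/distributed context:
#
#   lm_head = ReplicatedLinear(input_size=4096, output_size=32000,
#                              bias=False, quant_config=None,
#                              prefix="lm_head")
#   logits, output_bias = lm_head(hidden_states)
#
# forward() returns (output, output_bias); output_bias is None unless
# skip_bias_add=True, and with return_bias=False only the output tensor
# is returned.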
404
+ @CustomOp.register("column_parallel_linear")
405
+ class ColumnParallelLinear(LinearBase):
406
+ """Linear layer with column parallelism.
407
+
408
+ The linear layer is defined as Y = XA + b. A is parallelized along
409
+ its second dimension as A = [A_1, ..., A_p].
410
+
411
+ Args:
412
+ input_size: first dimension of matrix A.
413
+ output_size: second dimension of matrix A.
414
+ bias: If true, add bias.
415
+ gather_output: If true, call all-gather on output and make Y available
416
+ to all GPUs, otherwise, every GPU will have its output
417
+ which is Y_i = XA_i
418
+ skip_bias_add: This was added to enable performance optimizations where
419
+ bias can be fused with other element-wise operations. We
420
+ skip adding bias but instead return it.
421
+ params_dtype: Data type for the parameters.
422
+ quant_config: Quantization configuration.
423
+ output_sizes: list of output sizes packed into one output, like for QKV
424
+ the list would be size 3.
425
+ prefix: The name of the layer in the state dict, including all parents
426
+ (e.g. model.layers.0.qkv_proj)
427
+ return_bias: If true, return bias together with outputs in forward pass.
428
+ disable_tp: If true, the weight matrix won't be sharded across TP ranks.
429
+ """
430
+
431
+ def __init__(
432
+ self,
433
+ input_size: int,
434
+ output_size: int,
435
+ bias: bool = True,
436
+ gather_output: bool = False,
437
+ skip_bias_add: bool = False,
438
+ params_dtype: Optional[torch.dtype] = None,
439
+ quant_config: Optional[QuantizationConfig] = None,
440
+ output_sizes: Optional[list[int]] = None,
441
+ prefix: str = "",
442
+ *,
443
+ return_bias: bool = True,
444
+ disable_tp: bool = False,
445
+ ):
446
+ # Divide the weight matrix along the last dimension.
447
+ self.tp_rank = (get_tensor_model_parallel_rank()
448
+ if not disable_tp else 0)
449
+ self.tp_size = (get_tensor_model_parallel_world_size()
450
+ if not disable_tp else 1)
451
+ self.input_size_per_partition = input_size
452
+ self.output_size_per_partition = divide(output_size, self.tp_size)
453
+ self.output_partition_sizes = [self.output_size_per_partition]
454
+ # If QKV or MergedColumn, use output size of each partition.
455
+ if hasattr(self, "output_sizes"):
456
+ self.output_partition_sizes = [
457
+ divide(output_size, self.tp_size)
458
+ for output_size in self.output_sizes
459
+ ]
460
+
461
+ super().__init__(input_size,
462
+ output_size,
463
+ skip_bias_add,
464
+ params_dtype,
465
+ quant_config,
466
+ prefix,
467
+ return_bias=return_bias,
468
+ disable_tp=disable_tp)
469
+
470
+ self.gather_output = gather_output
471
+
472
+ if output_sizes is None:
473
+ output_sizes = [output_size]
474
+
475
+ assert self.quant_method is not None
476
+ self.quant_method.create_weights(
477
+ layer=self,
478
+ input_size_per_partition=self.input_size_per_partition,
479
+ output_partition_sizes=self.output_partition_sizes,
480
+ input_size=self.input_size,
481
+ output_size=self.output_size,
482
+ params_dtype=self.params_dtype,
483
+ weight_loader=(
484
+ self.weight_loader_v2 if self.quant_method.__class__.__name__
485
+ in WEIGHT_LOADER_V2_SUPPORTED else self.weight_loader))
486
+ if bias:
487
+ self.bias = Parameter(
488
+ torch.empty(self.output_size_per_partition,
489
+ dtype=params_dtype))
490
+ set_weight_attrs(self.bias, {
491
+ "output_dim": 0,
492
+ "weight_loader": self.weight_loader,
493
+ })
494
+ else:
495
+ self.register_parameter("bias", None)
496
+ self.update_param_tp_status()
497
+
498
+ def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
499
+
500
+ output_dim = getattr(param, "output_dim", None)
501
+
502
+ is_sharded_weight = getattr(param, "is_sharded_weight", False)
503
+ use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit", False)
504
+ # bitsandbytes loads the weights of the specific portion
505
+ # no need to narrow
506
+ is_sharded_weight = is_sharded_weight or use_bitsandbytes_4bit
507
+
508
+ # Special case for GGUF
509
+ is_gguf_weight = getattr(param, "is_gguf_weight", False)
510
+ is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
511
+ if is_gguf_weight_type:
512
+ param.weight_type = loaded_weight.item()
513
+
514
+ # Materialize GGUF UninitializedParameter
515
+ if is_gguf_weight and isinstance(param, UninitializedParameter):
516
+ final_shape = list(loaded_weight.shape)
517
+ if output_dim is not None:
518
+ assert final_shape[output_dim] % self.tp_size == 0
519
+ final_shape[output_dim] = (final_shape[output_dim] //
520
+ self.tp_size)
521
+ param.materialize(final_shape, dtype=loaded_weight.dtype)
522
+
523
+ param_data = param.data
524
+ if output_dim is not None and not is_sharded_weight:
525
+ shard_size = param_data.shape[output_dim]
526
+ start_idx = self.tp_rank * shard_size
527
+ loaded_weight = loaded_weight.narrow(output_dim, start_idx,
528
+ shard_size)
529
+
530
+ # Special case for loading scales off disk, which often do not
531
+ # have a shape (such as in the case of AutoFP8).
532
+ if len(loaded_weight.shape) == 0:
533
+ loaded_weight = loaded_weight.reshape(1)
534
+
535
+ assert param_data.shape == loaded_weight.shape
536
+ param_data.copy_(loaded_weight)
537
+
538
+ def weight_loader_v2(self, param: BasevLLMParameter,
539
+ loaded_weight: torch.Tensor):
540
+ # Special case for loading scales off disk, which often do not
541
+ # have a shape (such as in the case of AutoFP8).
542
+ if len(loaded_weight.shape) == 0:
543
+ assert loaded_weight.numel() == 1
544
+ loaded_weight = loaded_weight.reshape(1)
545
+ param.load_column_parallel_weight(loaded_weight=loaded_weight)
546
+
547
+ def forward(
548
+ self,
549
+ input_,
550
+ ) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
551
+ bias = self.bias if not self.skip_bias_add else None
552
+
553
+ # Matrix multiply.
554
+ assert self.quant_method is not None
555
+ output_parallel = self.quant_method.apply(self, input_, bias)
556
+
557
+ if self.gather_output and self.tp_size > 1:
558
+ # All-gather across the partitions.
559
+ output = tensor_model_parallel_all_gather(output_parallel)
560
+ else:
561
+ output = output_parallel
562
+ output_bias = self.bias if self.skip_bias_add else None
563
+ if not self.return_bias:
564
+ return output
565
+ return output, output_bias
566
+
567
+ def extra_repr(self) -> str:
568
+ s = f"in_features={self.input_size}"
569
+ s += f", output_features={self.output_size_per_partition}"
570
+ s += f", bias={self.bias is not None}"
571
+ s += f", tp_size={self.tp_size}"
572
+ s += f", gather_output={self.gather_output}"
573
+ return s
574
+
575
+
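# --- Editorial note (not part of the packaged file) -----------------------
# Sharding arithmetic for ColumnParallelLinear with made-up sizes: the
# output dimension is split evenly across TP ranks, so each rank holds an
# [output_size / tp_size, input_size] weight shard, and with
# gather_output=False every rank keeps only its own Y_i = X @ A_i.
_in, _out, _tp = 4096, 11008, 4
_out_per_rank = divide(_out, _tp)                      # 2752
assert _out_per_rank * _tp == _out
_per_rank_weight_shape = (_out_per_rank, _in)          # (2752, 4096)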
576
+ class MergedColumnParallelLinear(ColumnParallelLinear):
577
+ """Packed linear layers with column parallelism.
578
+
579
+ Similar to ColumnParallelLinear, but the weight matrix is concatenated
580
+ along the output dimension. When the weight matrix is loaded, the
581
+ different partitions are sharded separately.
582
+
583
+ Args:
584
+ input_size: input dimension of the linear layer.
585
+ output_sizes: list of output dimensions of the linear layer.
586
+ bias: If true, add bias.
587
+ gather_output: If true, call all-gather on output and make the output
588
+ available to all GPUs, otherwise, every GPU will have
589
+ its own output.
590
+ skip_bias_add: This was added to enable performance optimizations where
591
+ bias can be fused with other element-wise operations. We
592
+ skip adding bias but instead return it.
593
+ params_dtype: Data type for the parameters.
594
+ quant_config: Quantization configuration.
595
+ prefix: The name of the layer in the state dict, including all parents
596
+ (e.g. model.layers.0.qkv_proj)
597
+ return_bias: If true, return bias together with outputs in forward pass.
598
+ disable_tp: If true, the weight matrices won't be sharded; this layer
599
+ will be treated as a "Replicated" MergedLinear.
600
+ """
601
+
602
+ def __init__(
603
+ self,
604
+ input_size: int,
605
+ output_sizes: list[int],
606
+ bias: bool = True,
607
+ gather_output: bool = False,
608
+ skip_bias_add: bool = False,
609
+ params_dtype: Optional[torch.dtype] = None,
610
+ quant_config: Optional[QuantizationConfig] = None,
611
+ prefix: str = "",
612
+ *,
613
+ return_bias: bool = True,
614
+ disable_tp: bool = False,
615
+ ):
616
+ self.output_sizes = output_sizes
617
+ self.tp_size = (get_tensor_model_parallel_world_size()
618
+ if not disable_tp else 1)
619
+ self.tp_rank = (get_tensor_model_parallel_rank()
620
+ if not disable_tp else 0)
621
+
622
+ assert all(output_size % self.tp_size == 0
623
+ for output_size in output_sizes)
624
+ super().__init__(input_size=input_size,
625
+ output_size=sum(output_sizes),
626
+ bias=bias,
627
+ gather_output=gather_output,
628
+ skip_bias_add=skip_bias_add,
629
+ params_dtype=params_dtype,
630
+ quant_config=quant_config,
631
+ prefix=prefix,
632
+ return_bias=return_bias,
633
+ disable_tp=disable_tp)
634
+
635
+ def weight_loader(self,
636
+ param: Parameter,
637
+ loaded_weight: torch.Tensor,
638
+ loaded_shard_id: Optional[int] = None):
639
+
640
+ # Special case for GGUF
641
+ # initialize GGUF param after we know the quantize type
642
+ is_gguf_weight = getattr(param, "is_gguf_weight", False)
643
+ is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
644
+ if is_gguf_weight_type:
645
+ if loaded_shard_id is not None:
646
+ param.data[loaded_shard_id].copy_(loaded_weight)
647
+ param.shard_weight_type[loaded_shard_id] = loaded_weight.item()
648
+ else:
649
+ param.shard_weight_type = {
650
+ i: loaded_weight.item()
651
+ for i, _ in enumerate(self.output_sizes)
652
+ }
653
+ return
654
+
655
+ if is_gguf_weight:
656
+
657
+ output_dim = getattr(param, "output_dim", None)
658
+ shard_size = loaded_weight.size(output_dim) // self.tp_size
659
+ start_idx = self.tp_rank * shard_size
660
+
661
+ if loaded_shard_id is not None:
662
+ loaded_weight = loaded_weight.narrow(output_dim, start_idx,
663
+ shard_size)
664
+ param.shard_id.append(loaded_shard_id)
665
+ param.shard_id_map[loaded_shard_id] = len(param.data_container)
666
+ param.data_container.append(loaded_weight)
667
+ return
668
+
669
+ param_data = param.data
670
+ output_dim = getattr(param, "output_dim", None)
671
+ # Special case for per-tensor scale to load scalar into fused array.
672
+ needs_scalar_to_array = getattr(param, "needs_scalar_to_array", False)
673
+
674
+ if loaded_shard_id is None:
675
+ # Loaded weight is already fused on disk (mlp).
676
+ # (e.g., Phi-3's gate_up_proj).
677
+ if output_dim is None:
678
+ if needs_scalar_to_array:
679
+ param_data, loaded_weight = adjust_scalar_to_fused_array(
680
+ param_data, loaded_weight, 0)
681
+
682
+ assert param_data.shape == loaded_weight.shape
683
+ param_data.copy_(loaded_weight)
684
+ return
685
+ current_shard_offset = 0
686
+ use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit",
687
+ False)
688
+ shard_offsets: list[tuple[int, int, int]] = []
689
+ for i, output_size in enumerate(self.output_sizes):
690
+ shard_offsets.append((i, current_shard_offset, output_size))
691
+ current_shard_offset += output_size
692
+ packed_dim = getattr(param, "packed_dim", None)
693
+ for shard_id, shard_offset, shard_size in shard_offsets:
694
+ # Special case for Quantization.
695
+ # If quantized, we need to adjust the offset and size to account
696
+ # for the packing.
697
+ if packed_dim == output_dim:
698
+ shard_size = shard_size // param.packed_factor
699
+ shard_offset = shard_offset // param.packed_factor
700
+ # Special case for Marlin.
701
+ shard_size, shard_offset = adjust_marlin_shard(
702
+ param, shard_size, shard_offset)
703
+
704
+ shard_size, shard_offset = adjust_bitblas_shard(
705
+ param, shard_size, shard_offset)
706
+
707
+ if use_bitsandbytes_4bit:
708
+ index = list(itertools.accumulate([0] + self.output_sizes))
709
+ orig_offsets = {
710
+ str(i): (index[i], size)
711
+ for i, size in enumerate(self.output_sizes)
712
+ }
713
+ orig_offsets["total"] = (self.output_size, 0)
714
+ shard_size, shard_offset = adjust_bitsandbytes_4bit_shard(
715
+ param, orig_offsets, str(shard_id))
716
+
717
+ loaded_weight_shard = loaded_weight.narrow(
718
+ output_dim, shard_offset, shard_size)
719
+ self.weight_loader(param, loaded_weight_shard, shard_id)
720
+ return
721
+
722
+ assert loaded_shard_id < len(self.output_sizes)
723
+ if output_dim is not None:
724
+ shard_offset = (sum(self.output_sizes[:loaded_shard_id]) //
725
+ self.tp_size)
726
+ shard_size = self.output_sizes[loaded_shard_id] // self.tp_size
727
+ # Special case for quantization.
728
+ # If quantized, we need to adjust the offset and size to account
729
+ # for the packing.
730
+ packed_dim = getattr(param, "packed_dim", None)
731
+ if packed_dim == output_dim:
732
+ shard_size = shard_size // param.packed_factor
733
+ shard_offset = shard_offset // param.packed_factor
734
+ # Special case for Marlin.
735
+ shard_size, shard_offset = adjust_marlin_shard(
736
+ param, shard_size, shard_offset)
737
+ shard_size, shard_offset = adjust_bitblas_shard(
738
+ param, shard_size, shard_offset)
739
+
740
+ use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit",
741
+ False)
742
+ is_sharded_weight = getattr(param, "is_sharded_weight", False)
743
+ # bitsandbytes loads the weights of the specific portion
744
+ # no need to narrow
745
+ is_sharded_weight = is_sharded_weight or use_bitsandbytes_4bit
746
+
747
+ if use_bitsandbytes_4bit:
748
+ shard_size = loaded_weight.shape[output_dim]
749
+ shard_offset = loaded_weight.shape[output_dim] * \
750
+ loaded_shard_id
751
+
752
+ param_data = param_data.narrow(output_dim, shard_offset,
753
+ shard_size)
754
+ start_idx = self.tp_rank * shard_size
755
+ if not is_sharded_weight:
756
+ loaded_weight = loaded_weight.narrow(output_dim, start_idx,
757
+ shard_size)
758
+ # Special case for per-tensor scales in fused case.
759
+ elif needs_scalar_to_array:
760
+ param_data, loaded_weight = adjust_scalar_to_fused_array(
761
+ param_data, loaded_weight, loaded_shard_id)
762
+
763
+ else:
764
+ ignore_warning = getattr(param, "ignore_warning", False)
765
+ if not ignore_warning:
766
+ logger.warning(
767
+ "Loading a weight without `output_dim` attribute in "
768
+ "MergedColumnParallelLinear, assume the weight is "
769
+ "the same for all partitions.")
770
+
771
+ assert param_data.shape == loaded_weight.shape
772
+ param_data.copy_(loaded_weight)
773
+
774
+ def _load_fused_module_from_checkpoint(self, param: BasevLLMParameter,
775
+ loaded_weight: torch.Tensor):
776
+ """
777
+ Handle special case for models where MLP layers are already
778
+ fused on disk. In this case, we have no shard id. This function
779
+ determines the shard id by splitting these layers and then calls
780
+ the weight loader using the shard id.
781
+
782
+ An example of a model with these fused layers:
783
+ https://huggingface.co/microsoft/Phi-3-mini-4k-instruct
784
+ """
785
+
786
+ current_shard_offset = 0
787
+ shard_offsets: list[tuple[int, int, int]] = []
788
+ for i, output_size in enumerate(self.output_sizes):
789
+ shard_offsets.append((i, current_shard_offset, output_size))
790
+ current_shard_offset += output_size
791
+
792
+ for shard_id, shard_offset, shard_size in shard_offsets:
793
+ # Special case for Quantization.
794
+ # If quantized, we need to adjust the offset and size to account
795
+ # for the packing.
796
+ if isinstance(param, (PackedColumnParameter, PackedvLLMParameter
797
+ )) and param.packed_dim == param.output_dim:
798
+ shard_size, shard_offset = \
799
+ param.adjust_shard_indexes_for_packing(
800
+ shard_size=shard_size, shard_offset=shard_offset)
801
+
802
+ loaded_weight_shard = loaded_weight.narrow(param.output_dim,
803
+ shard_offset,
804
+ shard_size)
805
+ self.weight_loader_v2(param, loaded_weight_shard, shard_id)
806
+
807
+ def weight_loader_v2(self,
808
+ param: BasevLLMParameter,
809
+ loaded_weight: torch.Tensor,
810
+ loaded_shard_id: Optional[int] = None):
811
+ if loaded_shard_id is None:
812
+ if isinstance(param, PerTensorScaleParameter):
813
+ param.load_merged_column_weight(loaded_weight=loaded_weight,
814
+ shard_id=0)
815
+ return
816
+ elif type(param) in (RowvLLMParameter, BasevLLMParameter):
817
+ param.load_merged_column_weight(loaded_weight=loaded_weight)
818
+ return
819
+ # TODO: @dsikka - move to parameter.py
820
+ self._load_fused_module_from_checkpoint(param, loaded_weight)
821
+ return
822
+
823
+ assert loaded_shard_id < len(self.output_sizes)
824
+
825
+ if isinstance(param, BlockQuantScaleParameter):
826
+ assert self.quant_method is not None
827
+ # Assume the weight block size has been set by quant method
828
+ assert hasattr(self, "weight_block_size")
829
+ weight_block_size = self.weight_block_size
830
+ assert weight_block_size is not None
831
+ block_n, _ = weight_block_size[0], weight_block_size[1]
832
+ shard_offset = (
833
+ (sum(self.output_sizes[:loaded_shard_id]) + block_n - 1) //
834
+ block_n) // self.tp_size
835
+ shard_size = ((self.output_sizes[loaded_shard_id] + block_n - 1) //
836
+ block_n // self.tp_size)
837
+ else:
838
+ shard_offset = sum(
839
+ self.output_sizes[:loaded_shard_id]) // self.tp_size
840
+ shard_size = self.output_sizes[loaded_shard_id] // self.tp_size
841
+
842
+ param.load_merged_column_weight(loaded_weight=loaded_weight,
843
+ shard_id=loaded_shard_id,
844
+ shard_offset=shard_offset,
845
+ shard_size=shard_size,
846
+ tp_rank=self.tp_rank)
847
+
848
+
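# --- Editorial note (not part of the packaged file) -----------------------
# Shard arithmetic for a gate_up_proj-style MergedColumnParallelLinear with
# made-up sizes: two logical 11008-row matrices are fused into one
# 22016-row weight, and (as in weight_loader above) each loaded_shard_id
# maps to a per-rank offset of sum(previous) / tp_size and a per-rank size
# of output_sizes[shard_id] / tp_size.
_output_sizes, _tp = [11008, 11008], 4
_shards = [(sum(_output_sizes[:i]) // _tp, _output_sizes[i] // _tp)
           for i in range(len(_output_sizes))]
assert _shards == [(0, 2752), (2752, 2752)]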
849
+ class QKVParallelLinear(ColumnParallelLinear):
850
+ """Linear layers for the attention's QKV transformation.
851
+
852
+ Linear layers for the linear transformation of the query, key, and value
853
+ vectors in the attention layer. The weight matrix is concatenated along
854
+ the output dimension. The layer is parallelized along the head dimension.
855
+ When the number of key/value heads is smaller than the number of query
856
+ heads (e.g., multi-query/grouped-query attention), the key/value head may
857
+ be replicated while the query heads are partitioned.
858
+
859
+ Args:
860
+ hidden_size: input hidden state size of the transformer.
861
+ head_size: size of each attention head.
862
+ total_num_heads: total number of attention query heads.
863
+ total_num_kv_heads: total number of attention key/value heads. If
864
+ None, assume total_num_kv_heads = total_num_heads.
865
+ bias: If true, add bias.
866
+ skip_bias_add: This was added to enable performance optimizations where
867
+ bias can be fused with other element-wise operations. We
868
+ skip adding bias but instead return it.
869
+ params_dtype: Data type for the parameters.
870
+ quant_config: Quantization configuration.
871
+ prefix: The name of the layer in the state dict, including all parents
872
+ (e.g. model.layers.0.qkv_proj)
873
+ return_bias: If true, return bias together with outputs in forward pass.
874
+ disable_tp: If true, the weight matrix won't be sharded across TP ranks.
875
+ """
876
+
877
+ def __init__(
878
+ self,
879
+ hidden_size: int,
880
+ head_size: int,
881
+ total_num_heads: int,
882
+ total_num_kv_heads: Optional[int] = None,
883
+ bias: bool = True,
884
+ skip_bias_add: bool = False,
885
+ params_dtype: Optional[torch.dtype] = None,
886
+ quant_config: Optional[QuantizationConfig] = None,
887
+ prefix: str = "",
888
+ *,
889
+ return_bias: bool = True,
890
+ disable_tp: bool = False,
891
+ ):
892
+ self.hidden_size = hidden_size
893
+ self.head_size = head_size
894
+ self.total_num_heads = total_num_heads
895
+ if total_num_kv_heads is None:
896
+ total_num_kv_heads = total_num_heads
897
+ self.total_num_kv_heads = total_num_kv_heads
898
+ # Divide the weight matrix along the last dimension.
899
+ tp_size = (get_tensor_model_parallel_world_size()
900
+ if not disable_tp else 1)
901
+ self.num_heads = divide(self.total_num_heads, tp_size)
902
+ if tp_size >= self.total_num_kv_heads:
903
+ self.num_kv_heads = 1
904
+ self.num_kv_head_replicas = divide(tp_size,
905
+ self.total_num_kv_heads)
906
+ else:
907
+ self.num_kv_heads = divide(self.total_num_kv_heads, tp_size)
908
+ self.num_kv_head_replicas = 1
909
+ input_size = self.hidden_size
910
+ output_size = (self.num_heads +
911
+ 2 * self.num_kv_heads) * tp_size * self.head_size
912
+ self.output_sizes = [
913
+ self.num_heads * self.head_size * tp_size, # q_proj
914
+ self.num_kv_heads * self.head_size * tp_size, # k_proj
915
+ self.num_kv_heads * self.head_size * tp_size, # v_proj
916
+ ]
917
+
918
+ super().__init__(input_size=input_size,
919
+ output_size=output_size,
920
+ bias=bias,
921
+ gather_output=False,
922
+ skip_bias_add=skip_bias_add,
923
+ params_dtype=params_dtype,
924
+ quant_config=quant_config,
925
+ prefix=prefix,
926
+ return_bias=return_bias,
927
+ disable_tp=disable_tp)
928
+
929
+ def _get_shard_offset_mapping(self, loaded_shard_id: str):
930
+ shard_offset_mapping = {
931
+ "q": 0,
932
+ "k": self.num_heads * self.head_size,
933
+ "v": (self.num_heads + self.num_kv_heads) * self.head_size,
934
+ "total": (self.num_heads + 2 * self.num_kv_heads) * self.head_size
935
+ }
936
+ return shard_offset_mapping.get(loaded_shard_id)
937
+
938
+ def _get_shard_size_mapping(self, loaded_shard_id: str):
939
+ shard_size_mapping = {
940
+ "q": self.num_heads * self.head_size,
941
+ "k": self.num_kv_heads * self.head_size,
942
+ "v": self.num_kv_heads * self.head_size,
943
+ }
944
+ return shard_size_mapping.get(loaded_shard_id)
945
+
946
+ def _load_fused_module_from_checkpoint(self, param: BasevLLMParameter,
947
+ loaded_weight: torch.Tensor):
948
+ """
949
+ Handle special case for models where QKV layers are already
950
+ fused on disk. In this case, we have no shard id. This function
951
+ determines the shard id by splitting these layers and then calls
952
+ the weight loader using the shard id.
953
+
954
+ An example of a model with these fused layers:
955
+ https://huggingface.co/microsoft/Phi-3-mini-4k-instruct
956
+ """
957
+ shard_offsets = [
958
+ # (shard_id, shard_offset, shard_size)
959
+ ("q", 0, self.total_num_heads * self.head_size),
960
+ ("k", self.total_num_heads * self.head_size,
961
+ self.total_num_kv_heads * self.head_size),
962
+ ("v",
963
+ (self.total_num_heads + self.total_num_kv_heads) * self.head_size,
964
+ self.total_num_kv_heads * self.head_size),
965
+ ]
966
+
967
+ for shard_id, shard_offset, shard_size in shard_offsets:
968
+ # Special case for Quantization.
969
+ # If quantized, we need to adjust the offset and size to account
970
+ # for the packing.
971
+ if isinstance(param, (PackedColumnParameter, PackedvLLMParameter
972
+ )) and param.packed_dim == param.output_dim:
973
+ shard_size, shard_offset = \
974
+ param.adjust_shard_indexes_for_packing(
975
+ shard_size=shard_size, shard_offset=shard_offset)
976
+
977
+ loaded_weight_shard = loaded_weight.narrow(param.output_dim,
978
+ shard_offset,
979
+ shard_size)
980
+ self.weight_loader_v2(param, loaded_weight_shard, shard_id)
981
+
982
+ def weight_loader_v2(self,
983
+ param: BasevLLMParameter,
984
+ loaded_weight: torch.Tensor,
985
+ loaded_shard_id: Optional[str] = None):
986
+ if loaded_shard_id is None: # special case for certain models
987
+ if isinstance(param, PerTensorScaleParameter):
988
+ param.load_qkv_weight(loaded_weight=loaded_weight,
989
+ shard_id=0,
990
+ tp_rank=self.tp_rank)
991
+ return
992
+ elif type(param) in (RowvLLMParameter, BasevLLMParameter):
993
+ param.load_qkv_weight(loaded_weight=loaded_weight,
994
+ tp_rank=self.tp_rank)
995
+ return
996
+ # TODO: @dsikka - move to parameter.py
997
+ self._load_fused_module_from_checkpoint(param, loaded_weight)
998
+ return
999
+
1000
+ assert loaded_shard_id in ["q", "k", "v"]
1001
+
1002
+ shard_offset = self._get_shard_offset_mapping(loaded_shard_id)
1003
+ shard_size = self._get_shard_size_mapping(loaded_shard_id)
1004
+
1005
+ # Note(simon): This is needed for Qwen3's fp8 quantization.
1006
+ if isinstance(param, BlockQuantScaleParameter):
1007
+ assert self.quant_method is not None
1008
+ # Assume the weight block size has been set by quant method
1009
+ assert hasattr(self, "weight_block_size")
1010
+ weight_block_size = self.weight_block_size
1011
+ assert weight_block_size is not None
1012
+ block_n, _ = weight_block_size[0], weight_block_size[1]
1013
+ shard_offset = (shard_offset + block_n - 1) // block_n
1014
+ shard_size = (shard_size + block_n - 1) // block_n
1015
+
1016
+ param.load_qkv_weight(loaded_weight=loaded_weight,
1017
+ num_heads=self.num_kv_head_replicas,
1018
+ shard_id=loaded_shard_id,
1019
+ shard_offset=shard_offset,
1020
+ shard_size=shard_size,
1021
+ tp_rank=self.tp_rank)
1022
+
1023
+ def weight_loader(self,
1024
+ param: Parameter,
1025
+ loaded_weight: torch.Tensor,
1026
+ loaded_shard_id: Optional[str] = None):
1027
+
1028
+ # Special case for GGUF
1029
+ # initialize GGUF param after we know the quantize type
1030
+ is_gguf_weight = getattr(param, "is_gguf_weight", False)
1031
+ is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
1032
+ if is_gguf_weight_type:
1033
+ idx_map = {"q": 0, "k": 1, "v": 2}
1034
+ if loaded_shard_id is not None:
1035
+ param.data[idx_map[loaded_shard_id]].copy_(loaded_weight)
1036
+ param.shard_weight_type[loaded_shard_id] = loaded_weight.item()
1037
+ else:
1038
+ param.shard_weight_type = {
1039
+ k: loaded_weight.item()
1040
+ for k in idx_map
1041
+ }
1042
+ return
1043
+
1044
+ if is_gguf_weight:
1045
+ output_dim = getattr(param, "output_dim", None)
1046
+ shard_size = loaded_weight.size(output_dim) // self.tp_size
1047
+ start_idx = self.tp_rank * shard_size
1048
+
1049
+ if loaded_shard_id is not None:
1050
+ loaded_weight = loaded_weight.narrow(output_dim, start_idx,
1051
+ shard_size)
1052
+ param.shard_id.append(loaded_shard_id)
1053
+ param.shard_id_map[loaded_shard_id] = len(param.data_container)
1054
+ param.data_container.append(loaded_weight)
1055
+ return
1056
+
1057
+ param_data = param.data
1058
+ output_dim = getattr(param, "output_dim", None)
1059
+
1060
+ # Special case for per-tensor scales in fused case.
1061
+ needs_scalar_to_array = getattr(param, "needs_scalar_to_array", False)
1062
+
1063
+ if loaded_shard_id is None:
1064
+ # Loaded weight is already fused on disk (qkv).
1065
+ # (e.g., Phi-3's qkv_proj).
1066
+ if output_dim is None:
1067
+ if needs_scalar_to_array:
1068
+ param_data, loaded_weight = adjust_scalar_to_fused_array(
1069
+ param_data, loaded_weight, 0)
1070
+
1071
+ assert param_data.shape == loaded_weight.shape
1072
+ param_data.copy_(loaded_weight)
1073
+ return
1074
+ shard_offsets = [
1075
+ # (shard_id, shard_offset, shard_size)
1076
+ ("q", 0, self.total_num_heads * self.head_size),
1077
+ ("k", self.total_num_heads * self.head_size,
1078
+ self.total_num_kv_heads * self.head_size),
1079
+ ("v", (self.total_num_heads + self.total_num_kv_heads) *
1080
+ self.head_size, self.total_num_kv_heads * self.head_size),
1081
+ ]
1082
+ use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit",
1083
+ False)
1084
+
1085
+ packed_dim = getattr(param, "packed_dim", None)
1086
+ for shard_id, shard_offset, shard_size in shard_offsets:
1087
+ # Special case for Quantized Weights.
1088
+ # If quantized, we need to adjust the offset and size to account
1089
+ # for the packing.
1090
+ if packed_dim == output_dim:
1091
+ shard_size = shard_size // param.packed_factor
1092
+ shard_offset = shard_offset // param.packed_factor
1093
+
1094
+ # Special case for Marlin.
1095
+ shard_size, shard_offset = adjust_marlin_shard(
1096
+ param, shard_size, shard_offset)
1097
+
1098
+ if use_bitsandbytes_4bit:
1099
+ orig_qkv_offsets = {
1100
+ "q": (0, self.total_num_heads * self.head_size),
1101
+ "k": (self.total_num_heads * self.head_size,
1102
+ self.total_num_kv_heads * self.head_size),
1103
+ "v":
1104
+ ((self.total_num_heads + self.total_num_kv_heads) *
1105
+ self.head_size,
1106
+ self.total_num_kv_heads * self.head_size),
1107
+ "total":
1108
+ ((self.total_num_heads + 2 * self.total_num_kv_heads) *
1109
+ self.head_size, 0)
1110
+ }
1111
+
1112
+ shard_size, shard_offset = adjust_bitsandbytes_4bit_shard(
1113
+ param, orig_qkv_offsets, shard_id)
1114
+
1115
+ loaded_weight_shard = loaded_weight.narrow(
1116
+ output_dim, shard_offset, shard_size)
1117
+ self.weight_loader(param, loaded_weight_shard, shard_id)
1118
+ return
1119
+
1120
+ assert loaded_shard_id in ["q", "k", "v"]
1121
+
1122
+ # If output dim is defined, use the default loading process.
1123
+ if output_dim is not None:
1124
+ if loaded_shard_id == "q":
1125
+ shard_offset = 0
1126
+ shard_size = self.num_heads * self.head_size
1127
+ elif loaded_shard_id == "k":
1128
+ shard_offset = self.num_heads * self.head_size
1129
+ shard_size = self.num_kv_heads * self.head_size
1130
+ elif loaded_shard_id == "v":
1131
+ shard_offset = (self.num_heads +
1132
+ self.num_kv_heads) * self.head_size
1133
+ shard_size = self.num_kv_heads * self.head_size
1134
+ # Special case for Quantized Weights.
1135
+ # If quantized, we need to adjust the offset and size to account
1136
+ # for the packing.
1137
+ packed_dim = getattr(param, "packed_dim", None)
1138
+ if packed_dim == output_dim:
1139
+ shard_size = shard_size // param.packed_factor
1140
+ shard_offset = shard_offset // param.packed_factor
1141
+
1142
+ # Special case for Marlin.
1143
+ shard_size, shard_offset = adjust_marlin_shard(
1144
+ param, shard_size, shard_offset)
1145
+
1146
+ use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit",
1147
+ False)
1148
+ is_sharded_weight = getattr(param, "is_sharded_weight", False)
1149
+ # bitsandbytes loads the weights of the specific portion
1150
+ # no need to narrow
1151
+ is_sharded_weight = is_sharded_weight or use_bitsandbytes_4bit
1152
+
1153
+ if use_bitsandbytes_4bit:
1154
+ orig_qkv_offsets = {
1155
+ "q": (0, self.num_heads * self.head_size),
1156
+ "k": (self.num_heads * self.head_size,
1157
+ self.num_kv_heads * self.head_size),
1158
+ "v":
1159
+ ((self.num_heads + self.num_kv_heads) * self.head_size,
1160
+ self.num_kv_heads * self.head_size),
1161
+ "total":
1162
+ ((self.num_heads + 2 * self.num_kv_heads) * self.head_size,
1163
+ 0)
1164
+ }
1165
+ shard_size, shard_offset = adjust_bitsandbytes_4bit_shard(
1166
+ param, orig_qkv_offsets, loaded_shard_id)
1167
+
1168
+ param_data = param_data.narrow(output_dim, shard_offset,
1169
+ shard_size)
1170
+ if loaded_shard_id == "q":
1171
+ shard_id = self.tp_rank
1172
+ else:
1173
+ shard_id = self.tp_rank // self.num_kv_head_replicas
1174
+ start_idx = shard_id * shard_size
1175
+
1176
+ if not is_sharded_weight:
1177
+ loaded_weight = loaded_weight.narrow(output_dim, start_idx,
1178
+ shard_size)
1179
+
1180
+ # Special case for per-tensor scales in fused case.
1181
+ elif needs_scalar_to_array:
1182
+ param_data, loaded_weight = adjust_scalar_to_fused_array(
1183
+ param_data, loaded_weight, loaded_shard_id)
1184
+ else:
1185
+ ignore_warning = getattr(param, "ignore_warning", False)
1186
+ if not ignore_warning:
1187
+ logger.warning(
1188
+ "Loading a weight without `output_dim` attribute in "
1189
+ "QKVParallelLinear, assume the weight is the same "
1190
+ "for all partitions.")
1191
+
1192
+ assert param_data.shape == loaded_weight.shape
1193
+ param_data.copy_(loaded_weight)
1194
+
1195
+
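# --- Editorial note (not part of the packaged file) -----------------------
# Head-partitioning arithmetic from __init__ above, with made-up GQA sizes
# (32 query heads, 8 KV heads, head_size 128, tp_size 4): each rank gets 8
# query heads and 2 KV heads, and the fused per-rank output is laid out as
# [q | k | v] with the sizes computed below.
_total_heads, _total_kv_heads, _head, _tp = 32, 8, 128, 4
_num_heads = divide(_total_heads, _tp)           # 8 query heads per rank
_num_kv_heads = divide(_total_kv_heads, _tp)     # 2 KV heads per rank
assert (_num_heads * _head, _num_kv_heads * _head) == (1024, 256)
# When tp_size exceeds total_num_kv_heads, the KV heads are instead
# replicated (num_kv_heads=1, num_kv_head_replicas=tp_size // total_num_kv_heads).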
1196
+ @CustomOp.register("row_parallel_linear")
1197
+ class RowParallelLinear(LinearBase):
1198
+ """Linear layer with row parallelism.
1199
+
1200
+ The linear layer is defined as Y = XA + b. A is parallelized along
1201
+ its first dimension and X along its second dimension as:
1202
+ - -
1203
+ | A_1 |
1204
+ | . |
1205
+ A = | . | X = [X_1, ..., X_p]
1206
+ | . |
1207
+ | A_p |
1208
+ - -
1209
+ Arguments:
1210
+ input_size: first dimension of matrix A.
1211
+ output_size: second dimension of matrix A.
1212
+ bias: If true, add bias. Note that bias is not parallelized.
1213
+ input_is_parallel: If true, we assume that the input is already
1214
+ split across the GPUs and we do not split
1215
+ again.
1216
+ skip_bias_add: This was added to enable performance optimization where
1217
+ bias can be fused with other element-wise operations.
1218
+ We skip adding bias but instead return it.
1219
+ params_dtype: Data type for the parameters.
1220
+ reduce_results: If true, call all-reduce on output and make Y available
1221
+ to all GPUs, otherwise, every GPU will have its output
1222
+ which is Y = X_iA_i
1223
+ quant_config: Quantization configuration.
1224
+ prefix: The name of the layer in the state dict, including all parents
1225
+ (e.g. model.layers.0.down_proj)
1226
+ return_bias: If true, return bias together with outputs in forward pass.
1227
+ disable_tp: If true, the weight matrix won't be sharded across TP ranks.
1228
+ """
1229
+
1230
+ def __init__(
1231
+ self,
1232
+ input_size: int,
1233
+ output_size: int,
1234
+ bias: bool = True,
1235
+ input_is_parallel: bool = True,
1236
+ skip_bias_add: bool = False,
1237
+ params_dtype: Optional[torch.dtype] = None,
1238
+ reduce_results: bool = True,
1239
+ quant_config: Optional[QuantizationConfig] = None,
1240
+ prefix: str = "",
1241
+ *,
1242
+ return_bias: bool = True,
1243
+ disable_tp: bool = False,
1244
+ ):
1245
+ # Divide the weight matrix along the first dimension.
1246
+ self.tp_rank = (get_tensor_model_parallel_rank()
1247
+ if not disable_tp else 0)
1248
+ self.tp_size = (get_tensor_model_parallel_world_size()
1249
+ if not disable_tp else 1)
1250
+ self.input_size_per_partition = divide(input_size, self.tp_size)
1251
+ self.output_size_per_partition = output_size
1252
+ self.output_partition_sizes = [output_size]
1253
+
1254
+ super().__init__(input_size,
1255
+ output_size,
1256
+ skip_bias_add,
1257
+ params_dtype,
1258
+ quant_config,
1259
+ prefix,
1260
+ return_bias=return_bias,
1261
+ disable_tp=disable_tp)
1262
+
1263
+ self.input_is_parallel = input_is_parallel
1264
+ self.reduce_results = reduce_results
1265
+
1266
+ assert self.quant_method is not None
1267
+ self.quant_method.create_weights(
1268
+ layer=self,
1269
+ input_size_per_partition=self.input_size_per_partition,
1270
+ output_partition_sizes=self.output_partition_sizes,
1271
+ input_size=self.input_size,
1272
+ output_size=self.output_size,
1273
+ params_dtype=self.params_dtype,
1274
+ weight_loader=(
1275
+ self.weight_loader_v2 if self.quant_method.__class__.__name__
1276
+ in WEIGHT_LOADER_V2_SUPPORTED else self.weight_loader))
1277
+ if not reduce_results and (bias and not skip_bias_add):
1278
+ raise ValueError("When not reduce the results, adding bias to the "
1279
+ "results can lead to incorrect results")
1280
+
1281
+ if bias:
1282
+ self.bias = Parameter(
1283
+ torch.empty(self.output_size, dtype=params_dtype))
1284
+ set_weight_attrs(self.bias, {
1285
+ "output_dim": 0,
1286
+ "weight_loader": self.weight_loader,
1287
+ })
1288
+ else:
1289
+ self.register_parameter("bias", None)
1290
+ self.update_param_tp_status()
1291
+
1292
+ def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
1293
+ input_dim = getattr(param, "input_dim", None)
1294
+ use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit", False)
1295
+ is_sharded_weight = getattr(param, "is_sharded_weight", False)
1296
+ # bitsandbytes loads the weights of the specific portion
1297
+ # no need to narrow
1298
+ is_sharded_weight = is_sharded_weight or use_bitsandbytes_4bit
1299
+
1300
+ # Special case for GGUF
1301
+ is_gguf_weight = getattr(param, "is_gguf_weight", False)
1302
+ is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
1303
+ if is_gguf_weight_type:
1304
+ param.weight_type = loaded_weight.item()
1305
+
1306
+ # Materialize GGUF UninitializedParameter
1307
+ if is_gguf_weight and isinstance(param, UninitializedParameter):
1308
+ weight_shape = list(loaded_weight.shape)
1309
+ if input_dim:
1310
+ weight_shape[input_dim] = (weight_shape[input_dim] //
1311
+ self.tp_size)
1312
+ param.materialize(tuple(weight_shape), dtype=loaded_weight.dtype)
1313
+
1314
+ param_data = param.data
1315
+ if input_dim is not None and not is_sharded_weight:
1316
+ shard_size = param_data.shape[input_dim]
1317
+ start_idx = self.tp_rank * shard_size
1318
+ loaded_weight = loaded_weight.narrow(input_dim, start_idx,
1319
+ shard_size)
1320
+
1321
+ # Special case for loading scales off disk, which often do not
1322
+ # have a shape (such as in the case of AutoFP8).
1323
+ if len(loaded_weight.shape) == 0:
1324
+ loaded_weight = loaded_weight.reshape(1)
1325
+
1326
+ assert param_data.shape == loaded_weight.shape
1327
+ param_data.copy_(loaded_weight)
1328
+
1329
+ def weight_loader_v2(self, param: BasevLLMParameter,
1330
+ loaded_weight: torch.Tensor):
1331
+
1332
+ # Special case for loading scales off disk, which often do not
1333
+ # have a shape (such as in the case of AutoFP8).
1334
+ if len(loaded_weight.shape) == 0:
1335
+ assert loaded_weight.numel() == 1
1336
+ loaded_weight = loaded_weight.reshape(1)
1337
+
1338
+ param.load_row_parallel_weight(loaded_weight=loaded_weight)
1339
+
1340
+ def forward(
1341
+ self,
1342
+ input_,
1343
+ ) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
1344
+ if self.input_is_parallel:
1345
+ input_parallel = input_
1346
+ else:
1347
+ splitted_input = split_tensor_along_last_dim(
1348
+ input_, num_partitions=self.tp_size)
1349
+ input_parallel = splitted_input[self.tp_rank].contiguous()
1350
+
1351
+ # Matrix multiply.
1352
+ assert self.quant_method is not None
1353
+ # Only fuse bias add into GEMM for rank 0 (this ensures that
1354
+ # bias will not get added more than once in TP>1 case)
1355
+ bias_ = None if (self.tp_rank > 0 or self.skip_bias_add) else self.bias
1356
+ output_parallel = self.quant_method.apply(self, input_parallel, bias_)
1357
+
1358
+ if self.reduce_results and self.tp_size > 1:
1359
+ output = tensor_model_parallel_all_reduce(output_parallel)
1360
+ else:
1361
+ output = output_parallel
1362
+
1363
+ output_bias = self.bias if self.skip_bias_add else None
1364
+
1365
+ if not self.return_bias:
1366
+ return output
1367
+ return output, output_bias
1368
+
1369
+ def extra_repr(self) -> str:
1370
+ s = f"in_features={self.input_size_per_partition}"
1371
+ s += f", output_features={self.output_size}"
1372
+ s += f", bias={self.bias is not None}"
1373
+ s += f", tp_size={self.tp_size}"
1374
+ s += f", reduce_results={self.reduce_results}"
1375
+ return s
1376
+
1377
+
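# --- Editorial note (not part of the packaged file) -----------------------
# Sharding arithmetic for RowParallelLinear with made-up sizes: the *input*
# dimension is split across TP ranks, each rank computes a partial
# Y_i = X_i @ A_i, and with reduce_results=True the partials are summed by
# an all-reduce. As in forward() above, bias is fused into the GEMM only
# on rank 0 so it is not added tp_size times.
_in, _out, _tp = 11008, 4096, 4
_in_per_rank = divide(_in, _tp)                  # 2752
assert _in_per_rank * _tp == _in
_per_rank_weight_shape = (_out, _in_per_rank)    # (4096, 2752)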
1378
+ @CustomOp.register("qkv_cross_parallel_linear")
1379
+ class QKVCrossParallelLinear(LinearBase):
1380
+ """Linear layers for efficient cross-attention's QKV transformation.
1381
+
1382
+ Args:
1383
+ hidden_size: input hidden state size of the transformer.
1384
+ head_size: size of each attention head.
1385
+ total_num_heads: total number of attention query heads.
1386
+ total_num_kv_heads: total number of attention key/value heads. If
1387
+ None, assume total_num_kv_heads = total_num_heads.
1388
+ bias: If true, add bias.
1389
+ skip_bias_add: This was added to enable performance optimizations where
1390
+ bias can be fused with other element-wise operations. We
1391
+ skip adding bias but instead return it.
1392
+ params_dtype: Data type for the parameters.
1393
+ quant_config: Quantization configuration.
1394
+ prefix: The name of the layer in the state dict, including all parents
1395
+ (e.g. model.layers.0.qkv_proj)
1396
+ """
1397
+
1398
+ def __init__(self,
1399
+ hidden_size: int,
1400
+ head_size: int,
1401
+ total_num_heads: int,
1402
+ total_num_kv_heads: Optional[int] = None,
1403
+ bias: bool = True,
1404
+ skip_bias_add: bool = False,
1405
+ params_dtype: Optional[torch.dtype] = None,
1406
+ quant_config: Optional[QuantizationConfig] = None,
1407
+ prefix: str = ""):
1408
+ # input_size and output_size are not used, just for alignment
1409
+ input_size = hidden_size
1410
+ output_size = (total_num_heads + (total_num_kv_heads or 0)) * head_size
1411
+ super().__init__(input_size=input_size,
1412
+ output_size=output_size,
1413
+ skip_bias_add=skip_bias_add,
1414
+ params_dtype=params_dtype,
1415
+ quant_config=quant_config,
1416
+ prefix=prefix)
1417
+
1418
+ self.quant_config = quant_config
1419
+
1420
+ # Empty placeholders for loading as a single module.
1421
+ placeholder_size = 0
1422
+ assert self.quant_method is not None
1423
+ self.quant_method.create_weights(self,
1424
+ placeholder_size, [placeholder_size],
1425
+ placeholder_size,
1426
+ placeholder_size,
1427
+ self.params_dtype,
1428
+ weight_loader=self.weight_loader)
1429
+
1430
+ # Use a dictionary to avoid auto-registration of submodule parameters:
1431
+ # drop-in replacement for a `QKVParallelLinear` module.
1432
+ self.proj = dict()
1433
+ self.proj["q_proj_decoder"] = ColumnParallelLinear(
1434
+ input_size=hidden_size,
1435
+ output_size=total_num_heads * head_size,
1436
+ bias=bias,
1437
+ quant_config=quant_config,
1438
+ skip_bias_add=skip_bias_add,
1439
+ params_dtype=params_dtype,
1440
+ prefix=f"{prefix}.q_proj_decoder")
1441
+
1442
+ self.proj["kv_proj_encoder"] = QKVParallelLinear(
1443
+ hidden_size=hidden_size,
1444
+ head_size=head_size,
1445
+ total_num_heads=0,
1446
+ total_num_kv_heads=total_num_kv_heads,
1447
+ bias=bias,
1448
+ quant_config=quant_config,
1449
+ skip_bias_add=skip_bias_add,
1450
+ params_dtype=params_dtype,
1451
+ prefix=f"{prefix}.kv_proj_encoder")
1452
+
1453
+ # `kv_proj_encoder.num_kv_heads` accounts for sharding with tp>1.
1454
+ self.q_size = self.q_proj_decoder.output_size_per_partition
1455
+ self.kv_size = self.kv_proj_encoder.num_kv_heads * head_size
1456
+
1457
+ if bias:
1458
+ self.bias = torch.nn.Parameter()
1459
+ set_weight_attrs(self.bias, {
1460
+ "output_dim": 0,
1461
+ "weight_loader": self.weight_loader_v1,
1462
+ })
1463
+ else:
1464
+ self.bias = None
1465
+
1466
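The `self.proj = dict()` comment in `__init__` above relies on standard `torch.nn.Module` behaviour: modules assigned as attributes are auto-registered (their parameters appear in `named_parameters()`), while modules stored inside a plain dict are not. A small standalone illustration of that behaviour, independent of vLLM:

import torch.nn as nn

class Holder(nn.Module):
    def __init__(self):
        super().__init__()
        self.registered = nn.Linear(4, 4)          # auto-registered submodule
        self.proj = {"hidden": nn.Linear(4, 4)}    # plain dict: not registered

m = Holder()
print([name for name, _ in m.named_parameters()])
# ['registered.weight', 'registered.bias'] -- the dict-held layer's parameters
# are absent, which lets the class above expose only its placeholder parameters.
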
+     def process_weights_after_loading(self):
+         for layer in self.proj.values():
+             if self.quant_method is not None:
+                 self.quant_method.process_weights_after_loading(layer)
+
+     @property
+     def q_proj_decoder(self) -> ColumnParallelLinear:
+         layer = self.proj["q_proj_decoder"]
+         for name, param in self.named_parameters():
+             target_param = getattr(layer, name, None)
+             if target_param is not None:
+                 self.sync_weight_attrs(param,
+                                        target_param,
+                                        mode="q_proj_decoder")
+         return layer
+
+     @property
+     def kv_proj_encoder(self) -> QKVParallelLinear:
+         layer = self.proj["kv_proj_encoder"]
+         for name, param in self.named_parameters():
+             target_param = getattr(layer, name, None)
+             if target_param is not None:
+                 self.sync_weight_attrs(param,
+                                        target_param,
+                                        mode="kv_proj_encoder")
+         return layer
+
+     def sync_weight_attrs(
+         self,
+         src_param: nn.Parameter,
+         tgt_param: nn.Parameter,
+         mode: Literal["q_proj_decoder", "kv_proj_encoder"],
+     ):
+         missing_attrs_dict = {
+             k: getattr(src_param, k)
+             for k in (set(vars(src_param).keys()) -
+                       set(vars(tgt_param).keys()))
+         }
+         # TODO(Isotr0py): handle bitsandbytes 8bit
+         use_bitsandbytes_4bit = getattr(src_param, "use_bitsandbytes_4bit",
+                                         False)
+         if (missing_attrs_dict and use_bitsandbytes_4bit):
+             q_proj_attrs, kv_proj_attrs = left_shift_bitsandbytes_4bit_shard(
+                 missing_attrs_dict)
+             if mode == "q_proj_decoder":
+                 set_weight_attrs(tgt_param, q_proj_attrs)
+             elif mode == "kv_proj_encoder":
+                 set_weight_attrs(tgt_param, kv_proj_attrs)
+         else:
+             set_weight_attrs(tgt_param, missing_attrs_dict)
+
+     def _is_same_param(
+         self,
+         src_param: torch.nn.Parameter,
+         map_param: torch.nn.Parameter,
+     ) -> bool:
+         """Check if two parameters point to exactly the same thing."""
+         # ignore weight_loader because it's always different
+         key_to_ignore = ["weight_loader", "_weight_loader"]
+         has_same_type_name = type(src_param) is type(map_param)
+         src_param_attrs = {
+             k: v
+             for k, v in src_param.__dict__.items() if k not in key_to_ignore
+         }
+         map_param_attrs = {
+             k: v
+             for k, v in map_param.__dict__.items() if k not in key_to_ignore
+         }
+         has_same_attrs = src_param_attrs == map_param_attrs
+         return has_same_type_name and has_same_attrs
+
+     def select_proj_params(
+         self,
+         layer: nn.Module,
+         param: nn.Parameter,
+     ) -> nn.Parameter:
+         """
+         Given the placeholder param,
+         return the corresponding param in the proj layers.
+         """
+         target_param_list = [
+             v for _, v in layer.named_parameters()
+             if self._is_same_param(param, v)
+         ]
+         assert len(target_param_list) == 1
+         target_param = target_param_list[0]
+         return target_param
+
+     def forward(  # type: ignore[override]
+         self,
+         decoder_hidden_states: torch.Tensor,
+         encoder_hidden_states: torch.Tensor,
+     ) -> tuple[torch.Tensor, ...]:
+         q, _ = self.q_proj_decoder(decoder_hidden_states)
+         if encoder_hidden_states is None:
+             # Encoder KV already cached.
+             k = None
+             v = None
+         else:
+             # Prefill phase, encoder KV cached here.
+             kv_enc, _ = self.kv_proj_encoder(encoder_hidden_states)
+             # Split kv in half
+             k, v = kv_enc.split(self.kv_size, dim=-1)
+         return q, k, v
+
+     def weight_loader_v1(self,
+                          param: torch.nn.Parameter,
+                          loaded_weight: torch.Tensor,
+                          loaded_shard_id: Optional[str] = None):
+         # Just like all other parameters, loading bias with
+         # weight_loader_v2 is not yet supported.
+         layer = (self.q_proj_decoder
+                  if loaded_shard_id == "q" else self.kv_proj_encoder)
+         target_param = self.select_proj_params(layer, param)
+         shard_id_args = (loaded_shard_id, ) if loaded_shard_id != "q" else ()
+         layer.weight_loader(target_param, loaded_weight, *shard_id_args)
+
+     def weight_loader(self,
+                       param: torch.nn.Parameter,
+                       loaded_weight: torch.Tensor,
+                       loaded_shard_id: Optional[str] = None):
+         layer = (self.q_proj_decoder
+                  if loaded_shard_id == "q" else self.kv_proj_encoder)
+         target_param = self.select_proj_params(layer, param)
+         shard_id_args = (loaded_shard_id, ) if loaded_shard_id != "q" else ()
+         if self.quant_method.__class__.__name__ in WEIGHT_LOADER_V2_SUPPORTED:
+             layer.weight_loader_v2(target_param, loaded_weight, *shard_id_args)
+         else:
+             layer.weight_loader(target_param, loaded_weight, *shard_id_args)
+
+     def extra_repr(self) -> str:
+         s = f"in_features={self.input_size}"
+         s += f", q_size={self.q_size}"
+         s += f", kv_size={self.kv_size}"
+         s += f", bias={self.bias is not None}"
+         s += f", tp_size={get_tensor_model_parallel_world_size()}"
+         s += ", gather_output=False"
+         return s
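
For reference, the shape contract of `QKVCrossParallelLinear.forward` can be mimicked with plain `nn.Linear` layers. This is a single-process sketch with made-up sizes; the real layer additionally handles tensor-parallel sharding, quantization, and placeholder-based weight loading:

import torch
import torch.nn as nn

hidden_size, head_size = 64, 16
total_num_heads, total_num_kv_heads = 8, 2
q_size = total_num_heads * head_size          # 128
kv_size = total_num_kv_heads * head_size      # 32 for keys, 32 for values

q_proj_decoder = nn.Linear(hidden_size, q_size)
kv_proj_encoder = nn.Linear(hidden_size, 2 * kv_size)

decoder_hidden_states = torch.randn(4, hidden_size)
encoder_hidden_states = torch.randn(6, hidden_size)

q = q_proj_decoder(decoder_hidden_states)     # queries come from the decoder
kv = kv_proj_encoder(encoder_hidden_states)   # keys/values come from the encoder
k, v = kv.split(kv_size, dim=-1)              # same split as in forward() above

print(q.shape, k.shape, v.shape)              # (4, 128) (6, 32) (6, 32)
# When encoder_hidden_states is None (encoder KV already cached),
# forward() returns k = v = None.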