vllm_cpu-0.8.5.post2-cp310-cp310-manylinux_2_17_x86_64.whl
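The wheel filename encodes the distribution name, version, and the interpreter/ABI/platform tags (cp310-cp310-manylinux_2_17_x86_64). As a quick, hedged illustration (not part of the diff itself), the standard `packaging` library can split such a filename into its components:

```python
from packaging.utils import parse_wheel_filename

# Parse the wheel name shown above into (distribution, version, build, tags).
name, version, build, tags = parse_wheel_filename(
    "vllm_cpu-0.8.5.post2-cp310-cp310-manylinux_2_17_x86_64.whl"
)
print(name)                           # vllm-cpu (normalized distribution name)
print(version)                        # 0.8.5.post2
print(sorted(str(t) for t in tags))   # ['cp310-cp310-manylinux_2_17_x86_64']
```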

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of vllm-cpu might be problematic.
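Given that flag, it can be worth auditing the wheel's contents offline before installing. A minimal sketch, assuming you have downloaded the wheel locally (for example with `pip download vllm-cpu==0.8.5.post2 --no-deps`, if your configured index serves it) and assuming everything should live under `vllm/` or the wheel's own `vllm_cpu-*.dist-info/` metadata directory:

```python
import sys
import zipfile
from pathlib import Path

# Usage: python audit_wheel.py path/to/vllm_cpu-0.8.5.post2-...whl
wheel_path = Path(sys.argv[1])

# A wheel is a zip archive; list its members without installing anything.
with zipfile.ZipFile(wheel_path) as whl:
    names = whl.namelist()

# Assumption: a clean wheel for this package only ships the vllm/ package
# tree plus its dist-info metadata; anything else deserves a closer look.
expected = ("vllm/", "vllm_cpu-")
suspicious = [n for n in names if not n.startswith(expected)]

print(f"{len(names)} members; {len(suspicious)} outside expected prefixes")
for name in suspicious:
    print("  ?", name)
```

This only surfaces unexpected paths; it says nothing about the contents of files such as vllm/_C.abi3.so, which the listing below shows shipping as an opaque binary.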

Files changed (1103)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +170 -0
  3. vllm/_custom_ops.py +1536 -0
  4. vllm/_ipex_ops.py +241 -0
  5. vllm/_version.py +34 -0
  6. vllm/adapter_commons/__init__.py +0 -0
  7. vllm/adapter_commons/layers.py +16 -0
  8. vllm/adapter_commons/models.py +105 -0
  9. vllm/adapter_commons/request.py +25 -0
  10. vllm/adapter_commons/utils.py +92 -0
  11. vllm/adapter_commons/worker_manager.py +38 -0
  12. vllm/assets/__init__.py +0 -0
  13. vllm/assets/audio.py +38 -0
  14. vllm/assets/base.py +40 -0
  15. vllm/assets/image.py +31 -0
  16. vllm/assets/video.py +103 -0
  17. vllm/attention/__init__.py +19 -0
  18. vllm/attention/backends/__init__.py +0 -0
  19. vllm/attention/backends/abstract.py +306 -0
  20. vllm/attention/backends/blocksparse_attn.py +457 -0
  21. vllm/attention/backends/cpu_mla.py +303 -0
  22. vllm/attention/backends/flash_attn.py +999 -0
  23. vllm/attention/backends/flashinfer.py +1092 -0
  24. vllm/attention/backends/flashmla.py +242 -0
  25. vllm/attention/backends/hpu_attn.py +301 -0
  26. vllm/attention/backends/ipex_attn.py +396 -0
  27. vllm/attention/backends/mla/__init__.py +0 -0
  28. vllm/attention/backends/mla/common.py +1444 -0
  29. vllm/attention/backends/pallas.py +346 -0
  30. vllm/attention/backends/placeholder_attn.py +399 -0
  31. vllm/attention/backends/rocm_aiter_mla.py +412 -0
  32. vllm/attention/backends/rocm_flash_attn.py +969 -0
  33. vllm/attention/backends/torch_sdpa.py +691 -0
  34. vllm/attention/backends/triton_mla.py +113 -0
  35. vllm/attention/backends/utils.py +609 -0
  36. vllm/attention/backends/xformers.py +798 -0
  37. vllm/attention/layer.py +443 -0
  38. vllm/attention/ops/__init__.py +0 -0
  39. vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
  40. vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +432 -0
  41. vllm/attention/ops/blocksparse_attention/interface.py +238 -0
  42. vllm/attention/ops/blocksparse_attention/utils.py +244 -0
  43. vllm/attention/ops/chunked_prefill_paged_decode.py +366 -0
  44. vllm/attention/ops/flashmla.py +115 -0
  45. vllm/attention/ops/hpu_paged_attn.py +105 -0
  46. vllm/attention/ops/ipex_attn.py +193 -0
  47. vllm/attention/ops/merge_attn_states.py +42 -0
  48. vllm/attention/ops/nki_flash_attn.py +905 -0
  49. vllm/attention/ops/paged_attn.py +255 -0
  50. vllm/attention/ops/prefix_prefill.py +902 -0
  51. vllm/attention/ops/rocm_aiter_mla.py +42 -0
  52. vllm/attention/ops/rocm_aiter_paged_attn.py +101 -0
  53. vllm/attention/ops/triton_decode_attention.py +675 -0
  54. vllm/attention/ops/triton_flash_attention.py +1375 -0
  55. vllm/attention/ops/triton_merge_attn_states.py +96 -0
  56. vllm/attention/selector.py +186 -0
  57. vllm/attention/utils/fa_utils.py +54 -0
  58. vllm/beam_search.py +82 -0
  59. vllm/benchmarks/__init__.py +0 -0
  60. vllm/benchmarks/datasets.py +831 -0
  61. vllm/benchmarks/endpoint_request_func.py +160 -0
  62. vllm/benchmarks/latency.py +181 -0
  63. vllm/benchmarks/serve.py +925 -0
  64. vllm/benchmarks/throughput.py +608 -0
  65. vllm/benchmarks/utils.py +69 -0
  66. vllm/collect_env.py +795 -0
  67. vllm/compilation/__init__.py +0 -0
  68. vllm/compilation/backends.py +715 -0
  69. vllm/compilation/compiler_interface.py +437 -0
  70. vllm/compilation/counter.py +33 -0
  71. vllm/compilation/decorators.py +249 -0
  72. vllm/compilation/fix_functionalization.py +182 -0
  73. vllm/compilation/fusion.py +617 -0
  74. vllm/compilation/fx_utils.py +60 -0
  75. vllm/compilation/inductor_pass.py +114 -0
  76. vllm/compilation/monitor.py +38 -0
  77. vllm/compilation/multi_output_match.py +108 -0
  78. vllm/compilation/noop_elimination.py +135 -0
  79. vllm/compilation/pass_manager.py +74 -0
  80. vllm/compilation/sequence_parallelism.py +266 -0
  81. vllm/compilation/torch25_custom_graph_pass.py +41 -0
  82. vllm/compilation/vllm_inductor_pass.py +68 -0
  83. vllm/compilation/wrapper.py +129 -0
  84. vllm/config.py +4179 -0
  85. vllm/connections.py +170 -0
  86. vllm/core/__init__.py +0 -0
  87. vllm/core/block/__init__.py +0 -0
  88. vllm/core/block/block_table.py +398 -0
  89. vllm/core/block/common.py +370 -0
  90. vllm/core/block/cpu_gpu_block_allocator.py +440 -0
  91. vllm/core/block/interfaces.py +318 -0
  92. vllm/core/block/naive_block.py +465 -0
  93. vllm/core/block/prefix_caching_block.py +1134 -0
  94. vllm/core/block/utils.py +27 -0
  95. vllm/core/block_manager.py +520 -0
  96. vllm/core/evictor.py +156 -0
  97. vllm/core/interfaces.py +134 -0
  98. vllm/core/placeholder_block_space_manager.py +99 -0
  99. vllm/core/scheduler.py +2060 -0
  100. vllm/device_allocator/__init__.py +0 -0
  101. vllm/device_allocator/cumem.py +280 -0
  102. vllm/distributed/__init__.py +5 -0
  103. vllm/distributed/communication_op.py +40 -0
  104. vllm/distributed/device_communicators/__init__.py +0 -0
  105. vllm/distributed/device_communicators/base_device_communicator.py +151 -0
  106. vllm/distributed/device_communicators/cpu_communicator.py +139 -0
  107. vllm/distributed/device_communicators/cuda_communicator.py +131 -0
  108. vllm/distributed/device_communicators/cuda_wrapper.py +179 -0
  109. vllm/distributed/device_communicators/custom_all_reduce.py +301 -0
  110. vllm/distributed/device_communicators/custom_all_reduce_utils.py +257 -0
  111. vllm/distributed/device_communicators/hpu_communicator.py +45 -0
  112. vllm/distributed/device_communicators/neuron_communicator.py +19 -0
  113. vllm/distributed/device_communicators/pynccl.py +217 -0
  114. vllm/distributed/device_communicators/pynccl_wrapper.py +340 -0
  115. vllm/distributed/device_communicators/shm_broadcast.py +557 -0
  116. vllm/distributed/device_communicators/tpu_communicator.py +93 -0
  117. vllm/distributed/device_communicators/xpu_communicator.py +54 -0
  118. vllm/distributed/kv_transfer/README.md +29 -0
  119. vllm/distributed/kv_transfer/__init__.py +11 -0
  120. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  121. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  122. vllm/distributed/kv_transfer/kv_connector/base.py +127 -0
  123. vllm/distributed/kv_transfer/kv_connector/factory.py +107 -0
  124. vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +98 -0
  125. vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +201 -0
  126. vllm/distributed/kv_transfer/kv_connector/simple_connector.py +328 -0
  127. vllm/distributed/kv_transfer/kv_connector/utils.py +90 -0
  128. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +8 -0
  129. vllm/distributed/kv_transfer/kv_connector/v1/base.py +209 -0
  130. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +131 -0
  131. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +383 -0
  132. vllm/distributed/kv_transfer/kv_connector_agent.py +76 -0
  133. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  134. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +174 -0
  135. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +160 -0
  136. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +236 -0
  137. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  138. vllm/distributed/kv_transfer/kv_pipe/base.py +66 -0
  139. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +279 -0
  140. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +279 -0
  141. vllm/distributed/kv_transfer/kv_transfer_state.py +70 -0
  142. vllm/distributed/parallel_state.py +1209 -0
  143. vllm/distributed/utils.py +366 -0
  144. vllm/engine/__init__.py +0 -0
  145. vllm/engine/arg_utils.py +1724 -0
  146. vllm/engine/async_llm_engine.py +1261 -0
  147. vllm/engine/async_timeout.py +191 -0
  148. vllm/engine/llm_engine.py +2150 -0
  149. vllm/engine/metrics.py +717 -0
  150. vllm/engine/metrics_types.py +96 -0
  151. vllm/engine/multiprocessing/__init__.py +183 -0
  152. vllm/engine/multiprocessing/client.py +745 -0
  153. vllm/engine/multiprocessing/engine.py +450 -0
  154. vllm/engine/output_processor/__init__.py +0 -0
  155. vllm/engine/output_processor/interfaces.py +74 -0
  156. vllm/engine/output_processor/multi_step.py +210 -0
  157. vllm/engine/output_processor/single_step.py +136 -0
  158. vllm/engine/output_processor/stop_checker.py +130 -0
  159. vllm/engine/output_processor/util.py +27 -0
  160. vllm/engine/protocol.py +302 -0
  161. vllm/entrypoints/__init__.py +0 -0
  162. vllm/entrypoints/api_server.py +177 -0
  163. vllm/entrypoints/chat_utils.py +1259 -0
  164. vllm/entrypoints/cli/__init__.py +0 -0
  165. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  166. vllm/entrypoints/cli/benchmark/base.py +38 -0
  167. vllm/entrypoints/cli/benchmark/latency.py +29 -0
  168. vllm/entrypoints/cli/benchmark/main.py +53 -0
  169. vllm/entrypoints/cli/benchmark/serve.py +29 -0
  170. vllm/entrypoints/cli/benchmark/throughput.py +29 -0
  171. vllm/entrypoints/cli/collect_env.py +35 -0
  172. vllm/entrypoints/cli/main.py +59 -0
  173. vllm/entrypoints/cli/openai.py +175 -0
  174. vllm/entrypoints/cli/serve.py +59 -0
  175. vllm/entrypoints/cli/types.py +24 -0
  176. vllm/entrypoints/launcher.py +146 -0
  177. vllm/entrypoints/llm.py +1450 -0
  178. vllm/entrypoints/logger.py +44 -0
  179. vllm/entrypoints/openai/__init__.py +0 -0
  180. vllm/entrypoints/openai/api_server.py +1130 -0
  181. vllm/entrypoints/openai/cli_args.py +296 -0
  182. vllm/entrypoints/openai/logits_processors.py +89 -0
  183. vllm/entrypoints/openai/protocol.py +1806 -0
  184. vllm/entrypoints/openai/run_batch.py +439 -0
  185. vllm/entrypoints/openai/serving_chat.py +1210 -0
  186. vllm/entrypoints/openai/serving_completion.py +557 -0
  187. vllm/entrypoints/openai/serving_embedding.py +245 -0
  188. vllm/entrypoints/openai/serving_engine.py +569 -0
  189. vllm/entrypoints/openai/serving_models.py +314 -0
  190. vllm/entrypoints/openai/serving_pooling.py +237 -0
  191. vllm/entrypoints/openai/serving_score.py +439 -0
  192. vllm/entrypoints/openai/serving_tokenization.py +147 -0
  193. vllm/entrypoints/openai/serving_transcription.py +421 -0
  194. vllm/entrypoints/openai/tool_parsers/__init__.py +19 -0
  195. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +163 -0
  196. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +254 -0
  197. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +232 -0
  198. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +370 -0
  199. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +211 -0
  200. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +303 -0
  201. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +262 -0
  202. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +342 -0
  203. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +110 -0
  204. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +292 -0
  205. vllm/entrypoints/openai/tool_parsers/utils.py +123 -0
  206. vllm/entrypoints/score_utils.py +49 -0
  207. vllm/entrypoints/ssl.py +74 -0
  208. vllm/entrypoints/utils.py +136 -0
  209. vllm/env_override.py +34 -0
  210. vllm/envs.py +800 -0
  211. vllm/executor/__init__.py +0 -0
  212. vllm/executor/executor_base.py +400 -0
  213. vllm/executor/mp_distributed_executor.py +243 -0
  214. vllm/executor/msgspec_utils.py +29 -0
  215. vllm/executor/multiproc_worker_utils.py +312 -0
  216. vllm/executor/ray_distributed_executor.py +700 -0
  217. vllm/executor/ray_utils.py +400 -0
  218. vllm/executor/uniproc_executor.py +141 -0
  219. vllm/forward_context.py +159 -0
  220. vllm/inputs/__init__.py +37 -0
  221. vllm/inputs/data.py +248 -0
  222. vllm/inputs/parse.py +121 -0
  223. vllm/inputs/preprocess.py +745 -0
  224. vllm/inputs/registry.py +212 -0
  225. vllm/jsontree.py +79 -0
  226. vllm/logger.py +210 -0
  227. vllm/logging_utils/__init__.py +7 -0
  228. vllm/logging_utils/formatter.py +17 -0
  229. vllm/logits_process.py +121 -0
  230. vllm/lora/__init__.py +0 -0
  231. vllm/lora/fully_sharded_layers.py +335 -0
  232. vllm/lora/layers.py +1263 -0
  233. vllm/lora/lora.py +198 -0
  234. vllm/lora/models.py +802 -0
  235. vllm/lora/ops/__init__.py +0 -0
  236. vllm/lora/ops/torch_ops/__init__.py +15 -0
  237. vllm/lora/ops/torch_ops/lora_ops.py +115 -0
  238. vllm/lora/ops/triton_ops/__init__.py +11 -0
  239. vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
  240. vllm/lora/ops/triton_ops/lora_expand.py +293 -0
  241. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +147 -0
  242. vllm/lora/ops/triton_ops/lora_shrink.py +247 -0
  243. vllm/lora/ops/triton_ops/utils.py +121 -0
  244. vllm/lora/peft_helper.py +115 -0
  245. vllm/lora/punica_wrapper/__init__.py +9 -0
  246. vllm/lora/punica_wrapper/punica_base.py +483 -0
  247. vllm/lora/punica_wrapper/punica_cpu.py +348 -0
  248. vllm/lora/punica_wrapper/punica_gpu.py +289 -0
  249. vllm/lora/punica_wrapper/punica_hpu.py +144 -0
  250. vllm/lora/punica_wrapper/punica_selector.py +20 -0
  251. vllm/lora/punica_wrapper/utils.py +161 -0
  252. vllm/lora/request.py +97 -0
  253. vllm/lora/resolver.py +83 -0
  254. vllm/lora/utils.py +237 -0
  255. vllm/lora/worker_manager.py +251 -0
  256. vllm/model_executor/__init__.py +15 -0
  257. vllm/model_executor/custom_op.py +153 -0
  258. vllm/model_executor/guided_decoding/__init__.py +180 -0
  259. vllm/model_executor/guided_decoding/guidance_decoding.py +63 -0
  260. vllm/model_executor/guided_decoding/guidance_logits_processors.py +85 -0
  261. vllm/model_executor/guided_decoding/guided_fields.py +42 -0
  262. vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +66 -0
  263. vllm/model_executor/guided_decoding/outlines_decoding.py +154 -0
  264. vllm/model_executor/guided_decoding/outlines_logits_processors.py +271 -0
  265. vllm/model_executor/guided_decoding/reasoner/__init__.py +35 -0
  266. vllm/model_executor/guided_decoding/utils.py +241 -0
  267. vllm/model_executor/guided_decoding/xgrammar_decoding.py +425 -0
  268. vllm/model_executor/layers/__init__.py +0 -0
  269. vllm/model_executor/layers/activation.py +368 -0
  270. vllm/model_executor/layers/fused_moe/__init__.py +51 -0
  271. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  272. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  273. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  274. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  275. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  276. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  277. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  278. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  279. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  280. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  281. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  282. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  283. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  284. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  285. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  286. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  287. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  288. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  289. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  290. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  291. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  292. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  293. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  294. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  295. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  296. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  297. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  298. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  299. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  300. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  301. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  302. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  303. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  304. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  305. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  306. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  307. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  308. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  309. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  310. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  311. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  312. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  313. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  314. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  315. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  316. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  317. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  318. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  319. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  320. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  321. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  322. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  323. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  324. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  325. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  326. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  327. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  328. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  329. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  330. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  331. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  332. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  333. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  334. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  335. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  336. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  337. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  338. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  339. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  425. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  426. vllm/model_executor/layers/fused_moe/cutlass_moe.py +180 -0
  427. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +294 -0
  428. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +374 -0
  429. vllm/model_executor/layers/fused_moe/fused_moe.py +1539 -0
  430. vllm/model_executor/layers/fused_moe/layer.py +949 -0
  431. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +243 -0
  432. vllm/model_executor/layers/fused_moe/moe_pallas.py +64 -0
  433. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +59 -0
  434. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +416 -0
  435. vllm/model_executor/layers/fused_moe/utils.py +48 -0
  436. vllm/model_executor/layers/layernorm.py +277 -0
  437. vllm/model_executor/layers/lightning_attn.py +651 -0
  438. vllm/model_executor/layers/linear.py +1518 -0
  439. vllm/model_executor/layers/logits_processor.py +196 -0
  440. vllm/model_executor/layers/mamba/__init__.py +0 -0
  441. vllm/model_executor/layers/mamba/mamba2_metadata.py +109 -0
  442. vllm/model_executor/layers/mamba/mamba_mixer.py +244 -0
  443. vllm/model_executor/layers/mamba/mamba_mixer2.py +538 -0
  444. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  445. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +104 -0
  446. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +415 -0
  447. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +261 -0
  448. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +588 -0
  449. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +750 -0
  450. vllm/model_executor/layers/mamba/ops/ssd_combined.py +231 -0
  451. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +205 -0
  452. vllm/model_executor/layers/pooler.py +336 -0
  453. vllm/model_executor/layers/quantization/__init__.py +153 -0
  454. vllm/model_executor/layers/quantization/aqlm.py +374 -0
  455. vllm/model_executor/layers/quantization/awq.py +184 -0
  456. vllm/model_executor/layers/quantization/awq_marlin.py +518 -0
  457. vllm/model_executor/layers/quantization/awq_triton.py +319 -0
  458. vllm/model_executor/layers/quantization/base_config.py +145 -0
  459. vllm/model_executor/layers/quantization/bitblas.py +459 -0
  460. vllm/model_executor/layers/quantization/bitsandbytes.py +396 -0
  461. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  462. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +624 -0
  463. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1100 -0
  464. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +20 -0
  465. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +357 -0
  466. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +54 -0
  467. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +159 -0
  468. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +119 -0
  469. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +149 -0
  470. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +110 -0
  471. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +200 -0
  472. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +205 -0
  473. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +213 -0
  474. vllm/model_executor/layers/quantization/deepspeedfp.py +193 -0
  475. vllm/model_executor/layers/quantization/experts_int8.py +194 -0
  476. vllm/model_executor/layers/quantization/fbgemm_fp8.py +168 -0
  477. vllm/model_executor/layers/quantization/fp8.py +832 -0
  478. vllm/model_executor/layers/quantization/gguf.py +408 -0
  479. vllm/model_executor/layers/quantization/gptq.py +276 -0
  480. vllm/model_executor/layers/quantization/gptq_bitblas.py +438 -0
  481. vllm/model_executor/layers/quantization/gptq_marlin.py +643 -0
  482. vllm/model_executor/layers/quantization/gptq_marlin_24.py +295 -0
  483. vllm/model_executor/layers/quantization/hqq_marlin.py +328 -0
  484. vllm/model_executor/layers/quantization/ipex_quant.py +250 -0
  485. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  486. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +89 -0
  487. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +82 -0
  488. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
  489. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +299 -0
  490. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +142 -0
  491. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +119 -0
  492. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +132 -0
  493. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +66 -0
  494. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +86 -0
  495. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +119 -0
  496. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +136 -0
  497. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +40 -0
  498. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +104 -0
  499. vllm/model_executor/layers/quantization/kv_cache.py +137 -0
  500. vllm/model_executor/layers/quantization/marlin.py +259 -0
  501. vllm/model_executor/layers/quantization/modelopt.py +410 -0
  502. vllm/model_executor/layers/quantization/moe_wna16.py +447 -0
  503. vllm/model_executor/layers/quantization/neuron_quant.py +67 -0
  504. vllm/model_executor/layers/quantization/ptpc_fp8.py +125 -0
  505. vllm/model_executor/layers/quantization/qqq.py +273 -0
  506. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  507. vllm/model_executor/layers/quantization/quark/quark.py +385 -0
  508. vllm/model_executor/layers/quantization/quark/quark_moe.py +236 -0
  509. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +7 -0
  510. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +54 -0
  511. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +142 -0
  512. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +121 -0
  513. vllm/model_executor/layers/quantization/quark/utils.py +102 -0
  514. vllm/model_executor/layers/quantization/schema.py +85 -0
  515. vllm/model_executor/layers/quantization/torchao.py +127 -0
  516. vllm/model_executor/layers/quantization/tpu_int8.py +119 -0
  517. vllm/model_executor/layers/quantization/utils/__init__.py +5 -0
  518. vllm/model_executor/layers/quantization/utils/allspark_utils.py +51 -0
  519. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +198 -0
  520. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  521. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  522. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  523. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  524. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  525. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  526. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  527. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  528. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  529. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  530. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  531. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  532. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  533. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  534. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  535. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  536. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  537. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  538. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  539. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  540. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  541. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  542. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  543. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  544. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  545. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  546. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  547. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  548. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  549. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  550. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  551. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  552. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  553. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  554. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  555. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  556. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  557. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  558. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  559. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  560. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  561. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  562. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  563. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  564. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  565. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  566. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  567. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  568. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  569. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  570. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  571. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  572. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  573. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  574. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  575. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  576. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  577. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  578. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  579. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  580. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  581. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  582. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  583. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  584. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  585. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  586. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  587. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  588. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  589. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  590. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  591. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  592. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  593. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  594. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  595. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  596. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  597. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  598. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  599. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  600. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  601. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  602. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  603. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  604. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  605. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  606. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  607. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  608. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  609. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  610. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  611. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  612. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  613. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  614. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  615. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  616. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  617. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  618. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  619. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  620. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  621. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  622. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  623. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  624. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  625. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  626. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  627. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  628. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  629. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  630. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  631. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  632. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  633. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  634. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  635. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  636. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  637. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  638. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  639. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  640. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  641. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  642. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  643. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  644. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  645. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  646. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  647. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  648. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  649. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  650. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  651. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  652. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  653. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  654. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  655. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  656. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  657. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  658. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  659. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  660. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  661. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  662. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  663. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  664. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  665. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  666. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  667. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  668. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  669. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  670. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  671. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  672. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  673. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  674. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  675. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  676. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  677. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  678. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  679. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  680. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  681. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  682. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  683. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  684. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  685. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  686. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  687. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  688. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  689. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  690. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  691. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  722. vllm/model_executor/layers/quantization/utils/fp8_utils.py +523 -0
  723. vllm/model_executor/layers/quantization/utils/gptq_utils.py +94 -0
  724. vllm/model_executor/layers/quantization/utils/int8_utils.py +459 -0
  725. vllm/model_executor/layers/quantization/utils/layer_utils.py +39 -0
  726. vllm/model_executor/layers/quantization/utils/machete_utils.py +32 -0
  727. vllm/model_executor/layers/quantization/utils/marlin_utils.py +413 -0
  728. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +110 -0
  729. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +164 -0
  730. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
  731. vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +127 -0
  732. vllm/model_executor/layers/quantization/utils/quant_utils.py +571 -0
  733. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +404 -0
  734. vllm/model_executor/layers/rejection_sampler.py +400 -0
  735. vllm/model_executor/layers/resampler.py +269 -0
  736. vllm/model_executor/layers/rotary_embedding.py +1598 -0
  737. vllm/model_executor/layers/sampler.py +1221 -0
  738. vllm/model_executor/layers/spec_decode_base_sampler.py +258 -0
  739. vllm/model_executor/layers/typical_acceptance_sampler.py +172 -0
  740. vllm/model_executor/layers/utils.py +99 -0
  741. vllm/model_executor/layers/vocab_parallel_embedding.py +485 -0
  742. vllm/model_executor/model_loader/__init__.py +20 -0
  743. vllm/model_executor/model_loader/loader.py +1542 -0
  744. vllm/model_executor/model_loader/neuron.py +243 -0
  745. vllm/model_executor/model_loader/tensorizer.py +468 -0
  746. vllm/model_executor/model_loader/utils.py +171 -0
  747. vllm/model_executor/model_loader/weight_utils.py +749 -0
  748. vllm/model_executor/models/__init__.py +27 -0
  749. vllm/model_executor/models/adapters.py +247 -0
  750. vllm/model_executor/models/arctic.py +559 -0
  751. vllm/model_executor/models/aria.py +656 -0
  752. vllm/model_executor/models/aya_vision.py +461 -0
  753. vllm/model_executor/models/baichuan.py +469 -0
  754. vllm/model_executor/models/bamba.py +542 -0
  755. vllm/model_executor/models/bart.py +936 -0
  756. vllm/model_executor/models/bert.py +725 -0
  757. vllm/model_executor/models/blip.py +337 -0
  758. vllm/model_executor/models/blip2.py +717 -0
  759. vllm/model_executor/models/bloom.py +358 -0
  760. vllm/model_executor/models/chameleon.py +1135 -0
  761. vllm/model_executor/models/chatglm.py +476 -0
  762. vllm/model_executor/models/clip.py +410 -0
  763. vllm/model_executor/models/commandr.py +466 -0
  764. vllm/model_executor/models/constant_size_cache.py +136 -0
  765. vllm/model_executor/models/dbrx.py +469 -0
  766. vllm/model_executor/models/deepseek.py +484 -0
  767. vllm/model_executor/models/deepseek_mtp.py +266 -0
  768. vllm/model_executor/models/deepseek_v2.py +830 -0
  769. vllm/model_executor/models/deepseek_vl2.py +647 -0
  770. vllm/model_executor/models/eagle.py +247 -0
  771. vllm/model_executor/models/exaone.py +548 -0
  772. vllm/model_executor/models/fairseq2_llama.py +153 -0
  773. vllm/model_executor/models/falcon.py +508 -0
  774. vllm/model_executor/models/florence2.py +1102 -0
  775. vllm/model_executor/models/fuyu.py +388 -0
  776. vllm/model_executor/models/gemma.py +423 -0
  777. vllm/model_executor/models/gemma2.py +423 -0
  778. vllm/model_executor/models/gemma3.py +531 -0
  779. vllm/model_executor/models/gemma3_mm.py +716 -0
  780. vllm/model_executor/models/glm.py +22 -0
  781. vllm/model_executor/models/glm4.py +303 -0
  782. vllm/model_executor/models/glm4v.py +647 -0
  783. vllm/model_executor/models/gpt2.py +313 -0
  784. vllm/model_executor/models/gpt_bigcode.py +336 -0
  785. vllm/model_executor/models/gpt_j.py +337 -0
  786. vllm/model_executor/models/gpt_neox.py +330 -0
  787. vllm/model_executor/models/granite.py +494 -0
  788. vllm/model_executor/models/granite_speech.py +777 -0
  789. vllm/model_executor/models/granitemoe.py +435 -0
  790. vllm/model_executor/models/granitemoeshared.py +339 -0
  791. vllm/model_executor/models/gritlm.py +245 -0
  792. vllm/model_executor/models/grok1.py +560 -0
  793. vllm/model_executor/models/h2ovl.py +542 -0
  794. vllm/model_executor/models/idefics2_vision_model.py +387 -0
  795. vllm/model_executor/models/idefics3.py +767 -0
  796. vllm/model_executor/models/interfaces.py +569 -0
  797. vllm/model_executor/models/interfaces_base.py +163 -0
  798. vllm/model_executor/models/intern_vit.py +476 -0
  799. vllm/model_executor/models/internlm2.py +453 -0
  800. vllm/model_executor/models/internlm2_ve.py +146 -0
  801. vllm/model_executor/models/internvl.py +945 -0
  802. vllm/model_executor/models/jais.py +371 -0
  803. vllm/model_executor/models/jamba.py +590 -0
  804. vllm/model_executor/models/kimi_vl.py +577 -0
  805. vllm/model_executor/models/llama.py +619 -0
  806. vllm/model_executor/models/llama4.py +530 -0
  807. vllm/model_executor/models/llama_eagle.py +152 -0
  808. vllm/model_executor/models/llama_eagle3.py +232 -0
  809. vllm/model_executor/models/llava.py +869 -0
  810. vllm/model_executor/models/llava_next.py +582 -0
  811. vllm/model_executor/models/llava_next_video.py +470 -0
  812. vllm/model_executor/models/llava_onevision.py +954 -0
  813. vllm/model_executor/models/mamba.py +271 -0
  814. vllm/model_executor/models/mamba2.py +302 -0
  815. vllm/model_executor/models/mamba_cache.py +76 -0
  816. vllm/model_executor/models/medusa.py +210 -0
  817. vllm/model_executor/models/minicpm.py +592 -0
  818. vllm/model_executor/models/minicpm3.py +229 -0
  819. vllm/model_executor/models/minicpmo.py +725 -0
  820. vllm/model_executor/models/minicpmv.py +1287 -0
  821. vllm/model_executor/models/minimax_cache.py +35 -0
  822. vllm/model_executor/models/minimax_text_01.py +1261 -0
  823. vllm/model_executor/models/mistral3.py +598 -0
  824. vllm/model_executor/models/mixtral.py +485 -0
  825. vllm/model_executor/models/mixtral_quant.py +447 -0
  826. vllm/model_executor/models/mllama.py +1623 -0
  827. vllm/model_executor/models/mllama4.py +838 -0
  828. vllm/model_executor/models/mlp_speculator.py +205 -0
  829. vllm/model_executor/models/modernbert.py +325 -0
  830. vllm/model_executor/models/module_mapping.py +71 -0
  831. vllm/model_executor/models/molmo.py +1567 -0
  832. vllm/model_executor/models/moonvit.py +628 -0
  833. vllm/model_executor/models/mpt.py +329 -0
  834. vllm/model_executor/models/nemotron.py +506 -0
  835. vllm/model_executor/models/nemotron_nas.py +446 -0
  836. vllm/model_executor/models/nvlm_d.py +212 -0
  837. vllm/model_executor/models/olmo.py +390 -0
  838. vllm/model_executor/models/olmo2.py +412 -0
  839. vllm/model_executor/models/olmoe.py +449 -0
  840. vllm/model_executor/models/opt.py +410 -0
  841. vllm/model_executor/models/orion.py +356 -0
  842. vllm/model_executor/models/paligemma.py +397 -0
  843. vllm/model_executor/models/persimmon.py +342 -0
  844. vllm/model_executor/models/phi.py +354 -0
  845. vllm/model_executor/models/phi3.py +18 -0
  846. vllm/model_executor/models/phi3_small.py +463 -0
  847. vllm/model_executor/models/phi3v.py +722 -0
  848. vllm/model_executor/models/phi4mm.py +1263 -0
  849. vllm/model_executor/models/phi4mm_audio.py +1232 -0
  850. vllm/model_executor/models/phi4mm_utils.py +1883 -0
  851. vllm/model_executor/models/phimoe.py +666 -0
  852. vllm/model_executor/models/pixtral.py +1281 -0
  853. vllm/model_executor/models/plamo2.py +736 -0
  854. vllm/model_executor/models/prithvi_geospatial_mae.py +231 -0
  855. vllm/model_executor/models/qwen.py +360 -0
  856. vllm/model_executor/models/qwen2.py +552 -0
  857. vllm/model_executor/models/qwen2_5_omni_thinker.py +901 -0
  858. vllm/model_executor/models/qwen2_5_vl.py +1136 -0
  859. vllm/model_executor/models/qwen2_audio.py +402 -0
  860. vllm/model_executor/models/qwen2_moe.py +531 -0
  861. vllm/model_executor/models/qwen2_rm.py +130 -0
  862. vllm/model_executor/models/qwen2_vl.py +1409 -0
  863. vllm/model_executor/models/qwen3.py +319 -0
  864. vllm/model_executor/models/qwen3_moe.py +528 -0
  865. vllm/model_executor/models/qwen_vl.py +784 -0
  866. vllm/model_executor/models/registry.py +611 -0
  867. vllm/model_executor/models/roberta.py +332 -0
  868. vllm/model_executor/models/siglip.py +522 -0
  869. vllm/model_executor/models/skyworkr1v.py +949 -0
  870. vllm/model_executor/models/smolvlm.py +51 -0
  871. vllm/model_executor/models/solar.py +504 -0
  872. vllm/model_executor/models/stablelm.py +349 -0
  873. vllm/model_executor/models/starcoder2.py +355 -0
  874. vllm/model_executor/models/telechat2.py +139 -0
  875. vllm/model_executor/models/teleflm.py +78 -0
  876. vllm/model_executor/models/transformers.py +442 -0
  877. vllm/model_executor/models/ultravox.py +655 -0
  878. vllm/model_executor/models/utils.py +714 -0
  879. vllm/model_executor/models/vision.py +149 -0
  880. vllm/model_executor/models/whisper.py +746 -0
  881. vllm/model_executor/models/zamba2.py +1008 -0
  882. vllm/model_executor/parameter.py +458 -0
  883. vllm/model_executor/pooling_metadata.py +71 -0
  884. vllm/model_executor/sampling_metadata.py +596 -0
  885. vllm/model_executor/utils.py +53 -0
  886. vllm/multimodal/__init__.py +31 -0
  887. vllm/multimodal/audio.py +105 -0
  888. vllm/multimodal/base.py +218 -0
  889. vllm/multimodal/hasher.py +103 -0
  890. vllm/multimodal/image.py +77 -0
  891. vllm/multimodal/inputs.py +843 -0
  892. vllm/multimodal/parse.py +454 -0
  893. vllm/multimodal/processing.py +1760 -0
  894. vllm/multimodal/profiling.py +274 -0
  895. vllm/multimodal/registry.py +321 -0
  896. vllm/multimodal/utils.py +386 -0
  897. vllm/multimodal/video.py +166 -0
  898. vllm/outputs.py +521 -0
  899. vllm/platforms/__init__.py +286 -0
  900. vllm/platforms/cpu.py +182 -0
  901. vllm/platforms/cuda.py +463 -0
  902. vllm/platforms/hpu.py +94 -0
  903. vllm/platforms/interface.py +427 -0
  904. vllm/platforms/neuron.py +69 -0
  905. vllm/platforms/rocm.py +346 -0
  906. vllm/platforms/tpu.py +174 -0
  907. vllm/platforms/xpu.py +142 -0
  908. vllm/plugins/__init__.py +82 -0
  909. vllm/pooling_params.py +53 -0
  910. vllm/profiler/__init__.py +7 -0
  911. vllm/profiler/layerwise_profile.py +374 -0
  912. vllm/profiler/utils.py +147 -0
  913. vllm/prompt_adapter/__init__.py +0 -0
  914. vllm/prompt_adapter/layers.py +82 -0
  915. vllm/prompt_adapter/models.py +357 -0
  916. vllm/prompt_adapter/request.py +36 -0
  917. vllm/prompt_adapter/utils.py +97 -0
  918. vllm/prompt_adapter/worker_manager.py +178 -0
  919. vllm/py.typed +2 -0
  920. vllm/reasoning/__init__.py +12 -0
  921. vllm/reasoning/abs_reasoning_parsers.py +189 -0
  922. vllm/reasoning/deepseek_r1_reasoning_parser.py +172 -0
  923. vllm/reasoning/granite_reasoning_parser.py +362 -0
  924. vllm/sampling_params.py +598 -0
  925. vllm/scalar_type.py +335 -0
  926. vllm/scripts.py +14 -0
  927. vllm/sequence.py +1486 -0
  928. vllm/spec_decode/__init__.py +0 -0
  929. vllm/spec_decode/batch_expansion.py +505 -0
  930. vllm/spec_decode/draft_model_runner.py +335 -0
  931. vllm/spec_decode/interfaces.py +98 -0
  932. vllm/spec_decode/medusa_worker.py +137 -0
  933. vllm/spec_decode/metrics.py +212 -0
  934. vllm/spec_decode/mlp_speculator_worker.py +93 -0
  935. vllm/spec_decode/mqa_scorer.py +159 -0
  936. vllm/spec_decode/multi_step_worker.py +416 -0
  937. vllm/spec_decode/ngram_worker.py +195 -0
  938. vllm/spec_decode/proposer_worker_base.py +58 -0
  939. vllm/spec_decode/smaller_tp_proposer_worker.py +194 -0
  940. vllm/spec_decode/spec_decode_worker.py +1324 -0
  941. vllm/spec_decode/target_model_runner.py +44 -0
  942. vllm/spec_decode/top1_proposer.py +274 -0
  943. vllm/spec_decode/util.py +276 -0
  944. vllm/test_utils.py +129 -0
  945. vllm/third_party/__init__.py +0 -0
  946. vllm/third_party/pynvml.py +6139 -0
  947. vllm/tracing.py +130 -0
  948. vllm/transformers_utils/__init__.py +19 -0
  949. vllm/transformers_utils/config.py +813 -0
  950. vllm/transformers_utils/configs/__init__.py +52 -0
  951. vllm/transformers_utils/configs/arctic.py +206 -0
  952. vllm/transformers_utils/configs/chatglm.py +71 -0
  953. vllm/transformers_utils/configs/cohere2.py +194 -0
  954. vllm/transformers_utils/configs/dbrx.py +280 -0
  955. vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
  956. vllm/transformers_utils/configs/eagle.py +65 -0
  957. vllm/transformers_utils/configs/exaone.py +191 -0
  958. vllm/transformers_utils/configs/falcon.py +89 -0
  959. vllm/transformers_utils/configs/h2ovl.py +15 -0
  960. vllm/transformers_utils/configs/internvl.py +53 -0
  961. vllm/transformers_utils/configs/jais.py +237 -0
  962. vllm/transformers_utils/configs/kimi_vl.py +36 -0
  963. vllm/transformers_utils/configs/medusa.py +62 -0
  964. vllm/transformers_utils/configs/mllama.py +30 -0
  965. vllm/transformers_utils/configs/mlp_speculator.py +67 -0
  966. vllm/transformers_utils/configs/moonvit.py +32 -0
  967. vllm/transformers_utils/configs/mpt.py +179 -0
  968. vllm/transformers_utils/configs/nemotron.py +204 -0
  969. vllm/transformers_utils/configs/nvlm_d.py +14 -0
  970. vllm/transformers_utils/configs/skyworkr1v.py +53 -0
  971. vllm/transformers_utils/configs/solar.py +246 -0
  972. vllm/transformers_utils/configs/telechat2.py +63 -0
  973. vllm/transformers_utils/configs/ultravox.py +107 -0
  974. vllm/transformers_utils/detokenizer.py +167 -0
  975. vllm/transformers_utils/detokenizer_utils.py +188 -0
  976. vllm/transformers_utils/processor.py +210 -0
  977. vllm/transformers_utils/processors/__init__.py +6 -0
  978. vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
  979. vllm/transformers_utils/s3_utils.py +161 -0
  980. vllm/transformers_utils/tokenizer.py +291 -0
  981. vllm/transformers_utils/tokenizer_base.py +146 -0
  982. vllm/transformers_utils/tokenizer_group.py +110 -0
  983. vllm/transformers_utils/tokenizers/__init__.py +9 -0
  984. vllm/transformers_utils/tokenizers/mistral.py +483 -0
  985. vllm/transformers_utils/utils.py +98 -0
  986. vllm/triton_utils/__init__.py +5 -0
  987. vllm/triton_utils/importing.py +53 -0
  988. vllm/usage/__init__.py +0 -0
  989. vllm/usage/usage_lib.py +255 -0
  990. vllm/utils.py +2692 -0
  991. vllm/v1/__init__.py +0 -0
  992. vllm/v1/attention/__init__.py +0 -0
  993. vllm/v1/attention/backends/__init__.py +0 -0
  994. vllm/v1/attention/backends/flash_attn.py +783 -0
  995. vllm/v1/attention/backends/flashinfer.py +638 -0
  996. vllm/v1/attention/backends/mla/__init__.py +0 -0
  997. vllm/v1/attention/backends/mla/common.py +974 -0
  998. vllm/v1/attention/backends/mla/flashmla.py +149 -0
  999. vllm/v1/attention/backends/mla/triton_mla.py +118 -0
  1000. vllm/v1/attention/backends/pallas.py +221 -0
  1001. vllm/v1/attention/backends/triton_attn.py +198 -0
  1002. vllm/v1/core/__init__.py +0 -0
  1003. vllm/v1/core/block_pool.py +281 -0
  1004. vllm/v1/core/encoder_cache_manager.py +149 -0
  1005. vllm/v1/core/kv_cache_manager.py +385 -0
  1006. vllm/v1/core/kv_cache_utils.py +744 -0
  1007. vllm/v1/core/sched/__init__.py +0 -0
  1008. vllm/v1/core/sched/interface.py +134 -0
  1009. vllm/v1/core/sched/output.py +126 -0
  1010. vllm/v1/core/sched/scheduler.py +838 -0
  1011. vllm/v1/core/sched/utils.py +22 -0
  1012. vllm/v1/core/specialized_manager.py +161 -0
  1013. vllm/v1/engine/__init__.py +166 -0
  1014. vllm/v1/engine/async_llm.py +532 -0
  1015. vllm/v1/engine/core.py +701 -0
  1016. vllm/v1/engine/core_client.py +942 -0
  1017. vllm/v1/engine/detokenizer.py +260 -0
  1018. vllm/v1/engine/exceptions.py +16 -0
  1019. vllm/v1/engine/llm_engine.py +285 -0
  1020. vllm/v1/engine/logprobs.py +198 -0
  1021. vllm/v1/engine/mm_input_cache.py +82 -0
  1022. vllm/v1/engine/output_processor.py +420 -0
  1023. vllm/v1/engine/parallel_sampling.py +132 -0
  1024. vllm/v1/engine/processor.py +387 -0
  1025. vllm/v1/executor/__init__.py +0 -0
  1026. vllm/v1/executor/abstract.py +112 -0
  1027. vllm/v1/executor/multiproc_executor.py +480 -0
  1028. vllm/v1/executor/ray_distributed_executor.py +61 -0
  1029. vllm/v1/kv_cache_interface.py +166 -0
  1030. vllm/v1/metrics/__init__.py +0 -0
  1031. vllm/v1/metrics/loggers.py +498 -0
  1032. vllm/v1/metrics/stats.py +238 -0
  1033. vllm/v1/outputs.py +111 -0
  1034. vllm/v1/request.py +178 -0
  1035. vllm/v1/sample/__init__.py +0 -0
  1036. vllm/v1/sample/metadata.py +43 -0
  1037. vllm/v1/sample/ops/__init__.py +0 -0
  1038. vllm/v1/sample/ops/bad_words.py +38 -0
  1039. vllm/v1/sample/ops/penalties.py +58 -0
  1040. vllm/v1/sample/ops/topk_topp_sampler.py +315 -0
  1041. vllm/v1/sample/rejection_sampler.py +631 -0
  1042. vllm/v1/sample/sampler.py +270 -0
  1043. vllm/v1/sample/tpu/__init__.py +0 -0
  1044. vllm/v1/sample/tpu/metadata.py +118 -0
  1045. vllm/v1/sample/tpu/sampler.py +154 -0
  1046. vllm/v1/serial_utils.py +274 -0
  1047. vllm/v1/spec_decode/__init__.py +0 -0
  1048. vllm/v1/spec_decode/eagle.py +318 -0
  1049. vllm/v1/spec_decode/metadata.py +61 -0
  1050. vllm/v1/spec_decode/metrics.py +164 -0
  1051. vllm/v1/spec_decode/ngram_proposer.py +131 -0
  1052. vllm/v1/spec_decode/utils.py +18 -0
  1053. vllm/v1/stats/__init__.py +0 -0
  1054. vllm/v1/stats/common.py +453 -0
  1055. vllm/v1/structured_output/__init__.py +113 -0
  1056. vllm/v1/structured_output/backend_guidance.py +215 -0
  1057. vllm/v1/structured_output/backend_types.py +96 -0
  1058. vllm/v1/structured_output/backend_xgrammar.py +299 -0
  1059. vllm/v1/structured_output/request.py +84 -0
  1060. vllm/v1/structured_output/utils.py +174 -0
  1061. vllm/v1/utils.py +249 -0
  1062. vllm/v1/worker/__init__.py +0 -0
  1063. vllm/v1/worker/block_table.py +87 -0
  1064. vllm/v1/worker/gpu_input_batch.py +677 -0
  1065. vllm/v1/worker/gpu_model_runner.py +1776 -0
  1066. vllm/v1/worker/gpu_worker.py +349 -0
  1067. vllm/v1/worker/lora_model_runner_mixin.py +145 -0
  1068. vllm/v1/worker/tpu_model_runner.py +1419 -0
  1069. vllm/v1/worker/tpu_worker.py +260 -0
  1070. vllm/v1/worker/utils.py +74 -0
  1071. vllm/v1/worker/worker_base.py +64 -0
  1072. vllm/version.py +40 -0
  1073. vllm/vllm_flash_attn/.gitkeep +0 -0
  1074. vllm/worker/__init__.py +0 -0
  1075. vllm/worker/cache_engine.py +144 -0
  1076. vllm/worker/cpu_enc_dec_model_runner.py +323 -0
  1077. vllm/worker/cpu_model_runner.py +668 -0
  1078. vllm/worker/cpu_pooling_model_runner.py +122 -0
  1079. vllm/worker/cpu_worker.py +400 -0
  1080. vllm/worker/enc_dec_model_runner.py +542 -0
  1081. vllm/worker/hpu_model_runner.py +2221 -0
  1082. vllm/worker/hpu_worker.py +483 -0
  1083. vllm/worker/model_runner.py +2056 -0
  1084. vllm/worker/model_runner_base.py +281 -0
  1085. vllm/worker/multi_step_hpu_worker.py +122 -0
  1086. vllm/worker/multi_step_model_runner.py +908 -0
  1087. vllm/worker/multi_step_tpu_worker.py +107 -0
  1088. vllm/worker/multi_step_worker.py +196 -0
  1089. vllm/worker/neuron_model_runner.py +336 -0
  1090. vllm/worker/neuron_worker.py +138 -0
  1091. vllm/worker/pooling_model_runner.py +200 -0
  1092. vllm/worker/tpu_model_runner.py +908 -0
  1093. vllm/worker/tpu_worker.py +332 -0
  1094. vllm/worker/utils.py +52 -0
  1095. vllm/worker/worker.py +570 -0
  1096. vllm/worker/worker_base.py +644 -0
  1097. vllm/worker/xpu_model_runner.py +603 -0
  1098. vllm/worker/xpu_worker.py +185 -0
  1099. vllm_cpu-0.8.5.post2.dist-info/METADATA +309 -0
  1100. vllm_cpu-0.8.5.post2.dist-info/RECORD +1103 -0
  1101. vllm_cpu-0.8.5.post2.dist-info/WHEEL +5 -0
  1102. vllm_cpu-0.8.5.post2.dist-info/entry_points.txt +2 -0
  1103. vllm_cpu-0.8.5.post2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1518 @@
+ # SPDX-License-Identifier: Apache-2.0
+
+ import itertools
+ from abc import abstractmethod
+ from typing import Any, Literal, Optional, Union
+
+ import torch
+ import torch.nn as nn
+ from torch.nn.parameter import Parameter, UninitializedParameter
+
+ from vllm.distributed import (divide, get_tensor_model_parallel_rank,
+                               get_tensor_model_parallel_world_size,
+                               split_tensor_along_last_dim,
+                               tensor_model_parallel_all_gather,
+                               tensor_model_parallel_all_reduce)
+ from vllm.logger import init_logger
+ from vllm.model_executor.layers.quantization.base_config import (
+     QuantizationConfig, QuantizeMethodBase)
+ from vllm.model_executor.layers.utils import dispatch_unquantized_gemm
+ # yapf: disable
+ from vllm.model_executor.parameter import (BasevLLMParameter,
+                                            BlockQuantScaleParameter,
+                                            PackedColumnParameter,
+                                            PackedvLLMParameter,
+                                            PerTensorScaleParameter,
+                                            RowvLLMParameter)
+ # yapf: enable
+ from vllm.model_executor.utils import set_weight_attrs
+
+ logger = init_logger(__name__)
+
+ WEIGHT_LOADER_V2_SUPPORTED = [
+     "CompressedTensorsLinearMethod",
+     "BitBLASLinearMethod",
+     "GPTQBitBLASLinearMethod",
+     "AWQMarlinLinearMethod",
+     "AWQLinearMethod",
+     "GPTQMarlinLinearMethod",
+     "Fp8LinearMethod",
+     "MarlinLinearMethod",
+     "QQQLinearMethod",
+     "GPTQMarlin24LinearMethod",
+     "TPUInt8LinearMethod",
+     "GPTQLinearMethod",
+     "FBGEMMFp8LinearMethod",
+     "ModelOptFp8LinearMethod",
+     "IPEXAWQLinearMethod",
+     "IPEXGPTQLinearMethod",
+     "HQQMarlinMethod",
+     "QuarkLinearMethod",
+     "ModelOptNvFp4LinearMethod",
+ ]
+
+
+ def adjust_bitblas_shard(param, shard_size, shard_offset):
+     bitblas_tile_size = getattr(param, "bitblas_tile_size", None)
+     if bitblas_tile_size is not None:
+         return (shard_size // bitblas_tile_size,
+                 shard_offset // bitblas_tile_size)
+
+     return shard_size, shard_offset
+
+
+ def adjust_marlin_shard(param, shard_size, shard_offset):
+     marlin_tile_size = getattr(param, "marlin_tile_size", None)
+     if marlin_tile_size is None:
+         return shard_size, shard_offset
+
+     return shard_size * marlin_tile_size, shard_offset * marlin_tile_size
+
+
+ def adjust_bitsandbytes_4bit_shard(param: Parameter,
+                                    shard_offsets: dict[str, tuple[int, int]],
+                                    loaded_shard_id: str) -> tuple[int, int]:
+     """Adjust the quantization offsets and sizes for BitsAndBytes sharding."""
+
+     total, _ = shard_offsets["total"]
+     orig_offset, orig_size = shard_offsets[loaded_shard_id]
+
+     quantized_total = param.data.shape[0]
+     quantized_offset = orig_offset * quantized_total // total
+     quantized_size = orig_size * quantized_total // total
+
+     return quantized_size, quantized_offset
+
+
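A quick numeric sketch of the proportional rescaling above; the sizes are invented for illustration only (they do not come from any real checkpoint), but the arithmetic is exactly what adjust_bitsandbytes_4bit_shard performs:

    # Hypothetical fused QKV: 4096 + 1024 + 1024 logical rows.
    shard_offsets = {
        "total": (6144, 0),
        "q": (0, 4096),
        "k": (4096, 1024),
        "v": (5120, 1024),
    }
    quantized_total = 3072  # physical rows actually stored after 4-bit packing
    for shard_id in ("q", "k", "v"):
        off, size = shard_offsets[shard_id]
        print(shard_id, off * quantized_total // 6144,
              size * quantized_total // 6144)
    # q -> offset 0, size 2048; k -> 2048, 512; v -> 2560, 512
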
+ def adjust_scalar_to_fused_array(param, loaded_weight, shard_id):
+     """For fused modules (QKV and MLP) we have an array of length
+     N that holds 1 scale for each "logical" matrix. So the param
+     is an array of length N. The loaded_weight corresponds to
+     one of the shards on disk. Here, we slice the param based on
+     the shard_id for loading.
+     """
+     qkv_idxs = {"q": 0, "k": 1, "v": 2}
+
+     if isinstance(shard_id, str):
+         shard_id = qkv_idxs[shard_id]
+     elif not isinstance(shard_id, int):
+         raise ValueError(f"Unknown Shard Id {shard_id}")
+
+     # AutoFP8 scales do not have a shape
+     # compressed-tensors scales do have a shape
+     if len(loaded_weight.shape) != 0:
+         assert loaded_weight.shape[0] == 1
+         loaded_weight = loaded_weight[0]
+
+     return param[shard_id], loaded_weight
+
+
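A minimal usage sketch for the helper above, with toy tensors rather than a real load path (torch is already imported in this module):

    # Fused QKV keeps one scale per logical matrix in a length-3 array.
    scales = torch.zeros(3)
    loaded = torch.tensor([0.02])  # one shard's scale as stored on disk
    dst, src = adjust_scalar_to_fused_array(scales, loaded, "k")
    dst.copy_(src)  # writes 0.02 into scales[1]
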
+ # TODO(Isotr0py): We might need a more flexible structure to handle
+ # bitsandbytes shard offsets.
+ def left_shift_bitsandbytes_4bit_shard(bnb_weight_attrs: dict[str, Any]):
+     """
+     Separate the BitsAndBytes 4-bit shard.
+
+     For example, given bnb weight attributes as below:
+     {
+         'bnb_shard_offsets': array([0, 4, 8, 16]),
+         'bnb_quant_state': {0: ..., 1: ..., 2: ...},
+     }
+
+     The function will return:
+     {
+         'bnb_shard_offsets': array([0, 4]),
+         'bnb_quant_state': {0: ...},
+     }
+     and
+     {
+         'bnb_shard_offsets': array([0, 4, 12]),
+         'bnb_quant_state': {0: ..., 1: ...},
+     }
+     """
+     shard_offsets = bnb_weight_attrs["bnb_shard_offsets"]
+     offset_l = shard_offsets[:2]
+     offset_r = shard_offsets[1:] - shard_offsets[1]
+     quant_state_l = {0: bnb_weight_attrs["bnb_quant_state"][0]}
+     quant_state_r = {
+         i - 1: bnb_weight_attrs["bnb_quant_state"][i]
+         for i in range(1,
+                        len(shard_offsets) - 1)
+     }
+     left = dict(bnb_shard_offsets=offset_l, bnb_quant_state=quant_state_l)
+     right = dict(bnb_shard_offsets=offset_r, bnb_quant_state=quant_state_r)
+     return left, right
+
+
+ class LinearMethodBase(QuantizeMethodBase):
+     """Base class for different (maybe quantized) linear methods."""
+
+     @abstractmethod
+     def create_weights(self, layer: torch.nn.Module,
+                        input_size_per_partition: int,
+                        output_partition_sizes: list[int], input_size: int,
+                        output_size: int, params_dtype: torch.dtype,
+                        **extra_weight_attrs):
+         """Create weights for a linear layer.
+            The weights will be set as attributes of the layer.
+
+         Args:
+             layer: The layer that is using the LinearMethodBase factory.
+             input_size_per_partition: Size of the weight input dim on rank X.
+             output_partition_sizes: Sizes of the output dim of each logical
+                 weight on rank X. E.g., output_partition_sizes for QKVLinear
+                 is a list containing the widths of Wq, Wk, Wv on rank X.
+             input_size: Size of the input dim of the weight across all ranks.
+             output_size: Size of the output dim of the weight across all ranks.
+             params_dtype: Datatype of the parameters.
+         """
+         raise NotImplementedError
+
+     @abstractmethod
+     def apply(self,
+               layer: torch.nn.Module,
+               x: torch.Tensor,
+               bias: Optional[torch.Tensor] = None) -> torch.Tensor:
+         """Apply the weights in layer to the input tensor.
+         Expects create_weights to have been called before on the layer."""
+         raise NotImplementedError
+
+
+ class UnquantizedLinearMethod(LinearMethodBase):
+     """Linear method without quantization."""
+
+     def create_weights(self, layer: torch.nn.Module,
+                        input_size_per_partition: int,
+                        output_partition_sizes: list[int], input_size: int,
+                        output_size: int, params_dtype: torch.dtype,
+                        **extra_weight_attrs):
+         weight = Parameter(torch.empty(sum(output_partition_sizes),
+                                        input_size_per_partition,
+                                        dtype=params_dtype),
+                            requires_grad=False)
+         set_weight_attrs(weight, {"input_dim": 1, "output_dim": 0})
+         layer.register_parameter("weight", weight)
+         set_weight_attrs(weight, extra_weight_attrs)
+
+     def apply(self,
+               layer: torch.nn.Module,
+               x: torch.Tensor,
+               bias: Optional[torch.Tensor] = None) -> torch.Tensor:
+
+         return dispatch_unquantized_gemm()(x, layer.weight, bias)
+
+
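A hedged sketch of the create_weights/apply contract, using a bare nn.Module and arbitrary sizes. The weight is allocated with torch.empty, so the output values are meaningless until a checkpoint is loaded; this only illustrates the shapes and the call order:

    layer = nn.Module()
    method = UnquantizedLinearMethod()
    # Input width 16, one logical output of width 8 on this rank.
    method.create_weights(layer, 16, [8], 16, 8, torch.float32)
    x = torch.randn(2, 16)
    y = method.apply(layer, x)  # plain GEMM: (2, 16) @ (16, 8)^T -> (2, 8)
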
+ class LinearBase(torch.nn.Module):
+     """Base linear layer.
+
+     Args:
+         input_size: input dimension of the linear layer.
+         output_size: output dimension of the linear layer.
+         skip_bias_add: If true, skip adding bias but instead return it.
+         params_dtype: Data type for the parameters.
+         quant_config: Quantization config.
+         return_bias: If true, return bias together with outputs in forward pass.
+     """
+
+     def __init__(
+         self,
+         input_size: int,
+         output_size: int,
+         skip_bias_add: bool = False,
+         params_dtype: Optional[torch.dtype] = None,
+         quant_config: Optional[QuantizationConfig] = None,
+         prefix: str = "",
+         *,
+         return_bias: bool = True,
+     ):
+         super().__init__()
+
+         # Keep input parameters
+         self.input_size = input_size
+         self.output_size = output_size
+         self.skip_bias_add = skip_bias_add
+         if params_dtype is None:
+             params_dtype = torch.get_default_dtype()
+         self.params_dtype = params_dtype
+         if quant_config is None:
+             self.quant_method: Optional[
+                 QuantizeMethodBase] = UnquantizedLinearMethod()
+         else:
+             self.quant_method = quant_config.get_quant_method(self,
+                                                               prefix=prefix)
+         self.return_bias = return_bias
+
+     def forward(
+         self, x: torch.Tensor
+     ) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
+         raise NotImplementedError
+
+
+ class ReplicatedLinear(LinearBase):
+     """Replicated linear layer.
+
+     Args:
+         input_size: input dimension of the linear layer.
+         output_size: output dimension of the linear layer.
+         bias: If true, add bias.
+         skip_bias_add: If true, skip adding bias but instead return it.
+         params_dtype: Data type for the parameters.
+         quant_config: Quantization config.
+         prefix: The name of the layer in the state dict, including all parents
+             (e.g. model.layers.0.qkv_proj)
+     """
+
+     def __init__(
+         self,
+         input_size: int,
+         output_size: int,
+         bias: bool = True,
+         skip_bias_add: bool = False,
+         params_dtype: Optional[torch.dtype] = None,
+         quant_config: Optional[QuantizationConfig] = None,
+         prefix: str = "",
+         *,
+         return_bias: bool = True,
+     ):
+         super().__init__(input_size,
+                          output_size,
+                          skip_bias_add,
+                          params_dtype,
+                          quant_config,
+                          prefix=prefix,
+                          return_bias=return_bias)
+
+         # All linear layers support a quant method.
+         assert self.quant_method is not None
+         self.quant_method.create_weights(self,
+                                          self.input_size, [self.output_size],
+                                          self.input_size,
+                                          self.output_size,
+                                          self.params_dtype,
+                                          weight_loader=self.weight_loader)
+
+         if bias:
+             self.bias = Parameter(
+                 torch.empty(self.output_size, dtype=self.params_dtype))
+             set_weight_attrs(self.bias, {
+                 "output_dim": 0,
+                 "weight_loader": self.weight_loader,
+             })
+         else:
+             self.register_parameter("bias", None)
+
+     def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
+         # If the weight on disk does not have a shape, give it one
+         # (such as scales for AutoFP8).
+         # Special case for GGUF
+
+         is_gguf_weight = getattr(param, "is_gguf_weight", False)
+         is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
+         if is_gguf_weight_type:
+             param.weight_type = loaded_weight.item()
+
+         # Materialize GGUF UninitializedParameter
+         if is_gguf_weight and isinstance(param, UninitializedParameter):
+             param.materialize(loaded_weight.shape, dtype=loaded_weight.dtype)
+
+         if len(loaded_weight.shape) == 0:
+             loaded_weight = loaded_weight.reshape(1)
+
+         assert param.size() == loaded_weight.size(), (
+             f"Tried to load weights of size {loaded_weight.size()} "
+             f"to a parameter of size {param.size()}")
+         param.data.copy_(loaded_weight)
+
+     def forward(
+         self, x: torch.Tensor
+     ) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
+         bias = self.bias if not self.skip_bias_add else None
+         assert self.quant_method is not None
+         output = self.quant_method.apply(self, x, bias)
+         output_bias = self.bias if self.skip_bias_add else None
+         if not self.return_bias:
+             return output
+         return output, output_bias
+
+     def extra_repr(self) -> str:
+         s = f"in_features={self.input_size}"
+         s += f", output_features={self.output_size}"
+         s += f", bias={self.bias is not None}"
+         return s
+
+
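A small single-process usage sketch for the layer above (sizes arbitrary, weights uninitialized). By default forward returns an (output, output_bias) tuple; pass return_bias=False to get just the tensor:

    layer = ReplicatedLinear(input_size=16, output_size=32, bias=True)
    x = torch.randn(4, 16)
    out, out_bias = layer(x)  # out: (4, 32); out_bias is None because
                              # skip_bias_add=False already applied the bias
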
+ class ColumnParallelLinear(LinearBase):
+     """Linear layer with column parallelism.
+
+     The linear layer is defined as Y = XA + b. A is parallelized along
+     its second dimension as A = [A_1, ..., A_p].
+
+     Args:
+         input_size: first dimension of matrix A.
+         output_size: second dimension of matrix A.
+         bias: If true, add bias.
+         gather_output: If true, call all-gather on output and make Y available
+                        to all GPUs, otherwise, every GPU will have its output
+                        which is Y_i = XA_i
+         skip_bias_add: This was added to enable performance optimizations where
+                        bias can be fused with other element-wise operations. We
+                        skip adding bias but instead return it.
+         params_dtype: Data type for the parameters.
+         quant_config: Quantization config.
+         output_sizes: list of output sizes packed into one output; e.g. for QKV
+                       the list would have size 3.
+         prefix: The name of the layer in the state dict, including all parents
+             (e.g. model.layers.0.qkv_proj)
+     """
+
+     def __init__(
+         self,
+         input_size: int,
+         output_size: int,
+         bias: bool = True,
+         gather_output: bool = False,
+         skip_bias_add: bool = False,
+         params_dtype: Optional[torch.dtype] = None,
+         quant_config: Optional[QuantizationConfig] = None,
+         output_sizes: Optional[list[int]] = None,
+         prefix: str = "",
+         *,
+         return_bias: bool = True,
+     ):
+         # Divide the weight matrix along the last dimension.
+         self.tp_size = get_tensor_model_parallel_world_size()
+         self.input_size_per_partition = input_size
+         self.output_size_per_partition = divide(output_size, self.tp_size)
+         self.output_partition_sizes = [self.output_size_per_partition]
+         # If QKV or MergedColumn, use output size of each partition.
+         if hasattr(self, "output_sizes"):
+             self.output_partition_sizes = [
+                 divide(output_size, self.tp_size)
+                 for output_size in self.output_sizes
+             ]
+
+         super().__init__(input_size,
+                          output_size,
+                          skip_bias_add,
+                          params_dtype,
+                          quant_config,
+                          prefix,
+                          return_bias=return_bias)
+
+         self.gather_output = gather_output
+
+         if output_sizes is None:
+             output_sizes = [output_size]
+
+         assert self.quant_method is not None
+         self.quant_method.create_weights(
+             layer=self,
+             input_size_per_partition=self.input_size_per_partition,
+             output_partition_sizes=self.output_partition_sizes,
+             input_size=self.input_size,
+             output_size=self.output_size,
+             params_dtype=self.params_dtype,
+             weight_loader=(
+                 self.weight_loader_v2 if self.quant_method.__class__.__name__
+                 in WEIGHT_LOADER_V2_SUPPORTED else self.weight_loader))
+         if bias:
+             self.bias = Parameter(
+                 torch.empty(self.output_size_per_partition,
+                             dtype=params_dtype))
+             set_weight_attrs(self.bias, {
+                 "output_dim": 0,
+                 "weight_loader": self.weight_loader,
+             })
+         else:
+             self.register_parameter("bias", None)
+
+     def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
+         tp_rank = get_tensor_model_parallel_rank()
+         output_dim = getattr(param, "output_dim", None)
+
+         is_sharded_weight = getattr(param, "is_sharded_weight", False)
+         use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit", False)
+         # bitsandbytes loads the weights of the specific portion
+         # no need to narrow
+         is_sharded_weight = is_sharded_weight or use_bitsandbytes_4bit
+
+         # Special case for GGUF
+         is_gguf_weight = getattr(param, "is_gguf_weight", False)
+         is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
+         if is_gguf_weight_type:
+             param.weight_type = loaded_weight.item()
+
+         # Materialize GGUF UninitializedParameter
+         if is_gguf_weight and isinstance(param, UninitializedParameter):
+             final_shape = list(loaded_weight.shape)
+             if output_dim is not None:
+                 tp_size = get_tensor_model_parallel_world_size()
+                 assert final_shape[output_dim] % tp_size == 0
+                 final_shape[output_dim] = final_shape[output_dim] // tp_size
+             param.materialize(final_shape, dtype=loaded_weight.dtype)
+
+         param_data = param.data
+         if output_dim is not None and not is_sharded_weight:
+             shard_size = param_data.shape[output_dim]
+             start_idx = tp_rank * shard_size
+             loaded_weight = loaded_weight.narrow(output_dim, start_idx,
+                                                  shard_size)
+
+         # Special case for loading scales off disk, which often do not
+         # have a shape (such as in the case of AutoFP8).
+         if len(loaded_weight.shape) == 0:
+             loaded_weight = loaded_weight.reshape(1)
+
+         assert param_data.shape == loaded_weight.shape
+         param_data.copy_(loaded_weight)
+
+     def weight_loader_v2(self, param: Parameter, loaded_weight: torch.Tensor):
+         # Special case for loading scales off disk, which often do not
+         # have a shape (such as in the case of AutoFP8).
+         if len(loaded_weight.shape) == 0:
+             assert loaded_weight.numel() == 1
+             loaded_weight = loaded_weight.reshape(1)
+         param.load_column_parallel_weight(loaded_weight=loaded_weight)
+
+     def forward(
+         self, input_
+     ) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
+         bias = self.bias if not self.skip_bias_add else None
+
+         # Matrix multiply.
+         assert self.quant_method is not None
+         output_parallel = self.quant_method.apply(self, input_, bias)
+         if self.gather_output:
+             # All-gather across the partitions.
+             output = tensor_model_parallel_all_gather(output_parallel)
+         else:
+             output = output_parallel
+         output_bias = self.bias if self.skip_bias_add else None
+         if not self.return_bias:
+             return output
+         return output, output_bias
+
+     def extra_repr(self) -> str:
+         s = f"in_features={self.input_size}"
+         s += f", output_features={self.output_size_per_partition}"
+         s += f", bias={self.bias is not None}"
+         s += f", tp_size={get_tensor_model_parallel_world_size()}"
+         s += f", gather_output={self.gather_output}"
+         return s
+
+
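To make the column-parallel math concrete, a plain-torch simulation of two ranks that needs none of vLLM's distributed state; it reproduces exactly what gather_output=True reassembles:

    X = torch.randn(4, 16)
    A = torch.randn(16, 32)     # full logical weight, Y = X @ A
    A1, A2 = A.chunk(2, dim=1)  # per-rank partitions along A's second dim
    Y1, Y2 = X @ A1, X @ A2     # each rank computes Y_i = X @ A_i
    assert torch.allclose(torch.cat([Y1, Y2], dim=1), X @ A, atol=1e-5)
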
505
+ class MergedColumnParallelLinear(ColumnParallelLinear):
506
+ """Packed linear layers with column parallelism.
507
+
508
+ Similar to ColumnParallelLinear, but the weight matrix is concatenated
509
+ along the output dimension. When the weight matrix is loaded, the
510
+ different partitions are sharded separately.
511
+
512
+ Args:
513
+ input_size: input dimension of the linear layer.
514
+ output_sizes: list of output dimensions of the linear layer.
515
+ bias: If true, add bias.
516
+ gather_output: If true, call all-gather on output and make the output
517
+ available to all GPUs, otherwise, every GPU will have
518
+ its own output.
519
+ skip_bias_add: This was added to enable performance optimizations where
520
+ bias can be fused with other element-wise operations. we
521
+ skip adding bias but instead return it.
522
+ params_dtype: Data type for the parameters.
523
+ quant_config: Quantization configure.
524
+ prefix: The name of the layer in the state dict, including all parents
525
+ (e.g. model.layers.0.qkv_proj)
526
+ """
527
+
528
+ def __init__(
529
+ self,
530
+ input_size: int,
531
+ output_sizes: list[int],
532
+ bias: bool = True,
533
+ gather_output: bool = False,
534
+ skip_bias_add: bool = False,
535
+ params_dtype: Optional[torch.dtype] = None,
536
+ quant_config: Optional[QuantizationConfig] = None,
537
+ prefix: str = "",
538
+ *,
+        return_bias: bool = True,
+    ):
+        self.output_sizes = output_sizes
+        tp_size = get_tensor_model_parallel_world_size()
+        assert all(output_size % tp_size == 0 for output_size in output_sizes)
+        super().__init__(input_size=input_size,
+                         output_size=sum(output_sizes),
+                         bias=bias,
+                         gather_output=gather_output,
+                         skip_bias_add=skip_bias_add,
+                         params_dtype=params_dtype,
+                         quant_config=quant_config,
+                         prefix=prefix,
+                         return_bias=return_bias)
+
+    def weight_loader(self,
+                      param: Parameter,
+                      loaded_weight: torch.Tensor,
+                      loaded_shard_id: Optional[int] = None):
+
+        # Special case for GGUF
+        # initialize GGUF param after we know the quantize type
+        is_gguf_weight = getattr(param, "is_gguf_weight", False)
+        is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
+        if is_gguf_weight_type:
+            if loaded_shard_id is not None:
+                param.data[loaded_shard_id].copy_(loaded_weight)
+                param.shard_weight_type[loaded_shard_id] = loaded_weight.item()
+            else:
+                param.shard_weight_type = {
+                    i: loaded_weight.item()
+                    for i, _ in enumerate(self.output_sizes)
+                }
+            return
+
+        if is_gguf_weight:
+            tp_size = get_tensor_model_parallel_world_size()
+            tp_rank = get_tensor_model_parallel_rank()
+
+            output_dim = getattr(param, "output_dim", None)
+            shard_size = loaded_weight.size(output_dim) // tp_size
+            start_idx = tp_rank * shard_size
+
+            if loaded_shard_id is not None:
+                loaded_weight = loaded_weight.narrow(output_dim, start_idx,
+                                                     shard_size)
+                param.shard_id.append(loaded_shard_id)
+                param.shard_id_map[loaded_shard_id] = len(param.data_container)
+                param.data_container.append(loaded_weight)
+                if len(param.data_container) == 2:
+                    self.qweight = param.materialize_nested()
+            return
+
+        param_data = param.data
+        output_dim = getattr(param, "output_dim", None)
+        # Special case for AQLM codebooks.
+        is_metadata = getattr(param, "is_metadata", False)
+        # Special case for per-tensor scale to load scalar into fused array.
+        needs_scalar_to_array = getattr(param, "needs_scalar_to_array", False)
+
+        if loaded_shard_id is None:
+            # Loaded weight is already fused on disk (mlp).
+            # (e.g., Phi-3's gate_up_proj).
+            if output_dim is None:
+                if needs_scalar_to_array:
+                    param_data, loaded_weight = adjust_scalar_to_fused_array(
+                        param_data, loaded_weight, 0)
+
+                assert param_data.shape == loaded_weight.shape
+                param_data.copy_(loaded_weight)
+                return
+            current_shard_offset = 0
+            use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit",
+                                            False)
+            shard_offsets: list[tuple[int, int, int]] = []
+            for i, output_size in enumerate(self.output_sizes):
+                shard_offsets.append((i, current_shard_offset, output_size))
+                current_shard_offset += output_size
+            packed_dim = getattr(param, "packed_dim", None)
+            for shard_id, shard_offset, shard_size in shard_offsets:
+                # Special case for Quantization.
+                # If quantized, we need to adjust the offset and size to
+                # account for the packing.
+                if packed_dim == output_dim:
+                    shard_size = shard_size // param.pack_factor
+                    shard_offset = shard_offset // param.pack_factor
+                    # Special case for Marlin.
+                    shard_size, shard_offset = adjust_marlin_shard(
+                        param, shard_size, shard_offset)
+
+                shard_size, shard_offset = adjust_bitblas_shard(
+                    param, shard_size, shard_offset)
+
+                if use_bitsandbytes_4bit:
+                    index = list(itertools.accumulate([0] + self.output_sizes))
+                    orig_offsets = {
+                        str(i): (index[i], size)
+                        for i, size in enumerate(self.output_sizes)
+                    }
+                    orig_offsets["total"] = (self.output_size, 0)
+                    shard_size, shard_offset = adjust_bitsandbytes_4bit_shard(
+                        param, orig_offsets, str(shard_id))
+
+                loaded_weight_shard = loaded_weight.narrow(
+                    output_dim, shard_offset, shard_size)
+                self.weight_loader(param, loaded_weight_shard, shard_id)
+            return
+
+        assert loaded_shard_id < len(self.output_sizes)
+        tp_rank = get_tensor_model_parallel_rank()
+        tp_size = get_tensor_model_parallel_world_size()
+        if output_dim is not None:
+            shard_offset = sum(self.output_sizes[:loaded_shard_id]) // tp_size
+            shard_size = self.output_sizes[loaded_shard_id] // tp_size
+            # Special case for quantization.
+            # If quantized, we need to adjust the offset and size to account
+            # for the packing.
+            packed_dim = getattr(param, "packed_dim", None)
+            if packed_dim == output_dim:
+                shard_size = shard_size // param.pack_factor
+                shard_offset = shard_offset // param.pack_factor
+                # Special case for Marlin.
+                shard_size, shard_offset = adjust_marlin_shard(
+                    param, shard_size, shard_offset)
+            shard_size, shard_offset = adjust_bitblas_shard(
+                param, shard_size, shard_offset)
+
+            use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit",
+                                            False)
+            is_sharded_weight = getattr(param, "is_sharded_weight", False)
+            # bitsandbytes loads the weights of the specific portion
+            # no need to narrow
+            is_sharded_weight = is_sharded_weight or use_bitsandbytes_4bit
+
+            if use_bitsandbytes_4bit:
+                shard_size = loaded_weight.shape[output_dim]
+                shard_offset = loaded_weight.shape[output_dim] * \
+                    loaded_shard_id
+
+            param_data = param_data.narrow(output_dim, shard_offset,
+                                           shard_size)
+            start_idx = tp_rank * shard_size
+            if not is_sharded_weight:
+                loaded_weight = loaded_weight.narrow(output_dim, start_idx,
+                                                     shard_size)
+        # Special case for AQLM codebooks.
+        elif is_metadata:
+            # metadata indicates fixed size concatenated along dim 0
+            shard_size = loaded_weight.shape[0]
+            shard_offset = loaded_shard_id * shard_size
+            param_data = param_data.narrow(0, shard_offset, shard_size)
+
+        # Special case for per-tensor scales in fused case.
+        elif needs_scalar_to_array:
+            param_data, loaded_weight = adjust_scalar_to_fused_array(
+                param_data, loaded_weight, loaded_shard_id)
+
+        else:
+            ignore_warning = getattr(param, "ignore_warning", False)
+            if not ignore_warning:
+                logger.warning(
+                    "Loading a weight without `output_dim` attribute in "
+                    "MergedColumnParallelLinear, assume the weight is "
+                    "the same for all partitions.")
+
+        assert param_data.shape == loaded_weight.shape
+        param_data.copy_(loaded_weight)
+
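To make the shard arithmetic above concrete, here is a minimal standalone sketch of the offset/size math (hypothetical gate/up sizes and tp_size; it mirrors the logic of weight_loader and is not part of the diff):

# Sketch: per-rank shard math for MergedColumnParallelLinear.weight_loader,
# assuming output_sizes=[11008, 11008] (gate_proj/up_proj) and tp_size=4.
output_sizes = [11008, 11008]
tp_size = 4
for loaded_shard_id in range(len(output_sizes)):
    # Offset of this shard inside the rank-local fused parameter.
    shard_offset = sum(output_sizes[:loaded_shard_id]) // tp_size
    shard_size = output_sizes[loaded_shard_id] // tp_size
    print(loaded_shard_id, shard_offset, shard_size)
# -> (0, 0, 2752) and (1, 2752, 2752): each rank then narrows the checkpoint
#    tensor by tp_rank * shard_size before copying into its fused slice.
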
+    def _load_fused_module_from_checkpoint(self, param: BasevLLMParameter,
+                                           loaded_weight: torch.Tensor):
+        """
+        Handle special case for models where MLP layers are already
+        fused on disk. In this case, we have no shard id. This function
+        determines the shard id by splitting these layers and then calls
+        the weight loader using the shard id.
+
+        An example of a model with these fused layers:
+        https://huggingface.co/microsoft/Phi-3-mini-4k-instruct
+        """
+
+        current_shard_offset = 0
+        shard_offsets: list[tuple[int, int, int]] = []
+        for i, output_size in enumerate(self.output_sizes):
+            shard_offsets.append((i, current_shard_offset, output_size))
+            current_shard_offset += output_size
+
+        for shard_id, shard_offset, shard_size in shard_offsets:
+            # Special case for Quantization.
+            # If quantized, we need to adjust the offset and size to account
+            # for the packing.
+            if isinstance(param, (PackedColumnParameter, PackedvLLMParameter
+                                  )) and param.packed_dim == param.output_dim:
+                shard_size, shard_offset = \
+                    param.adjust_shard_indexes_for_packing(
+                        shard_size=shard_size, shard_offset=shard_offset)
+
+            loaded_weight_shard = loaded_weight.narrow(param.output_dim,
+                                                       shard_offset,
+                                                       shard_size)
+            self.weight_loader_v2(param, loaded_weight_shard, shard_id)
+
+    def weight_loader_v2(self,
+                         param: BasevLLMParameter,
+                         loaded_weight: torch.Tensor,
+                         loaded_shard_id: Optional[int] = None):
+        if loaded_shard_id is None:
+            if isinstance(param, PerTensorScaleParameter):
+                param.load_merged_column_weight(loaded_weight=loaded_weight,
+                                                shard_id=0)
+                return
+            elif type(param) in (RowvLLMParameter, BasevLLMParameter):
+                param.load_merged_column_weight(loaded_weight=loaded_weight)
+                return
+            # TODO: @dsikka - move to parameter.py
+            self._load_fused_module_from_checkpoint(param, loaded_weight)
+            return
+
+        assert loaded_shard_id < len(self.output_sizes)
+
+        tp_size = get_tensor_model_parallel_world_size()
+
+        if isinstance(param, BlockQuantScaleParameter):
+            from vllm.model_executor.layers.quantization.fp8 import (
+                Fp8LinearMethod, Fp8MoEMethod)
+            assert self.quant_method is not None
+            assert isinstance(self.quant_method,
+                              (Fp8LinearMethod, Fp8MoEMethod))
+            weight_block_size = self.quant_method.quant_config.weight_block_size
+            assert weight_block_size is not None
+            block_n, _ = weight_block_size[0], weight_block_size[1]
+            shard_offset = (
+                (sum(self.output_sizes[:loaded_shard_id]) + block_n - 1) //
+                block_n) // tp_size
+            shard_size = ((self.output_sizes[loaded_shard_id] + block_n - 1) //
+                          block_n // tp_size)
+        else:
+            shard_offset = sum(self.output_sizes[:loaded_shard_id]) // tp_size
+            shard_size = self.output_sizes[loaded_shard_id] // tp_size
+
+        param.load_merged_column_weight(loaded_weight=loaded_weight,
+                                        shard_id=loaded_shard_id,
+                                        shard_offset=shard_offset,
+                                        shard_size=shard_size)
+
+
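For BlockQuantScaleParameter the offsets above are counted in quantization blocks rather than elements. A small sketch of that ceiling division, with hypothetical block and output sizes (not part of the diff):

# Sketch: block-wise fp8 scale shard math, assuming weight_block_size=[128, 128],
# output_sizes=[4096, 4096], tp_size=2. Offsets/sizes count 128-row blocks.
block_n = 128
output_sizes = [4096, 4096]
tp_size = 2
for shard_id in range(len(output_sizes)):
    shard_offset = ((sum(output_sizes[:shard_id]) + block_n - 1) // block_n) // tp_size
    shard_size = (output_sizes[shard_id] + block_n - 1) // block_n // tp_size
    print(shard_id, shard_offset, shard_size)  # (0, 0, 16) then (1, 16, 16)
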
+class QKVParallelLinear(ColumnParallelLinear):
+    """Linear layers for the attention's QKV transformation.
+
+    Linear layers for the linear transformation of the query, key, and value
+    vectors in the attention layer. The weight matrix is concatenated along
+    the output dimension. The layer is parallelized along the head dimension.
+    When the number of key/value heads is smaller than the number of query
+    heads (e.g., multi-query/grouped-query attention), the key/value heads
+    may be replicated while the query heads are partitioned.
+
+    Args:
+        hidden_size: input hidden state size of the transformer.
+        head_size: size of each attention head.
+        total_num_heads: total number of attention query heads.
+        total_num_kv_heads: total number of attention key/value heads. If
+                            None, assume total_num_kv_heads = total_num_heads.
+        bias: If true, add bias.
+        skip_bias_add: This was added to enable performance optimizations
+                       where bias can be fused with other element-wise
+                       operations. We skip adding bias but instead return it.
+        params_dtype: Data type for the parameters.
+        quant_config: Quantization configuration.
+        prefix: The name of the layer in the state dict, including all parents
+                (e.g. model.layers.0.qkv_proj)
+    """
+
+    def __init__(
+        self,
+        hidden_size: int,
+        head_size: int,
+        total_num_heads: int,
+        total_num_kv_heads: Optional[int] = None,
+        bias: bool = True,
+        skip_bias_add: bool = False,
+        params_dtype: Optional[torch.dtype] = None,
+        quant_config: Optional[QuantizationConfig] = None,
+        prefix: str = "",
+        *,
+        return_bias: bool = True,
+    ):
+        self.hidden_size = hidden_size
+        self.head_size = head_size
+        self.total_num_heads = total_num_heads
+        if total_num_kv_heads is None:
+            total_num_kv_heads = total_num_heads
+        self.total_num_kv_heads = total_num_kv_heads
+        # Divide the weight matrix along the last dimension.
+        tp_size = get_tensor_model_parallel_world_size()
+        self.num_heads = divide(self.total_num_heads, tp_size)
+        if tp_size >= self.total_num_kv_heads:
+            self.num_kv_heads = 1
+            self.num_kv_head_replicas = divide(tp_size,
+                                               self.total_num_kv_heads)
+        else:
+            self.num_kv_heads = divide(self.total_num_kv_heads, tp_size)
+            self.num_kv_head_replicas = 1
+        input_size = self.hidden_size
+        output_size = (self.num_heads +
+                       2 * self.num_kv_heads) * tp_size * self.head_size
+        self.output_sizes = [
+            self.num_heads * self.head_size * tp_size,  # q_proj
+            self.num_kv_heads * self.head_size * tp_size,  # k_proj
+            self.num_kv_heads * self.head_size * tp_size,  # v_proj
+        ]
+
+        super().__init__(input_size=input_size,
+                         output_size=output_size,
+                         bias=bias,
+                         gather_output=False,
+                         skip_bias_add=skip_bias_add,
+                         params_dtype=params_dtype,
+                         quant_config=quant_config,
+                         prefix=prefix,
+                         return_bias=return_bias)
+
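The partition-vs-replicate branch above is easiest to see with numbers. A condensed restatement with Llama-like GQA sizes (hypothetical values; the real module uses divide(), which also asserts divisibility):

# Sketch of the head partitioning in QKVParallelLinear.__init__.
def qkv_partition(total_num_heads, total_num_kv_heads, tp_size):
    num_heads = total_num_heads // tp_size
    if tp_size >= total_num_kv_heads:
        # Each rank keeps one KV head; several ranks share (replicate) it.
        num_kv_heads, replicas = 1, tp_size // total_num_kv_heads
    else:
        num_kv_heads, replicas = total_num_kv_heads // tp_size, 1
    return num_heads, num_kv_heads, replicas

print(qkv_partition(32, 8, 4))   # (8, 2, 1): KV heads are partitioned
print(qkv_partition(32, 8, 16))  # (2, 1, 2): each KV head serves 2 ranks
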
+    def _get_shard_offset_mapping(self, loaded_shard_id: str):
+        shard_offset_mapping = {
+            "q": 0,
+            "k": self.num_heads * self.head_size,
+            "v": (self.num_heads + self.num_kv_heads) * self.head_size,
+            "total": (self.num_heads + 2 * self.num_kv_heads) * self.head_size
+        }
+        return shard_offset_mapping.get(loaded_shard_id)
+
+    def _get_shard_size_mapping(self, loaded_shard_id: str):
+        shard_size_mapping = {
+            "q": self.num_heads * self.head_size,
+            "k": self.num_kv_heads * self.head_size,
+            "v": self.num_kv_heads * self.head_size,
+        }
+        return shard_size_mapping.get(loaded_shard_id)
+
+    def _load_fused_module_from_checkpoint(self, param: BasevLLMParameter,
+                                           loaded_weight: torch.Tensor):
+        """
+        Handle special case for models where QKV layers are already
+        fused on disk. In this case, we have no shard id. This function
+        determines the shard id by splitting these layers and then calls
+        the weight loader using the shard id.
+
+        An example of a model with these fused layers:
+        https://huggingface.co/microsoft/Phi-3-mini-4k-instruct
+        """
+        shard_offsets = [
+            # (shard_id, shard_offset, shard_size)
+            ("q", 0, self.total_num_heads * self.head_size),
+            ("k", self.total_num_heads * self.head_size,
+             self.total_num_kv_heads * self.head_size),
+            ("v",
+             (self.total_num_heads + self.total_num_kv_heads) * self.head_size,
+             self.total_num_kv_heads * self.head_size),
+        ]
+
+        for shard_id, shard_offset, shard_size in shard_offsets:
+            # Special case for Quantization.
+            # If quantized, we need to adjust the offset and size to account
+            # for the packing.
+            if isinstance(param, (PackedColumnParameter, PackedvLLMParameter
+                                  )) and param.packed_dim == param.output_dim:
+                shard_size, shard_offset = \
+                    param.adjust_shard_indexes_for_packing(
+                        shard_size=shard_size, shard_offset=shard_offset)
+
+            loaded_weight_shard = loaded_weight.narrow(param.output_dim,
+                                                       shard_offset,
+                                                       shard_size)
+            self.weight_loader_v2(param, loaded_weight_shard, shard_id)
+
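A toy version of the fused-QKV split above, with Phi-3-mini-like head counts (hypothetical shapes; the [q | k | v] layout along the output dim is the point, not the exact sizes):

# Sketch: slicing a fused qkv_proj tensor as _load_fused_module_from_checkpoint
# does, assuming total_num_heads=32, total_num_kv_heads=32, head_size=96.
import torch

total_num_heads, total_num_kv_heads, head_size = 32, 32, 96
q_size = total_num_heads * head_size
kv_size = total_num_kv_heads * head_size
fused = torch.randn(q_size + 2 * kv_size, 3072)

shards = [("q", 0, q_size), ("k", q_size, kv_size),
          ("v", q_size + kv_size, kv_size)]
for shard_id, offset, size in shards:
    shard = fused.narrow(0, offset, size)  # handed to the per-shard loader
    print(shard_id, tuple(shard.shape))
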
+    def weight_loader_v2(self,
+                         param: BasevLLMParameter,
+                         loaded_weight: torch.Tensor,
+                         loaded_shard_id: Optional[str] = None):
+        if loaded_shard_id is None:  # special case for certain models
+            if isinstance(param, PerTensorScaleParameter):
+                param.load_qkv_weight(loaded_weight=loaded_weight, shard_id=0)
+                return
+            elif type(param) in (RowvLLMParameter, BasevLLMParameter):
+                param.load_qkv_weight(loaded_weight=loaded_weight)
+                return
+            # TODO: @dsikka - move to parameter.py
+            self._load_fused_module_from_checkpoint(param, loaded_weight)
+            return
+
+        assert loaded_shard_id in ["q", "k", "v"]
+
+        shard_offset = self._get_shard_offset_mapping(loaded_shard_id)
+        shard_size = self._get_shard_size_mapping(loaded_shard_id)
+
+        # Note(simon): This is needed for Qwen3's fp8 quantization.
+        if isinstance(param, BlockQuantScaleParameter):
+            assert self.quant_method is not None
+            assert hasattr(self.quant_method, "quant_config")
+            weight_block_size = self.quant_method.quant_config.weight_block_size
+            block_n, _ = weight_block_size[0], weight_block_size[1]
+            shard_offset = (shard_offset + block_n - 1) // block_n
+            shard_size = (shard_size + block_n - 1) // block_n
+
+        param.load_qkv_weight(loaded_weight=loaded_weight,
+                              num_heads=self.num_kv_head_replicas,
+                              shard_id=loaded_shard_id,
+                              shard_offset=shard_offset,
+                              shard_size=shard_size)
+
+    def weight_loader(self,
+                      param: Parameter,
+                      loaded_weight: torch.Tensor,
+                      loaded_shard_id: Optional[str] = None):
+
+        # Special case for GGUF
+        # initialize GGUF param after we know the quantize type
+        is_gguf_weight = getattr(param, "is_gguf_weight", False)
+        is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
+        if is_gguf_weight_type:
+            idx_map = {"q": 0, "k": 1, "v": 2}
+            if loaded_shard_id is not None:
+                param.data[idx_map[loaded_shard_id]].copy_(loaded_weight)
+                param.shard_weight_type[loaded_shard_id] = loaded_weight.item()
+            else:
+                param.shard_weight_type = {
+                    k: loaded_weight.item()
+                    for k in idx_map
+                }
+            return
+
+        if is_gguf_weight:
+            tp_size = get_tensor_model_parallel_world_size()
+            tp_rank = get_tensor_model_parallel_rank()
+
+            output_dim = getattr(param, "output_dim", None)
+            shard_size = loaded_weight.size(output_dim) // tp_size
+            start_idx = tp_rank * shard_size
+
+            if loaded_shard_id is not None:
+                loaded_weight = loaded_weight.narrow(output_dim, start_idx,
+                                                     shard_size)
+                param.shard_id.append(loaded_shard_id)
+                param.shard_id_map[loaded_shard_id] = len(param.data_container)
+                param.data_container.append(loaded_weight)
+                if len(param.data_container) == 3:
+                    self.qweight = param.materialize_nested()
+            return
+
+        param_data = param.data
+        output_dim = getattr(param, "output_dim", None)
+        # Special case for AQLM codebooks.
+        is_metadata = getattr(param, "is_metadata", False)
+
+        # Special case for per-tensor scales in fused case.
+        needs_scalar_to_array = getattr(param, "needs_scalar_to_array", False)
+
+        if loaded_shard_id is None:
+            # Loaded weight is already fused on disk (qkv).
+            # (e.g., Phi-3's qkv_proj).
+            if output_dim is None:
+                if needs_scalar_to_array:
+                    param_data, loaded_weight = adjust_scalar_to_fused_array(
+                        param_data, loaded_weight, 0)
+
+                assert param_data.shape == loaded_weight.shape
+                param_data.copy_(loaded_weight)
+                return
+            shard_offsets = [
+                # (shard_id, shard_offset, shard_size)
+                ("q", 0, self.total_num_heads * self.head_size),
+                ("k", self.total_num_heads * self.head_size,
+                 self.total_num_kv_heads * self.head_size),
+                ("v", (self.total_num_heads + self.total_num_kv_heads) *
+                 self.head_size, self.total_num_kv_heads * self.head_size),
+            ]
+            use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit",
+                                            False)
+
+            packed_dim = getattr(param, "packed_dim", None)
+            for shard_id, shard_offset, shard_size in shard_offsets:
+                # Special case for Quantized Weights.
+                # If quantized, we need to adjust the offset and size to
+                # account for the packing.
+                if packed_dim == output_dim:
+                    shard_size = shard_size // param.pack_factor
+                    shard_offset = shard_offset // param.pack_factor
+
+                    # Special case for Marlin.
+                    shard_size, shard_offset = adjust_marlin_shard(
+                        param, shard_size, shard_offset)
+
+                if use_bitsandbytes_4bit:
+                    orig_qkv_offsets = {
+                        "q": (0, self.total_num_heads * self.head_size),
+                        "k": (self.total_num_heads * self.head_size,
+                              self.total_num_kv_heads * self.head_size),
+                        "v":
+                        ((self.total_num_heads + self.total_num_kv_heads) *
+                         self.head_size,
+                         self.total_num_kv_heads * self.head_size),
+                        "total":
+                        ((self.total_num_heads + 2 * self.total_num_kv_heads) *
+                         self.head_size, 0)
+                    }
+
+                    shard_size, shard_offset = adjust_bitsandbytes_4bit_shard(
+                        param, orig_qkv_offsets, shard_id)
+
+                loaded_weight_shard = loaded_weight.narrow(
+                    output_dim, shard_offset, shard_size)
+                self.weight_loader(param, loaded_weight_shard, shard_id)
+            return
+
+        tp_rank = get_tensor_model_parallel_rank()
+        assert loaded_shard_id in ["q", "k", "v"]
+
+        # If output dim is defined, use the default loading process.
+        if output_dim is not None:
+            if loaded_shard_id == "q":
+                shard_offset = 0
+                shard_size = self.num_heads * self.head_size
+            elif loaded_shard_id == "k":
+                shard_offset = self.num_heads * self.head_size
+                shard_size = self.num_kv_heads * self.head_size
+            elif loaded_shard_id == "v":
+                shard_offset = (self.num_heads +
+                                self.num_kv_heads) * self.head_size
+                shard_size = self.num_kv_heads * self.head_size
+            # Special case for Quantized Weights.
+            # If quantized, we need to adjust the offset and size to account
+            # for the packing.
+            packed_dim = getattr(param, "packed_dim", None)
+            if packed_dim == output_dim:
+                shard_size = shard_size // param.pack_factor
+                shard_offset = shard_offset // param.pack_factor
+
+                # Special case for Marlin.
+                shard_size, shard_offset = adjust_marlin_shard(
+                    param, shard_size, shard_offset)
+
+            use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit",
+                                            False)
+            is_sharded_weight = getattr(param, "is_sharded_weight", False)
+            # bitsandbytes loads the weights of the specific portion
+            # no need to narrow
+            is_sharded_weight = is_sharded_weight or use_bitsandbytes_4bit
+
+            if use_bitsandbytes_4bit:
+                orig_qkv_offsets = {
+                    "q": (0, self.num_heads * self.head_size),
+                    "k": (self.num_heads * self.head_size,
+                          self.num_kv_heads * self.head_size),
+                    "v":
+                    ((self.num_heads + self.num_kv_heads) * self.head_size,
+                     self.num_kv_heads * self.head_size),
+                    "total":
+                    ((self.num_heads + 2 * self.num_kv_heads) * self.head_size,
+                     0)
+                }
+                shard_size, shard_offset = adjust_bitsandbytes_4bit_shard(
+                    param, orig_qkv_offsets, loaded_shard_id)
+
+            param_data = param_data.narrow(output_dim, shard_offset,
+                                           shard_size)
+            if loaded_shard_id == "q":
+                shard_id = tp_rank
+            else:
+                shard_id = tp_rank // self.num_kv_head_replicas
+            start_idx = shard_id * shard_size
+
+            if not is_sharded_weight:
+                loaded_weight = loaded_weight.narrow(output_dim, start_idx,
+                                                     shard_size)
+
+        # Special case for AQLM codebooks.
+        elif is_metadata:
+            # metadata indicates fixed size concatenated along dim 0
+            shard_size = loaded_weight.shape[0]
+            shard_index = ["q", "k", "v"].index(loaded_shard_id)
+            param_data = param_data.narrow(0, shard_index * shard_size,
+                                           shard_size)
+        # Special case for per-tensor scales in fused case.
+        elif needs_scalar_to_array:
+            param_data, loaded_weight = adjust_scalar_to_fused_array(
+                param_data, loaded_weight, loaded_shard_id)
+        else:
+            ignore_warning = getattr(param, "ignore_warning", False)
+            if not ignore_warning:
+                logger.warning(
+                    "Loading a weight without `output_dim` attribute in "
+                    "QKVParallelLinear, assume the weight is the same "
+                    "for all partitions.")
+
+        assert param_data.shape == loaded_weight.shape
+        param_data.copy_(loaded_weight)
+
+
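The shard_id computed near the end of weight_loader decides which slice of the checkpoint each rank reads; with replicated KV heads, consecutive ranks map to the same KV slice. A sketch with hypothetical sizes (not part of the diff):

# Sketch: rank -> checkpoint-shard mapping in QKVParallelLinear.weight_loader,
# assuming 8 KV heads replicated across tp_size=16 (num_kv_head_replicas=2).
tp_size, num_kv_head_replicas = 16, 2
for tp_rank in range(tp_size):
    q_shard = tp_rank                           # "q": one shard per rank
    kv_shard = tp_rank // num_kv_head_replicas  # "k"/"v": shared shards
    print(tp_rank, q_shard, kv_shard)
# ranks 0 and 1 both read KV shard 0, ranks 2 and 3 read shard 1, ...
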
+class RowParallelLinear(LinearBase):
+    """Linear layer with row parallelism.
+
+    The linear layer is defined as Y = XA + b. A is parallelized along
+    its first dimension and X along its second dimension as:
+               -   -
+              | A_1 |
+              | .   |
+          A = | .   |        X = [X_1, ..., X_p]
+              | .   |
+              | A_p |
+               -   -
+    Arguments:
+        input_size: first dimension of matrix A.
+        output_size: second dimension of matrix A.
+        bias: If true, add bias. Note that bias is not parallelized.
+        input_is_parallel: If true, we assume that the input is already
+                           split across the GPUs and we do not split
+                           again.
+        skip_bias_add: This was added to enable performance optimization where
+                       bias can be fused with other element-wise operations.
+                       We skip adding bias but instead return it.
+        params_dtype: Data type for the parameters.
+        quant_config: Quantization configuration.
+    """
+
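The docstring's partitioning amounts to the identity Y = XA = sum_p X_p A_p: each rank computes a partial product and the partials are summed (an all-reduce under TP, with the bias added only once). A small self-contained check with hypothetical shapes:

# Sketch verifying the row-parallel identity used by RowParallelLinear.
import torch

tp_size = 4
X = torch.randn(2, 64)   # hypothetical activation
A = torch.randn(64, 32)  # hypothetical weight
partials = [
    x_p @ a_p
    for x_p, a_p in zip(X.chunk(tp_size, dim=-1), A.chunk(tp_size, dim=0))
]
assert torch.allclose(sum(partials), X @ A, atol=1e-5)
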
+    def __init__(
+        self,
+        input_size: int,
+        output_size: int,
+        bias: bool = True,
+        input_is_parallel: bool = True,
+        skip_bias_add: bool = False,
+        params_dtype: Optional[torch.dtype] = None,
+        reduce_results: bool = True,
+        quant_config: Optional[QuantizationConfig] = None,
+        prefix: str = "",
+        *,
+        return_bias: bool = True,
+    ):
+        # Divide the weight matrix along the first dimension.
+        self.tp_rank = get_tensor_model_parallel_rank()
+        self.tp_size = get_tensor_model_parallel_world_size()
+        self.input_size_per_partition = divide(input_size, self.tp_size)
+        self.output_size_per_partition = output_size
+        self.output_partition_sizes = [output_size]
+
+        super().__init__(input_size,
+                         output_size,
+                         skip_bias_add,
+                         params_dtype,
+                         quant_config,
+                         prefix,
+                         return_bias=return_bias)
+
+        self.input_is_parallel = input_is_parallel
+        self.reduce_results = reduce_results
+
+        assert self.quant_method is not None
+        self.quant_method.create_weights(
+            layer=self,
+            input_size_per_partition=self.input_size_per_partition,
+            output_partition_sizes=self.output_partition_sizes,
+            input_size=self.input_size,
+            output_size=self.output_size,
+            params_dtype=self.params_dtype,
+            weight_loader=(
+                self.weight_loader_v2 if self.quant_method.__class__.__name__
+                in WEIGHT_LOADER_V2_SUPPORTED else self.weight_loader))
+        if not reduce_results and (bias and not skip_bias_add):
+            raise ValueError("When reduce_results=False, adding bias to the "
+                             "results can lead to incorrect results")
+
+        if bias:
+            self.bias = Parameter(
+                torch.empty(self.output_size, dtype=params_dtype))
+            set_weight_attrs(self.bias, {
+                "output_dim": 0,
+                "weight_loader": self.weight_loader,
+            })
+        else:
+            self.register_parameter("bias", None)
+
+    def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
+        tp_rank = get_tensor_model_parallel_rank()
+        tp_size = get_tensor_model_parallel_world_size()
+        input_dim = getattr(param, "input_dim", None)
+        use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit", False)
+        is_sharded_weight = getattr(param, "is_sharded_weight", False)
+        # bitsandbytes loads the weights of the specific portion
+        # no need to narrow
+        is_sharded_weight = is_sharded_weight or use_bitsandbytes_4bit
+
+        # Special case for GGUF
+        is_gguf_weight = getattr(param, "is_gguf_weight", False)
+        is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
+        if is_gguf_weight_type:
+            param.weight_type = loaded_weight.item()
+
+        # Materialize GGUF UninitializedParameter
+        if is_gguf_weight and isinstance(param, UninitializedParameter):
+            weight_shape = list(loaded_weight.shape)
+            if input_dim:
+                weight_shape[input_dim] = weight_shape[input_dim] // tp_size
+            param.materialize(tuple(weight_shape), dtype=loaded_weight.dtype)
+
+        param_data = param.data
+        if input_dim is not None and not is_sharded_weight:
+            shard_size = param_data.shape[input_dim]
+            start_idx = tp_rank * shard_size
+            loaded_weight = loaded_weight.narrow(input_dim, start_idx,
+                                                 shard_size)
+
+        # Special case for loading scales off disk, which often do not
+        # have a shape (such as in the case of AutoFP8).
+        if len(loaded_weight.shape) == 0:
+            loaded_weight = loaded_weight.reshape(1)
+
+        assert param_data.shape == loaded_weight.shape
+        param_data.copy_(loaded_weight)
+
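Unlike the column-parallel loaders, this one narrows along the input dimension. A toy illustration of that narrow with hypothetical shapes (not part of the diff):

# Sketch: the input-dim narrow in RowParallelLinear.weight_loader, assuming
# a (32, 64) checkpoint weight, tp_size=4, input_dim=1: each rank copies a
# contiguous 16-column slice of the full weight.
import torch

full_weight = torch.randn(32, 64)
tp_size, input_dim = 4, 1
shard_size = full_weight.shape[input_dim] // tp_size
for tp_rank in range(tp_size):
    shard = full_weight.narrow(input_dim, tp_rank * shard_size, shard_size)
    print(tp_rank, tuple(shard.shape))  # (32, 16) per rank
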
+    def weight_loader_v2(self, param: BasevLLMParameter,
+                         loaded_weight: torch.Tensor):
+
+        # Special case for loading scales off disk, which often do not
+        # have a shape (such as in the case of AutoFP8).
+        if len(loaded_weight.shape) == 0:
+            assert loaded_weight.numel() == 1
+            loaded_weight = loaded_weight.reshape(1)
+
+        param.load_row_parallel_weight(loaded_weight=loaded_weight)
+
+    def forward(
+        self, input_
+    ) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
+        if self.input_is_parallel:
+            input_parallel = input_
+        else:
+            tp_rank = get_tensor_model_parallel_rank()
+            splitted_input = split_tensor_along_last_dim(
+                input_, num_partitions=self.tp_size)
+            input_parallel = splitted_input[tp_rank].contiguous()
+
+        # Matrix multiply.
+        assert self.quant_method is not None
+        # Only fuse bias add into GEMM for rank 0 (this ensures that
+        # bias will not get added more than once in TP>1 case)
+        bias_ = None if (self.tp_rank > 0 or self.skip_bias_add) else self.bias
+        output_parallel = self.quant_method.apply(self,
+                                                  input_parallel,
+                                                  bias=bias_)
+        if self.reduce_results and self.tp_size > 1:
+            output = tensor_model_parallel_all_reduce(output_parallel)
+        else:
+            output = output_parallel
+
+        output_bias = self.bias if self.skip_bias_add else None
+
+        if not self.return_bias:
+            return output
+        return output, output_bias
+
+    def extra_repr(self) -> str:
+        s = f"input_features={self.input_size_per_partition}"
+        s += f", output_features={self.output_size}"
+        s += f", bias={self.bias is not None}"
+        s += f", tp_size={self.tp_size}"
+        s += f", reduce_results={self.reduce_results}"
+        return s
+
+
+class QKVCrossParallelLinear(LinearBase):
+    """Linear layers for efficient cross-attention's QKV transformation.
+
+    Args:
+        hidden_size: input hidden state size of the transformer.
+        head_size: size of each attention head.
+        total_num_heads: total number of attention query heads.
+        total_num_kv_heads: total number of attention key/value heads. If
+                            None, assume total_num_kv_heads = total_num_heads.
+        bias: If true, add bias.
+        skip_bias_add: This was added to enable performance optimizations
+                       where bias can be fused with other element-wise
+                       operations. We skip adding bias but instead return it.
+        params_dtype: Data type for the parameters.
+        quant_config: Quantization configuration.
+        prefix: The name of the layer in the state dict, including all parents
+                (e.g. model.layers.0.qkv_proj)
+    """
+
+    def __init__(self,
+                 hidden_size: int,
+                 head_size: int,
+                 total_num_heads: int,
+                 total_num_kv_heads: Optional[int] = None,
+                 bias: bool = True,
+                 skip_bias_add: bool = False,
+                 params_dtype: Optional[torch.dtype] = None,
+                 quant_config: Optional[QuantizationConfig] = None,
+                 prefix: str = ""):
+        # input_size and output_size are not used, just for alignment
+        input_size = hidden_size
+        output_size = (total_num_heads + (total_num_kv_heads or 0)) * head_size
+        super().__init__(input_size=input_size,
+                         output_size=output_size,
+                         skip_bias_add=skip_bias_add,
+                         params_dtype=params_dtype,
+                         quant_config=quant_config,
+                         prefix=prefix)
+
+        self.quant_config = quant_config
+
+        # Empty placeholders for loading as a single module.
+        placeholder_size = 0
+        assert self.quant_method is not None
+        self.quant_method.create_weights(self,
+                                         placeholder_size, [placeholder_size],
+                                         placeholder_size,
+                                         placeholder_size,
+                                         self.params_dtype,
+                                         weight_loader=self.weight_loader)
+
+        # Use a dictionary to avoid submodules parameters auto-registration:
+        # drop-in replacement for a `QKVParallelLinear` module.
+        self.proj = dict()
+        self.proj["q_proj_decoder"] = ColumnParallelLinear(
+            input_size=hidden_size,
+            output_size=total_num_heads * head_size,
+            bias=bias,
+            quant_config=quant_config,
+            skip_bias_add=skip_bias_add,
+            params_dtype=params_dtype,
+            prefix=f"{prefix}.q_proj_decoder")
+
+        self.proj["kv_proj_encoder"] = QKVParallelLinear(
+            hidden_size=hidden_size,
+            head_size=head_size,
+            total_num_heads=0,
+            total_num_kv_heads=total_num_kv_heads,
+            bias=bias,
+            quant_config=quant_config,
+            skip_bias_add=skip_bias_add,
+            params_dtype=params_dtype,
+            prefix=f"{prefix}.kv_proj_encoder")
+
+        # `kv_proj_encoder.num_kv_heads` accounts for sharding with tp>1.
+        self.q_size = self.q_proj_decoder.output_size_per_partition
+        self.kv_size = self.kv_proj_encoder.num_kv_heads * head_size
+
+        if bias:
+            self.bias = torch.nn.Parameter()
+            set_weight_attrs(self.bias, {
+                "output_dim": 0,
+                "weight_loader": self.weight_loader,
+            })
+        else:
+            self.bias = None
+
+    def process_weights_after_loading(self):
+        for layer in self.proj.values():
+            if self.quant_method is not None:
+                self.quant_method.process_weights_after_loading(layer)
+
+    @property
+    def q_proj_decoder(self) -> ColumnParallelLinear:
+        layer = self.proj["q_proj_decoder"]
+        for name, param in self.named_parameters():
+            target_param = getattr(layer, name, None)
+            if target_param is not None:
+                self.sync_weight_attrs(param,
+                                       target_param,
+                                       mode="q_proj_decoder")
+        return layer
+
+    @property
+    def kv_proj_encoder(self) -> QKVParallelLinear:
+        layer = self.proj["kv_proj_encoder"]
+        for name, param in self.named_parameters():
+            target_param = getattr(layer, name, None)
+            if target_param is not None:
+                self.sync_weight_attrs(param,
+                                       target_param,
+                                       mode="kv_proj_encoder")
+        return layer
+
+    def sync_weight_attrs(
+        self,
+        src_param: nn.Parameter,
+        tgt_param: nn.Parameter,
+        mode: Literal["q_proj_decoder", "kv_proj_encoder"],
+    ):
+        missing_attrs_dict = {
+            k: getattr(src_param, k)
+            for k in (set(src_param.__dict__.keys()) -
+                      set(tgt_param.__dict__.keys()))
+        }
+        # TODO(Isotr0py): handle bitsandbytes 8bit
+        use_bitsandbytes_4bit = getattr(src_param, "use_bitsandbytes_4bit",
+                                        False)
+        if (missing_attrs_dict and use_bitsandbytes_4bit):
+            q_proj_attrs, kv_proj_attrs = left_shift_bitsandbytes_4bit_shard(
+                missing_attrs_dict)
+            if mode == "q_proj_decoder":
+                set_weight_attrs(tgt_param, q_proj_attrs)
+            elif mode == "kv_proj_encoder":
+                set_weight_attrs(tgt_param, kv_proj_attrs)
+        else:
+            set_weight_attrs(tgt_param, missing_attrs_dict)
+
+    def _is_same_param(
+        self,
+        src_param: torch.nn.Parameter,
+        map_param: torch.nn.Parameter,
+    ) -> bool:
+        """Check if two parameters point to exactly the same thing."""
+        # ignore weight_loader because it's always different
+        key_to_ignore = ["weight_loader", "_weight_loader"]
+        has_same_type_name = type(src_param) is type(map_param)
+        src_param_attrs = {
+            k: v
+            for k, v in src_param.__dict__.items() if k not in key_to_ignore
+        }
+        map_param_attrs = {
+            k: v
+            for k, v in map_param.__dict__.items() if k not in key_to_ignore
+        }
+        has_same_attrs = src_param_attrs == map_param_attrs
+        return has_same_type_name and has_same_attrs
+
+    def select_proj_params(
+        self,
+        layer: nn.Module,
+        param: nn.Parameter,
+    ) -> nn.Parameter:
+        """
+        Given the placeholder param,
+        return the corresponding param in the proj layers.
+        """
+        target_param_list = [
+            v for _, v in layer.named_parameters()
+            if self._is_same_param(param, v)
+        ]
+        assert len(target_param_list) == 1
+        target_param = target_param_list[0]
+        return target_param
+
+    def forward(  # type: ignore[override]
+        self,
+        decoder_hidden_states: torch.Tensor,
+        encoder_hidden_states: torch.Tensor,
+    ) -> tuple[torch.Tensor, ...]:
+        q, _ = self.q_proj_decoder(decoder_hidden_states)
+        if encoder_hidden_states is None:
+            # Encoder KV already cached.
+            k = None
+            v = None
+        else:
+            # Prefill phase, encoder KV cached here.
+            kv_enc, _ = self.kv_proj_encoder(encoder_hidden_states)
+            # Split kv in half
+            k, v = kv_enc.split(self.kv_size, dim=-1)
+        return q, k, v
+
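The forward above encodes a prefill-vs-decode contract: queries always come from the decoder, while K/V are produced only when encoder states are given (otherwise they are assumed cached). A toy restatement with plain nn.Linear projections and hypothetical sizes (not the module itself):

# Sketch: the decode-vs-prefill contract of QKVCrossParallelLinear.forward,
# assuming q_size=kv_size=128 for brevity.
import torch

def cross_qkv(decoder_h, encoder_h, q_proj, kv_proj, kv_size):
    q = q_proj(decoder_h)
    if encoder_h is None:          # decode: encoder KV already cached
        return q, None, None
    kv = kv_proj(encoder_h)        # prefill: project and split encoder KV
    k, v = kv.split(kv_size, dim=-1)
    return q, k, v

q_proj = torch.nn.Linear(512, 128, bias=False)
kv_proj = torch.nn.Linear(512, 256, bias=False)
q, k, v = cross_qkv(torch.randn(4, 512), torch.randn(9, 512),
                    q_proj, kv_proj, 128)
print(q.shape, k.shape, v.shape)  # (4, 128), (9, 128), (9, 128)
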
+    def weight_loader(self,
+                      param: torch.nn.Parameter,
+                      loaded_weight: torch.Tensor,
+                      loaded_shard_id: Optional[str] = None):
+        layer = (self.q_proj_decoder
+                 if loaded_shard_id == "q" else self.kv_proj_encoder)
+        target_param = self.select_proj_params(layer, param)
+        shard_id_args = (loaded_shard_id, ) if loaded_shard_id != "q" else ()
+        if self.quant_method.__class__.__name__ in WEIGHT_LOADER_V2_SUPPORTED:
+            layer.weight_loader_v2(target_param, loaded_weight, *shard_id_args)
+        else:
+            layer.weight_loader(target_param, loaded_weight, *shard_id_args)
+
+    def extra_repr(self) -> str:
+        s = f"in_features={self.input_size}"
+        s += f", q_size={self.q_size}"
+        s += f", kv_size={self.kv_size}"
+        s += f", bias={self.bias is not None}"
+        s += f", tp_size={get_tensor_model_parallel_world_size()}"
+        s += ", gather_output=False"
+        return s