vllm-cpu-amxbf16 0.9.1 (vllm_cpu_amxbf16-0.9.1-cp312-cp312-manylinux_2_17_x86_64.whl)

This diff shows the contents of publicly available package versions as they were released to a supported registry. The information is provided for informational purposes only and reflects the changes between package versions as published in the public registry.
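For orientation, the wheel packages the vLLM offline inference entrypoint (vllm/entrypoints/llm.py, re-exported from vllm/__init__.py). The sketch below is only illustrative of that public API, not part of the diff itself; the model name is an arbitrary example, and on this CPU (AMX BF16) build inference runs on the CPU backend rather than CUDA.

```python
# Minimal sketch of the offline inference API shipped in this wheel.
# vllm.LLM and vllm.SamplingParams are exported from vllm/__init__.py;
# the model name below is an example, not something mandated by the package.
from vllm import LLM, SamplingParams

prompts = ["Hello, my name is", "The capital of France is"]
sampling_params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=32)

llm = LLM(model="facebook/opt-125m")  # loads weights and allocates the KV cache
outputs = llm.generate(prompts, sampling_params)

for output in outputs:
    # each RequestOutput carries the prompt and its generated completions
    print(output.prompt, "->", output.outputs[0].text)
```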
Files changed (1197)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +53 -0
  3. vllm/_custom_ops.py +1828 -0
  4. vllm/_ipex_ops.py +244 -0
  5. vllm/_version.py +34 -0
  6. vllm/adapter_commons/__init__.py +0 -0
  7. vllm/adapter_commons/layers.py +16 -0
  8. vllm/adapter_commons/models.py +106 -0
  9. vllm/adapter_commons/request.py +26 -0
  10. vllm/adapter_commons/utils.py +93 -0
  11. vllm/adapter_commons/worker_manager.py +39 -0
  12. vllm/assets/__init__.py +0 -0
  13. vllm/assets/audio.py +45 -0
  14. vllm/assets/base.py +41 -0
  15. vllm/assets/image.py +34 -0
  16. vllm/assets/video.py +115 -0
  17. vllm/attention/__init__.py +20 -0
  18. vllm/attention/backends/__init__.py +0 -0
  19. vllm/attention/backends/abstract.py +308 -0
  20. vllm/attention/backends/blocksparse_attn.py +461 -0
  21. vllm/attention/backends/cpu_mla.py +307 -0
  22. vllm/attention/backends/dual_chunk_flash_attn.py +1498 -0
  23. vllm/attention/backends/flash_attn.py +1003 -0
  24. vllm/attention/backends/flashinfer.py +1104 -0
  25. vllm/attention/backends/flashmla.py +244 -0
  26. vllm/attention/backends/hpu_attn.py +313 -0
  27. vllm/attention/backends/ipex_attn.py +398 -0
  28. vllm/attention/backends/mla/__init__.py +0 -0
  29. vllm/attention/backends/mla/common.py +1385 -0
  30. vllm/attention/backends/pallas.py +351 -0
  31. vllm/attention/backends/placeholder_attn.py +400 -0
  32. vllm/attention/backends/rocm_aiter_mla.py +435 -0
  33. vllm/attention/backends/rocm_flash_attn.py +975 -0
  34. vllm/attention/backends/torch_sdpa.py +703 -0
  35. vllm/attention/backends/triton_mla.py +115 -0
  36. vllm/attention/backends/utils.py +610 -0
  37. vllm/attention/backends/xformers.py +802 -0
  38. vllm/attention/layer.py +468 -0
  39. vllm/attention/ops/__init__.py +0 -0
  40. vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
  41. vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +433 -0
  42. vllm/attention/ops/blocksparse_attention/interface.py +239 -0
  43. vllm/attention/ops/blocksparse_attention/utils.py +246 -0
  44. vllm/attention/ops/chunked_prefill_paged_decode.py +368 -0
  45. vllm/attention/ops/flashmla.py +116 -0
  46. vllm/attention/ops/hpu_paged_attn.py +88 -0
  47. vllm/attention/ops/ipex_attn.py +195 -0
  48. vllm/attention/ops/merge_attn_states.py +43 -0
  49. vllm/attention/ops/nki_flash_attn.py +906 -0
  50. vllm/attention/ops/paged_attn.py +256 -0
  51. vllm/attention/ops/prefix_prefill.py +902 -0
  52. vllm/attention/ops/rocm_aiter_mla.py +100 -0
  53. vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
  54. vllm/attention/ops/triton_decode_attention.py +674 -0
  55. vllm/attention/ops/triton_flash_attention.py +979 -0
  56. vllm/attention/ops/triton_merge_attn_states.py +97 -0
  57. vllm/attention/ops/triton_unified_attention.py +334 -0
  58. vllm/attention/selector.py +187 -0
  59. vllm/attention/utils/fa_utils.py +55 -0
  60. vllm/beam_search.py +87 -0
  61. vllm/benchmarks/__init__.py +0 -0
  62. vllm/benchmarks/datasets.py +1185 -0
  63. vllm/benchmarks/endpoint_request_func.py +381 -0
  64. vllm/benchmarks/latency.py +168 -0
  65. vllm/benchmarks/serve.py +1135 -0
  66. vllm/benchmarks/throughput.py +609 -0
  67. vllm/benchmarks/utils.py +70 -0
  68. vllm/collect_env.py +820 -0
  69. vllm/compilation/__init__.py +0 -0
  70. vllm/compilation/activation_quant_fusion.py +89 -0
  71. vllm/compilation/backends.py +563 -0
  72. vllm/compilation/base_piecewise_backend.py +72 -0
  73. vllm/compilation/collective_fusion.py +127 -0
  74. vllm/compilation/compiler_interface.py +544 -0
  75. vllm/compilation/counter.py +38 -0
  76. vllm/compilation/cuda_piecewise_backend.py +214 -0
  77. vllm/compilation/decorators.py +250 -0
  78. vllm/compilation/fix_functionalization.py +191 -0
  79. vllm/compilation/fusion.py +618 -0
  80. vllm/compilation/fx_utils.py +62 -0
  81. vllm/compilation/inductor_pass.py +115 -0
  82. vllm/compilation/monitor.py +39 -0
  83. vllm/compilation/multi_output_match.py +109 -0
  84. vllm/compilation/noop_elimination.py +137 -0
  85. vllm/compilation/pass_manager.py +78 -0
  86. vllm/compilation/sequence_parallelism.py +268 -0
  87. vllm/compilation/torch25_custom_graph_pass.py +42 -0
  88. vllm/compilation/vllm_inductor_pass.py +67 -0
  89. vllm/compilation/wrapper.py +135 -0
  90. vllm/config.py +4746 -0
  91. vllm/connections.py +174 -0
  92. vllm/core/__init__.py +0 -0
  93. vllm/core/block/__init__.py +0 -0
  94. vllm/core/block/block_table.py +399 -0
  95. vllm/core/block/common.py +371 -0
  96. vllm/core/block/cpu_gpu_block_allocator.py +441 -0
  97. vllm/core/block/interfaces.py +319 -0
  98. vllm/core/block/naive_block.py +466 -0
  99. vllm/core/block/prefix_caching_block.py +1135 -0
  100. vllm/core/block/utils.py +28 -0
  101. vllm/core/block_manager.py +521 -0
  102. vllm/core/evictor.py +157 -0
  103. vllm/core/interfaces.py +135 -0
  104. vllm/core/placeholder_block_space_manager.py +100 -0
  105. vllm/core/scheduler.py +2093 -0
  106. vllm/device_allocator/__init__.py +0 -0
  107. vllm/device_allocator/cumem.py +281 -0
  108. vllm/distributed/__init__.py +6 -0
  109. vllm/distributed/communication_op.py +41 -0
  110. vllm/distributed/device_communicators/__init__.py +0 -0
  111. vllm/distributed/device_communicators/all2all.py +264 -0
  112. vllm/distributed/device_communicators/base_device_communicator.py +260 -0
  113. vllm/distributed/device_communicators/cpu_communicator.py +145 -0
  114. vllm/distributed/device_communicators/cuda_communicator.py +176 -0
  115. vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
  116. vllm/distributed/device_communicators/custom_all_reduce.py +304 -0
  117. vllm/distributed/device_communicators/custom_all_reduce_utils.py +259 -0
  118. vllm/distributed/device_communicators/hpu_communicator.py +46 -0
  119. vllm/distributed/device_communicators/neuron_communicator.py +20 -0
  120. vllm/distributed/device_communicators/pynccl.py +218 -0
  121. vllm/distributed/device_communicators/pynccl_wrapper.py +341 -0
  122. vllm/distributed/device_communicators/shm_broadcast.py +585 -0
  123. vllm/distributed/device_communicators/tpu_communicator.py +103 -0
  124. vllm/distributed/device_communicators/xpu_communicator.py +55 -0
  125. vllm/distributed/kv_events.py +356 -0
  126. vllm/distributed/kv_transfer/README.md +29 -0
  127. vllm/distributed/kv_transfer/__init__.py +12 -0
  128. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  129. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  130. vllm/distributed/kv_transfer/kv_connector/base.py +128 -0
  131. vllm/distributed/kv_transfer/kv_connector/factory.py +128 -0
  132. vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +99 -0
  133. vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +203 -0
  134. vllm/distributed/kv_transfer/kv_connector/simple_connector.py +329 -0
  135. vllm/distributed/kv_transfer/kv_connector/utils.py +108 -0
  136. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
  137. vllm/distributed/kv_transfer/kv_connector/v1/base.py +283 -0
  138. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +134 -0
  139. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +201 -0
  140. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1030 -0
  141. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +384 -0
  142. vllm/distributed/kv_transfer/kv_connector_agent.py +77 -0
  143. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  144. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
  145. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
  146. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
  147. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  148. vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
  149. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +280 -0
  150. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
  151. vllm/distributed/kv_transfer/kv_transfer_state.py +71 -0
  152. vllm/distributed/parallel_state.py +1296 -0
  153. vllm/distributed/tpu_distributed_utils.py +177 -0
  154. vllm/distributed/utils.py +536 -0
  155. vllm/engine/__init__.py +0 -0
  156. vllm/engine/arg_utils.py +1708 -0
  157. vllm/engine/async_llm_engine.py +1200 -0
  158. vllm/engine/async_timeout.py +173 -0
  159. vllm/engine/llm_engine.py +2097 -0
  160. vllm/engine/metrics.py +629 -0
  161. vllm/engine/metrics_types.py +94 -0
  162. vllm/engine/multiprocessing/__init__.py +148 -0
  163. vllm/engine/multiprocessing/client.py +681 -0
  164. vllm/engine/multiprocessing/engine.py +460 -0
  165. vllm/engine/output_processor/__init__.py +0 -0
  166. vllm/engine/output_processor/interfaces.py +75 -0
  167. vllm/engine/output_processor/multi_step.py +216 -0
  168. vllm/engine/output_processor/single_step.py +145 -0
  169. vllm/engine/output_processor/stop_checker.py +131 -0
  170. vllm/engine/output_processor/util.py +28 -0
  171. vllm/engine/protocol.py +317 -0
  172. vllm/entrypoints/__init__.py +0 -0
  173. vllm/entrypoints/api_server.py +178 -0
  174. vllm/entrypoints/chat_utils.py +1299 -0
  175. vllm/entrypoints/cli/__init__.py +0 -0
  176. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  177. vllm/entrypoints/cli/benchmark/base.py +39 -0
  178. vllm/entrypoints/cli/benchmark/latency.py +30 -0
  179. vllm/entrypoints/cli/benchmark/main.py +54 -0
  180. vllm/entrypoints/cli/benchmark/serve.py +30 -0
  181. vllm/entrypoints/cli/benchmark/throughput.py +30 -0
  182. vllm/entrypoints/cli/collect_env.py +35 -0
  183. vllm/entrypoints/cli/main.py +65 -0
  184. vllm/entrypoints/cli/openai.py +205 -0
  185. vllm/entrypoints/cli/run_batch.py +62 -0
  186. vllm/entrypoints/cli/serve.py +328 -0
  187. vllm/entrypoints/cli/types.py +25 -0
  188. vllm/entrypoints/launcher.py +147 -0
  189. vllm/entrypoints/llm.py +1544 -0
  190. vllm/entrypoints/logger.py +50 -0
  191. vllm/entrypoints/openai/__init__.py +0 -0
  192. vllm/entrypoints/openai/api_server.py +1387 -0
  193. vllm/entrypoints/openai/cli_args.py +315 -0
  194. vllm/entrypoints/openai/logits_processors.py +90 -0
  195. vllm/entrypoints/openai/protocol.py +1913 -0
  196. vllm/entrypoints/openai/run_batch.py +463 -0
  197. vllm/entrypoints/openai/serving_chat.py +1221 -0
  198. vllm/entrypoints/openai/serving_classification.py +160 -0
  199. vllm/entrypoints/openai/serving_completion.py +592 -0
  200. vllm/entrypoints/openai/serving_embedding.py +201 -0
  201. vllm/entrypoints/openai/serving_engine.py +986 -0
  202. vllm/entrypoints/openai/serving_models.py +315 -0
  203. vllm/entrypoints/openai/serving_pooling.py +232 -0
  204. vllm/entrypoints/openai/serving_score.py +433 -0
  205. vllm/entrypoints/openai/serving_tokenization.py +157 -0
  206. vllm/entrypoints/openai/serving_transcription.py +424 -0
  207. vllm/entrypoints/openai/tool_parsers/__init__.py +23 -0
  208. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
  209. vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
  210. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
  211. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
  212. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +371 -0
  213. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
  214. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
  215. vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
  216. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +267 -0
  217. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
  218. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
  219. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
  220. vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
  221. vllm/entrypoints/score_utils.py +50 -0
  222. vllm/entrypoints/ssl.py +75 -0
  223. vllm/entrypoints/utils.py +233 -0
  224. vllm/env_override.py +41 -0
  225. vllm/envs.py +944 -0
  226. vllm/executor/__init__.py +0 -0
  227. vllm/executor/executor_base.py +401 -0
  228. vllm/executor/mp_distributed_executor.py +244 -0
  229. vllm/executor/msgspec_utils.py +30 -0
  230. vllm/executor/multiproc_worker_utils.py +313 -0
  231. vllm/executor/ray_distributed_executor.py +701 -0
  232. vllm/executor/ray_utils.py +399 -0
  233. vllm/executor/uniproc_executor.py +139 -0
  234. vllm/forward_context.py +179 -0
  235. vllm/inputs/__init__.py +41 -0
  236. vllm/inputs/data.py +331 -0
  237. vllm/inputs/parse.py +151 -0
  238. vllm/inputs/preprocess.py +909 -0
  239. vllm/inputs/registry.py +237 -0
  240. vllm/jsontree.py +80 -0
  241. vllm/logger.py +212 -0
  242. vllm/logging_utils/__init__.py +8 -0
  243. vllm/logging_utils/dump_input.py +85 -0
  244. vllm/logging_utils/formatter.py +18 -0
  245. vllm/logits_process.py +119 -0
  246. vllm/lora/__init__.py +0 -0
  247. vllm/lora/fully_sharded_layers.py +355 -0
  248. vllm/lora/layers.py +1285 -0
  249. vllm/lora/lora.py +199 -0
  250. vllm/lora/models.py +818 -0
  251. vllm/lora/ops/__init__.py +0 -0
  252. vllm/lora/ops/torch_ops/__init__.py +16 -0
  253. vllm/lora/ops/torch_ops/lora_ops.py +119 -0
  254. vllm/lora/ops/triton_ops/__init__.py +12 -0
  255. vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
  256. vllm/lora/ops/triton_ops/lora_expand_op.py +290 -0
  257. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
  258. vllm/lora/ops/triton_ops/lora_shrink_op.py +244 -0
  259. vllm/lora/ops/triton_ops/utils.py +120 -0
  260. vllm/lora/ops/xla_ops/__init__.py +7 -0
  261. vllm/lora/ops/xla_ops/lora_ops.py +145 -0
  262. vllm/lora/peft_helper.py +136 -0
  263. vllm/lora/punica_wrapper/__init__.py +10 -0
  264. vllm/lora/punica_wrapper/punica_base.py +485 -0
  265. vllm/lora/punica_wrapper/punica_cpu.py +349 -0
  266. vllm/lora/punica_wrapper/punica_gpu.py +290 -0
  267. vllm/lora/punica_wrapper/punica_hpu.py +145 -0
  268. vllm/lora/punica_wrapper/punica_selector.py +20 -0
  269. vllm/lora/punica_wrapper/punica_tpu.py +405 -0
  270. vllm/lora/punica_wrapper/utils.py +164 -0
  271. vllm/lora/request.py +99 -0
  272. vllm/lora/resolver.py +85 -0
  273. vllm/lora/utils.py +240 -0
  274. vllm/lora/worker_manager.py +259 -0
  275. vllm/model_executor/__init__.py +16 -0
  276. vllm/model_executor/custom_op.py +152 -0
  277. vllm/model_executor/guided_decoding/__init__.py +181 -0
  278. vllm/model_executor/guided_decoding/guidance_decoding.py +63 -0
  279. vllm/model_executor/guided_decoding/guidance_logits_processors.py +104 -0
  280. vllm/model_executor/guided_decoding/guided_fields.py +41 -0
  281. vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +67 -0
  282. vllm/model_executor/guided_decoding/outlines_decoding.py +155 -0
  283. vllm/model_executor/guided_decoding/outlines_logits_processors.py +284 -0
  284. vllm/model_executor/guided_decoding/utils.py +242 -0
  285. vllm/model_executor/guided_decoding/xgrammar_decoding.py +426 -0
  286. vllm/model_executor/layers/__init__.py +0 -0
  287. vllm/model_executor/layers/activation.py +369 -0
  288. vllm/model_executor/layers/fused_moe/__init__.py +54 -0
  289. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +125 -0
  290. vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +117 -0
  291. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  292. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  293. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  294. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  295. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  296. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  297. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  298. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  299. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  300. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  301. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  302. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  303. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  304. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  305. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  306. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  307. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  308. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  309. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  310. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  311. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  312. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  313. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  314. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  315. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  316. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  317. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  318. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  319. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  320. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  321. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  322. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  323. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  324. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  325. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  326. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  327. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  328. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  329. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  330. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  331. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  332. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  333. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  334. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  335. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  336. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  337. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  338. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  339. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  454. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  455. vllm/model_executor/layers/fused_moe/cutlass_moe.py +461 -0
  456. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +240 -0
  457. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +240 -0
  458. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +186 -0
  459. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +775 -0
  460. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +232 -0
  461. vllm/model_executor/layers/fused_moe/fused_moe.py +1724 -0
  462. vllm/model_executor/layers/fused_moe/layer.py +1535 -0
  463. vllm/model_executor/layers/fused_moe/modular_kernel.py +446 -0
  464. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +243 -0
  465. vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
  466. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +190 -0
  467. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  468. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +159 -0
  469. vllm/model_executor/layers/fused_moe/prepare_finalize.py +69 -0
  470. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +421 -0
  471. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +117 -0
  472. vllm/model_executor/layers/fused_moe/utils.py +98 -0
  473. vllm/model_executor/layers/layernorm.py +288 -0
  474. vllm/model_executor/layers/lightning_attn.py +652 -0
  475. vllm/model_executor/layers/linear.py +1524 -0
  476. vllm/model_executor/layers/logits_processor.py +197 -0
  477. vllm/model_executor/layers/mamba/__init__.py +0 -0
  478. vllm/model_executor/layers/mamba/mamba2_metadata.py +125 -0
  479. vllm/model_executor/layers/mamba/mamba_mixer.py +245 -0
  480. vllm/model_executor/layers/mamba/mamba_mixer2.py +616 -0
  481. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  482. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +105 -0
  483. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
  484. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +262 -0
  485. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +589 -0
  486. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +751 -0
  487. vllm/model_executor/layers/mamba/ops/ssd_combined.py +232 -0
  488. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +206 -0
  489. vllm/model_executor/layers/pooler.py +350 -0
  490. vllm/model_executor/layers/quantization/__init__.py +157 -0
  491. vllm/model_executor/layers/quantization/aqlm.py +376 -0
  492. vllm/model_executor/layers/quantization/auto_round.py +310 -0
  493. vllm/model_executor/layers/quantization/awq.py +194 -0
  494. vllm/model_executor/layers/quantization/awq_marlin.py +519 -0
  495. vllm/model_executor/layers/quantization/awq_triton.py +320 -0
  496. vllm/model_executor/layers/quantization/base_config.py +151 -0
  497. vllm/model_executor/layers/quantization/bitblas.py +461 -0
  498. vllm/model_executor/layers/quantization/bitsandbytes.py +396 -0
  499. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  500. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +668 -0
  501. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1260 -0
  502. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +24 -0
  503. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +358 -0
  504. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  505. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
  506. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +93 -0
  507. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +178 -0
  508. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
  509. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +150 -0
  510. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
  511. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
  512. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
  513. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  514. vllm/model_executor/layers/quantization/deepspeedfp.py +195 -0
  515. vllm/model_executor/layers/quantization/experts_int8.py +196 -0
  516. vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
  517. vllm/model_executor/layers/quantization/fp8.py +906 -0
  518. vllm/model_executor/layers/quantization/gguf.py +565 -0
  519. vllm/model_executor/layers/quantization/gptq.py +278 -0
  520. vllm/model_executor/layers/quantization/gptq_bitblas.py +445 -0
  521. vllm/model_executor/layers/quantization/gptq_marlin.py +648 -0
  522. vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
  523. vllm/model_executor/layers/quantization/hqq_marlin.py +332 -0
  524. vllm/model_executor/layers/quantization/ipex_quant.py +250 -0
  525. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  526. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +90 -0
  527. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +83 -0
  528. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
  529. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +300 -0
  530. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
  531. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +120 -0
  532. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +131 -0
  533. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
  534. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +87 -0
  535. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +120 -0
  536. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
  537. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
  538. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +105 -0
  539. vllm/model_executor/layers/quantization/kv_cache.py +139 -0
  540. vllm/model_executor/layers/quantization/marlin.py +261 -0
  541. vllm/model_executor/layers/quantization/modelopt.py +737 -0
  542. vllm/model_executor/layers/quantization/moe_wna16.py +449 -0
  543. vllm/model_executor/layers/quantization/neuron_quant.py +76 -0
  544. vllm/model_executor/layers/quantization/ptpc_fp8.py +127 -0
  545. vllm/model_executor/layers/quantization/qqq.py +275 -0
  546. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  547. vllm/model_executor/layers/quantization/quark/quark.py +441 -0
  548. vllm/model_executor/layers/quantization/quark/quark_moe.py +237 -0
  549. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  550. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  551. vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +126 -0
  552. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +146 -0
  553. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
  554. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  555. vllm/model_executor/layers/quantization/schema.py +86 -0
  556. vllm/model_executor/layers/quantization/torchao.py +161 -0
  557. vllm/model_executor/layers/quantization/tpu_int8.py +121 -0
  558. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  559. vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
  560. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +208 -0
  561. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  562. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  563. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  564. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  565. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  566. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  567. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  568. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  569. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  570. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  571. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  572. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  573. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  574. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  575. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  576. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  577. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  578. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  579. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  580. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  581. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  582. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  583. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  584. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  585. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  586. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  587. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  588. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  589. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  590. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  591. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  592. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  593. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  594. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  595. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  596. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  597. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  598. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  599. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  600. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  601. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  602. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  603. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  604. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  605. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  606. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  607. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  608. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  609. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  610. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  611. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  612. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  613. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  614. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  615. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  616. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  617. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  618. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  619. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  620. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  621. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  622. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  623. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  624. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  625. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  626. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  627. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  628. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  629. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  630. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  631. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  632. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  633. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  634. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  635. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  636. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  637. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  638. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  639. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  640. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  641. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  642. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  643. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  644. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  645. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  646. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  647. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  648. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  649. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  650. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  651. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  652. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  653. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  654. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  655. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  656. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  657. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  658. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  659. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  660. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  661. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  662. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  663. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  664. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  665. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  666. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  667. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  668. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  669. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  670. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  671. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  672. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  673. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  674. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  675. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  676. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  677. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  678. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  679. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  680. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  681. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  682. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  683. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  684. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  685. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  686. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  687. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  688. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  689. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  690. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  691. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  722. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  723. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  724. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  725. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  726. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  727. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  728. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  729. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  730. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  731. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  732. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  733. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  734. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  735. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  736. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  737. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  738. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  739. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  740. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  741. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  742. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  743. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  744. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  745. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  746. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  747. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  748. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  749. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  750. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  751. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  752. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  753. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  754. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  755. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  756. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  757. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  758. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  759. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  760. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  761. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  762. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  763. vllm/model_executor/layers/quantization/utils/fp8_utils.py +618 -0
  764. vllm/model_executor/layers/quantization/utils/gptq_utils.py +95 -0
  765. vllm/model_executor/layers/quantization/utils/int8_utils.py +485 -0
  766. vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
  767. vllm/model_executor/layers/quantization/utils/machete_utils.py +33 -0
  768. vllm/model_executor/layers/quantization/utils/marlin_utils.py +476 -0
  769. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +283 -0
  770. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +325 -0
  771. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
  772. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
  773. vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +126 -0
  774. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +45 -0
  775. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +104 -0
  776. vllm/model_executor/layers/quantization/utils/quant_utils.py +573 -0
  777. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +405 -0
  778. vllm/model_executor/layers/rejection_sampler.py +406 -0
  779. vllm/model_executor/layers/resampler.py +270 -0
  780. vllm/model_executor/layers/rotary_embedding.py +1862 -0
  781. vllm/model_executor/layers/sampler.py +1204 -0
  782. vllm/model_executor/layers/spec_decode_base_sampler.py +259 -0
  783. vllm/model_executor/layers/typical_acceptance_sampler.py +166 -0
  784. vllm/model_executor/layers/utils.py +95 -0
  785. vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
  786. vllm/model_executor/model_loader/__init__.py +76 -0
  787. vllm/model_executor/model_loader/base_loader.py +43 -0
  788. vllm/model_executor/model_loader/bitsandbytes_loader.py +570 -0
  789. vllm/model_executor/model_loader/default_loader.py +282 -0
  790. vllm/model_executor/model_loader/dummy_loader.py +27 -0
  791. vllm/model_executor/model_loader/gguf_loader.py +120 -0
  792. vllm/model_executor/model_loader/neuron.py +476 -0
  793. vllm/model_executor/model_loader/neuronx_distributed.py +685 -0
  794. vllm/model_executor/model_loader/runai_streamer_loader.py +109 -0
  795. vllm/model_executor/model_loader/sharded_state_loader.py +201 -0
  796. vllm/model_executor/model_loader/tensorizer.py +600 -0
  797. vllm/model_executor/model_loader/tensorizer_loader.py +123 -0
  798. vllm/model_executor/model_loader/tpu.py +112 -0
  799. vllm/model_executor/model_loader/utils.py +302 -0
  800. vllm/model_executor/model_loader/weight_utils.py +782 -0
  801. vllm/model_executor/models/__init__.py +28 -0
  802. vllm/model_executor/models/adapters.py +248 -0
  803. vllm/model_executor/models/aimv2.py +246 -0
  804. vllm/model_executor/models/arctic.py +559 -0
  805. vllm/model_executor/models/aria.py +657 -0
  806. vllm/model_executor/models/aya_vision.py +466 -0
  807. vllm/model_executor/models/baichuan.py +474 -0
  808. vllm/model_executor/models/bamba.py +543 -0
  809. vllm/model_executor/models/bart.py +938 -0
  810. vllm/model_executor/models/bert.py +523 -0
  811. vllm/model_executor/models/bert_with_rope.py +769 -0
  812. vllm/model_executor/models/blip.py +339 -0
  813. vllm/model_executor/models/blip2.py +718 -0
  814. vllm/model_executor/models/bloom.py +373 -0
  815. vllm/model_executor/models/chameleon.py +1136 -0
  816. vllm/model_executor/models/chatglm.py +478 -0
  817. vllm/model_executor/models/clip.py +407 -0
  818. vllm/model_executor/models/commandr.py +472 -0
  819. vllm/model_executor/models/constant_size_cache.py +137 -0
  820. vllm/model_executor/models/dbrx.py +472 -0
  821. vllm/model_executor/models/deepseek.py +486 -0
  822. vllm/model_executor/models/deepseek_mtp.py +269 -0
  823. vllm/model_executor/models/deepseek_v2.py +843 -0
  824. vllm/model_executor/models/deepseek_vl2.py +648 -0
  825. vllm/model_executor/models/eagle.py +260 -0
  826. vllm/model_executor/models/exaone.py +551 -0
  827. vllm/model_executor/models/fairseq2_llama.py +154 -0
  828. vllm/model_executor/models/falcon.py +510 -0
  829. vllm/model_executor/models/falcon_h1.py +685 -0
  830. vllm/model_executor/models/florence2.py +1103 -0
  831. vllm/model_executor/models/fuyu.py +389 -0
  832. vllm/model_executor/models/gemma.py +425 -0
  833. vllm/model_executor/models/gemma2.py +425 -0
  834. vllm/model_executor/models/gemma3.py +533 -0
  835. vllm/model_executor/models/gemma3_mm.py +709 -0
  836. vllm/model_executor/models/glm.py +23 -0
  837. vllm/model_executor/models/glm4.py +305 -0
  838. vllm/model_executor/models/glm4v.py +648 -0
  839. vllm/model_executor/models/gpt2.py +328 -0
  840. vllm/model_executor/models/gpt_bigcode.py +335 -0
  841. vllm/model_executor/models/gpt_j.py +339 -0
  842. vllm/model_executor/models/gpt_neox.py +332 -0
  843. vllm/model_executor/models/granite.py +493 -0
  844. vllm/model_executor/models/granite_speech.py +779 -0
  845. vllm/model_executor/models/granitemoe.py +437 -0
  846. vllm/model_executor/models/granitemoehybrid.py +586 -0
  847. vllm/model_executor/models/granitemoeshared.py +341 -0
  848. vllm/model_executor/models/gritlm.py +224 -0
  849. vllm/model_executor/models/grok1.py +546 -0
  850. vllm/model_executor/models/h2ovl.py +546 -0
  851. vllm/model_executor/models/idefics2_vision_model.py +389 -0
  852. vllm/model_executor/models/idefics3.py +776 -0
  853. vllm/model_executor/models/interfaces.py +572 -0
  854. vllm/model_executor/models/interfaces_base.py +164 -0
  855. vllm/model_executor/models/intern_vit.py +480 -0
  856. vllm/model_executor/models/internlm2.py +455 -0
  857. vllm/model_executor/models/internlm2_ve.py +147 -0
  858. vllm/model_executor/models/internvl.py +1418 -0
  859. vllm/model_executor/models/jais.py +373 -0
  860. vllm/model_executor/models/jamba.py +592 -0
  861. vllm/model_executor/models/kimi_vl.py +577 -0
  862. vllm/model_executor/models/llama.py +644 -0
  863. vllm/model_executor/models/llama4.py +532 -0
  864. vllm/model_executor/models/llama_eagle.py +165 -0
  865. vllm/model_executor/models/llama_eagle3.py +263 -0
  866. vllm/model_executor/models/llava.py +866 -0
  867. vllm/model_executor/models/llava_next.py +586 -0
  868. vllm/model_executor/models/llava_next_video.py +471 -0
  869. vllm/model_executor/models/llava_onevision.py +956 -0
  870. vllm/model_executor/models/mamba.py +273 -0
  871. vllm/model_executor/models/mamba2.py +308 -0
  872. vllm/model_executor/models/mamba_cache.py +76 -0
  873. vllm/model_executor/models/medusa.py +219 -0
  874. vllm/model_executor/models/mimo.py +192 -0
  875. vllm/model_executor/models/mimo_mtp.py +285 -0
  876. vllm/model_executor/models/minicpm.py +592 -0
  877. vllm/model_executor/models/minicpm3.py +230 -0
  878. vllm/model_executor/models/minicpm_eagle.py +391 -0
  879. vllm/model_executor/models/minicpmo.py +759 -0
  880. vllm/model_executor/models/minicpmv.py +1287 -0
  881. vllm/model_executor/models/minimax_cache.py +36 -0
  882. vllm/model_executor/models/minimax_text_01.py +1301 -0
  883. vllm/model_executor/models/minimax_vl_01.py +364 -0
  884. vllm/model_executor/models/mistral3.py +604 -0
  885. vllm/model_executor/models/mixtral.py +488 -0
  886. vllm/model_executor/models/mixtral_quant.py +453 -0
  887. vllm/model_executor/models/mllama.py +1624 -0
  888. vllm/model_executor/models/mllama4.py +938 -0
  889. vllm/model_executor/models/mlp_speculator.py +206 -0
  890. vllm/model_executor/models/modernbert.py +331 -0
  891. vllm/model_executor/models/module_mapping.py +72 -0
  892. vllm/model_executor/models/molmo.py +1568 -0
  893. vllm/model_executor/models/moonvit.py +630 -0
  894. vllm/model_executor/models/mpt.py +331 -0
  895. vllm/model_executor/models/nemotron.py +508 -0
  896. vllm/model_executor/models/nemotron_h.py +573 -0
  897. vllm/model_executor/models/nemotron_nas.py +484 -0
  898. vllm/model_executor/models/nvlm_d.py +216 -0
  899. vllm/model_executor/models/olmo.py +389 -0
  900. vllm/model_executor/models/olmo2.py +414 -0
  901. vllm/model_executor/models/olmoe.py +468 -0
  902. vllm/model_executor/models/opt.py +412 -0
  903. vllm/model_executor/models/orion.py +349 -0
  904. vllm/model_executor/models/ovis.py +567 -0
  905. vllm/model_executor/models/paligemma.py +398 -0
  906. vllm/model_executor/models/persimmon.py +344 -0
  907. vllm/model_executor/models/phi.py +356 -0
  908. vllm/model_executor/models/phi3.py +19 -0
  909. vllm/model_executor/models/phi3_small.py +465 -0
  910. vllm/model_executor/models/phi3v.py +723 -0
  911. vllm/model_executor/models/phi4mm.py +1246 -0
  912. vllm/model_executor/models/phi4mm_audio.py +1233 -0
  913. vllm/model_executor/models/phi4mm_utils.py +1884 -0
  914. vllm/model_executor/models/phimoe.py +665 -0
  915. vllm/model_executor/models/pixtral.py +1316 -0
  916. vllm/model_executor/models/plamo2.py +738 -0
  917. vllm/model_executor/models/prithvi_geospatial_mae.py +232 -0
  918. vllm/model_executor/models/qwen.py +362 -0
  919. vllm/model_executor/models/qwen2.py +497 -0
  920. vllm/model_executor/models/qwen2_5_omni_thinker.py +904 -0
  921. vllm/model_executor/models/qwen2_5_vl.py +1166 -0
  922. vllm/model_executor/models/qwen2_audio.py +410 -0
  923. vllm/model_executor/models/qwen2_moe.py +540 -0
  924. vllm/model_executor/models/qwen2_rm.py +132 -0
  925. vllm/model_executor/models/qwen2_vl.py +1405 -0
  926. vllm/model_executor/models/qwen3.py +321 -0
  927. vllm/model_executor/models/qwen3_moe.py +535 -0
  928. vllm/model_executor/models/qwen_vl.py +785 -0
  929. vllm/model_executor/models/registry.py +622 -0
  930. vllm/model_executor/models/roberta.py +276 -0
  931. vllm/model_executor/models/siglip.py +524 -0
  932. vllm/model_executor/models/skyworkr1v.py +951 -0
  933. vllm/model_executor/models/smolvlm.py +52 -0
  934. vllm/model_executor/models/solar.py +506 -0
  935. vllm/model_executor/models/stablelm.py +343 -0
  936. vllm/model_executor/models/starcoder2.py +356 -0
  937. vllm/model_executor/models/tarsier.py +643 -0
  938. vllm/model_executor/models/telechat2.py +140 -0
  939. vllm/model_executor/models/teleflm.py +79 -0
  940. vllm/model_executor/models/transformers.py +508 -0
  941. vllm/model_executor/models/ultravox.py +656 -0
  942. vllm/model_executor/models/utils.py +731 -0
  943. vllm/model_executor/models/vision.py +147 -0
  944. vllm/model_executor/models/whisper.py +747 -0
  945. vllm/model_executor/models/zamba2.py +1009 -0
  946. vllm/model_executor/parameter.py +459 -0
  947. vllm/model_executor/pooling_metadata.py +72 -0
  948. vllm/model_executor/sampling_metadata.py +597 -0
  949. vllm/model_executor/utils.py +77 -0
  950. vllm/multimodal/__init__.py +33 -0
  951. vllm/multimodal/audio.py +106 -0
  952. vllm/multimodal/base.py +219 -0
  953. vllm/multimodal/hasher.py +118 -0
  954. vllm/multimodal/image.py +97 -0
  955. vllm/multimodal/inputs.py +876 -0
  956. vllm/multimodal/parse.py +461 -0
  957. vllm/multimodal/processing.py +1895 -0
  958. vllm/multimodal/profiling.py +258 -0
  959. vllm/multimodal/registry.py +331 -0
  960. vllm/multimodal/utils.py +436 -0
  961. vllm/multimodal/video.py +198 -0
  962. vllm/outputs.py +512 -0
  963. vllm/platforms/__init__.py +291 -0
  964. vllm/platforms/cpu.py +266 -0
  965. vllm/platforms/cuda.py +526 -0
  966. vllm/platforms/hpu.py +106 -0
  967. vllm/platforms/interface.py +538 -0
  968. vllm/platforms/neuron.py +150 -0
  969. vllm/platforms/rocm.py +435 -0
  970. vllm/platforms/tpu.py +216 -0
  971. vllm/platforms/xpu.py +156 -0
  972. vllm/plugins/__init__.py +94 -0
  973. vllm/plugins/lora_resolvers/README.md +15 -0
  974. vllm/plugins/lora_resolvers/__init__.py +0 -0
  975. vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
  976. vllm/pooling_params.py +54 -0
  977. vllm/profiler/__init__.py +0 -0
  978. vllm/profiler/layerwise_profile.py +375 -0
  979. vllm/profiler/utils.py +148 -0
  980. vllm/prompt_adapter/__init__.py +0 -0
  981. vllm/prompt_adapter/layers.py +83 -0
  982. vllm/prompt_adapter/models.py +358 -0
  983. vllm/prompt_adapter/request.py +37 -0
  984. vllm/prompt_adapter/utils.py +98 -0
  985. vllm/prompt_adapter/worker_manager.py +179 -0
  986. vllm/py.typed +2 -0
  987. vllm/reasoning/__init__.py +15 -0
  988. vllm/reasoning/abs_reasoning_parsers.py +192 -0
  989. vllm/reasoning/deepseek_r1_reasoning_parser.py +173 -0
  990. vllm/reasoning/granite_reasoning_parser.py +363 -0
  991. vllm/reasoning/qwen3_reasoning_parser.py +151 -0
  992. vllm/sampling_params.py +602 -0
  993. vllm/scalar_type.py +347 -0
  994. vllm/scripts.py +15 -0
  995. vllm/sequence.py +1568 -0
  996. vllm/spec_decode/__init__.py +0 -0
  997. vllm/spec_decode/batch_expansion.py +506 -0
  998. vllm/spec_decode/draft_model_runner.py +349 -0
  999. vllm/spec_decode/interfaces.py +99 -0
  1000. vllm/spec_decode/medusa_worker.py +138 -0
  1001. vllm/spec_decode/metrics.py +213 -0
  1002. vllm/spec_decode/mlp_speculator_worker.py +94 -0
  1003. vllm/spec_decode/mqa_scorer.py +160 -0
  1004. vllm/spec_decode/multi_step_worker.py +423 -0
  1005. vllm/spec_decode/ngram_worker.py +196 -0
  1006. vllm/spec_decode/proposer_worker_base.py +59 -0
  1007. vllm/spec_decode/smaller_tp_proposer_worker.py +196 -0
  1008. vllm/spec_decode/spec_decode_worker.py +1326 -0
  1009. vllm/spec_decode/target_model_runner.py +45 -0
  1010. vllm/spec_decode/top1_proposer.py +275 -0
  1011. vllm/spec_decode/util.py +277 -0
  1012. vllm/test_utils.py +130 -0
  1013. vllm/third_party/__init__.py +0 -0
  1014. vllm/third_party/pynvml.py +6140 -0
  1015. vllm/tracing.py +131 -0
  1016. vllm/transformers_utils/__init__.py +24 -0
  1017. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1018. vllm/transformers_utils/chat_templates/registry.py +60 -0
  1019. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1020. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1021. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1022. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1023. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1024. vllm/transformers_utils/config.py +887 -0
  1025. vllm/transformers_utils/configs/__init__.py +61 -0
  1026. vllm/transformers_utils/configs/arctic.py +207 -0
  1027. vllm/transformers_utils/configs/chatglm.py +72 -0
  1028. vllm/transformers_utils/configs/cohere2.py +195 -0
  1029. vllm/transformers_utils/configs/dbrx.py +280 -0
  1030. vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
  1031. vllm/transformers_utils/configs/eagle.py +85 -0
  1032. vllm/transformers_utils/configs/exaone.py +190 -0
  1033. vllm/transformers_utils/configs/falcon.py +90 -0
  1034. vllm/transformers_utils/configs/h2ovl.py +16 -0
  1035. vllm/transformers_utils/configs/internvl.py +54 -0
  1036. vllm/transformers_utils/configs/jais.py +238 -0
  1037. vllm/transformers_utils/configs/kimi_vl.py +37 -0
  1038. vllm/transformers_utils/configs/medusa.py +63 -0
  1039. vllm/transformers_utils/configs/minimax_text_01.py +70 -0
  1040. vllm/transformers_utils/configs/minimax_vl_01.py +71 -0
  1041. vllm/transformers_utils/configs/mllama.py +31 -0
  1042. vllm/transformers_utils/configs/mlp_speculator.py +68 -0
  1043. vllm/transformers_utils/configs/moonvit.py +33 -0
  1044. vllm/transformers_utils/configs/mpt.py +180 -0
  1045. vllm/transformers_utils/configs/nemotron.py +205 -0
  1046. vllm/transformers_utils/configs/nemotron_h.py +258 -0
  1047. vllm/transformers_utils/configs/nvlm_d.py +15 -0
  1048. vllm/transformers_utils/configs/ovis.py +184 -0
  1049. vllm/transformers_utils/configs/skyworkr1v.py +54 -0
  1050. vllm/transformers_utils/configs/solar.py +247 -0
  1051. vllm/transformers_utils/configs/telechat2.py +64 -0
  1052. vllm/transformers_utils/configs/ultravox.py +108 -0
  1053. vllm/transformers_utils/detokenizer.py +168 -0
  1054. vllm/transformers_utils/detokenizer_utils.py +189 -0
  1055. vllm/transformers_utils/processor.py +221 -0
  1056. vllm/transformers_utils/processors/__init__.py +8 -0
  1057. vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
  1058. vllm/transformers_utils/processors/ovis.py +420 -0
  1059. vllm/transformers_utils/s3_utils.py +162 -0
  1060. vllm/transformers_utils/tokenizer.py +302 -0
  1061. vllm/transformers_utils/tokenizer_base.py +149 -0
  1062. vllm/transformers_utils/tokenizer_group.py +120 -0
  1063. vllm/transformers_utils/tokenizers/__init__.py +10 -0
  1064. vllm/transformers_utils/tokenizers/mistral.py +493 -0
  1065. vllm/transformers_utils/utils.py +99 -0
  1066. vllm/triton_utils/__init__.py +14 -0
  1067. vllm/triton_utils/importing.py +50 -0
  1068. vllm/usage/__init__.py +0 -0
  1069. vllm/usage/usage_lib.py +256 -0
  1070. vllm/utils.py +2910 -0
  1071. vllm/v1/__init__.py +0 -0
  1072. vllm/v1/attention/__init__.py +0 -0
  1073. vllm/v1/attention/backends/__init__.py +0 -0
  1074. vllm/v1/attention/backends/cpu_attn.py +163 -0
  1075. vllm/v1/attention/backends/flash_attn.py +869 -0
  1076. vllm/v1/attention/backends/flashinfer.py +651 -0
  1077. vllm/v1/attention/backends/flex_attention.py +477 -0
  1078. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1079. vllm/v1/attention/backends/mla/common.py +931 -0
  1080. vllm/v1/attention/backends/mla/cutlass_mla.py +97 -0
  1081. vllm/v1/attention/backends/mla/flashmla.py +152 -0
  1082. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +220 -0
  1083. vllm/v1/attention/backends/mla/triton_mla.py +120 -0
  1084. vllm/v1/attention/backends/pallas.py +240 -0
  1085. vllm/v1/attention/backends/triton_attn.py +285 -0
  1086. vllm/v1/attention/backends/utils.py +52 -0
  1087. vllm/v1/core/__init__.py +0 -0
  1088. vllm/v1/core/block_pool.py +349 -0
  1089. vllm/v1/core/encoder_cache_manager.py +150 -0
  1090. vllm/v1/core/kv_cache_coordinator.py +363 -0
  1091. vllm/v1/core/kv_cache_manager.py +392 -0
  1092. vllm/v1/core/kv_cache_utils.py +996 -0
  1093. vllm/v1/core/sched/__init__.py +0 -0
  1094. vllm/v1/core/sched/interface.py +150 -0
  1095. vllm/v1/core/sched/output.py +154 -0
  1096. vllm/v1/core/sched/scheduler.py +1044 -0
  1097. vllm/v1/core/sched/utils.py +23 -0
  1098. vllm/v1/core/single_type_kv_cache_manager.py +403 -0
  1099. vllm/v1/engine/__init__.py +173 -0
  1100. vllm/v1/engine/async_llm.py +558 -0
  1101. vllm/v1/engine/coordinator.py +253 -0
  1102. vllm/v1/engine/core.py +961 -0
  1103. vllm/v1/engine/core_client.py +1129 -0
  1104. vllm/v1/engine/detokenizer.py +261 -0
  1105. vllm/v1/engine/exceptions.py +17 -0
  1106. vllm/v1/engine/llm_engine.py +317 -0
  1107. vllm/v1/engine/logprobs.py +199 -0
  1108. vllm/v1/engine/mm_input_cache.py +91 -0
  1109. vllm/v1/engine/output_processor.py +428 -0
  1110. vllm/v1/engine/parallel_sampling.py +133 -0
  1111. vllm/v1/engine/processor.py +407 -0
  1112. vllm/v1/executor/__init__.py +0 -0
  1113. vllm/v1/executor/abstract.py +113 -0
  1114. vllm/v1/executor/multiproc_executor.py +537 -0
  1115. vllm/v1/executor/ray_distributed_executor.py +62 -0
  1116. vllm/v1/kv_cache_interface.py +194 -0
  1117. vllm/v1/metrics/__init__.py +0 -0
  1118. vllm/v1/metrics/loggers.py +523 -0
  1119. vllm/v1/metrics/prometheus.py +82 -0
  1120. vllm/v1/metrics/ray_wrappers.py +131 -0
  1121. vllm/v1/metrics/reader.py +246 -0
  1122. vllm/v1/metrics/stats.py +239 -0
  1123. vllm/v1/outputs.py +116 -0
  1124. vllm/v1/request.py +193 -0
  1125. vllm/v1/sample/__init__.py +0 -0
  1126. vllm/v1/sample/metadata.py +44 -0
  1127. vllm/v1/sample/ops/__init__.py +0 -0
  1128. vllm/v1/sample/ops/bad_words.py +39 -0
  1129. vllm/v1/sample/ops/penalties.py +59 -0
  1130. vllm/v1/sample/ops/topk_topp_sampler.py +293 -0
  1131. vllm/v1/sample/rejection_sampler.py +631 -0
  1132. vllm/v1/sample/sampler.py +286 -0
  1133. vllm/v1/sample/tpu/__init__.py +0 -0
  1134. vllm/v1/sample/tpu/metadata.py +124 -0
  1135. vllm/v1/sample/tpu/sampler.py +145 -0
  1136. vllm/v1/serial_utils.py +315 -0
  1137. vllm/v1/spec_decode/__init__.py +0 -0
  1138. vllm/v1/spec_decode/eagle.py +432 -0
  1139. vllm/v1/spec_decode/medusa.py +62 -0
  1140. vllm/v1/spec_decode/metadata.py +62 -0
  1141. vllm/v1/spec_decode/metrics.py +178 -0
  1142. vllm/v1/spec_decode/ngram_proposer.py +132 -0
  1143. vllm/v1/spec_decode/utils.py +46 -0
  1144. vllm/v1/structured_output/__init__.py +222 -0
  1145. vllm/v1/structured_output/backend_guidance.py +245 -0
  1146. vllm/v1/structured_output/backend_types.py +134 -0
  1147. vllm/v1/structured_output/backend_xgrammar.py +318 -0
  1148. vllm/v1/structured_output/request.py +86 -0
  1149. vllm/v1/structured_output/utils.py +175 -0
  1150. vllm/v1/utils.py +743 -0
  1151. vllm/v1/worker/__init__.py +0 -0
  1152. vllm/v1/worker/block_table.py +142 -0
  1153. vllm/v1/worker/cpu_model_runner.py +86 -0
  1154. vllm/v1/worker/cpu_worker.py +152 -0
  1155. vllm/v1/worker/gpu_input_batch.py +681 -0
  1156. vllm/v1/worker/gpu_model_runner.py +2320 -0
  1157. vllm/v1/worker/gpu_worker.py +393 -0
  1158. vllm/v1/worker/lora_model_runner_mixin.py +173 -0
  1159. vllm/v1/worker/tpu_model_runner.py +1673 -0
  1160. vllm/v1/worker/tpu_worker.py +299 -0
  1161. vllm/v1/worker/utils.py +111 -0
  1162. vllm/v1/worker/worker_base.py +65 -0
  1163. vllm/version.py +41 -0
  1164. vllm/vllm_flash_attn/.gitkeep +0 -0
  1165. vllm/worker/__init__.py +0 -0
  1166. vllm/worker/cache_engine.py +145 -0
  1167. vllm/worker/cpu_enc_dec_model_runner.py +326 -0
  1168. vllm/worker/cpu_model_runner.py +671 -0
  1169. vllm/worker/cpu_pooling_model_runner.py +125 -0
  1170. vllm/worker/cpu_worker.py +450 -0
  1171. vllm/worker/enc_dec_model_runner.py +555 -0
  1172. vllm/worker/hpu_model_runner.py +2320 -0
  1173. vllm/worker/hpu_worker.py +484 -0
  1174. vllm/worker/model_runner.py +2178 -0
  1175. vllm/worker/model_runner_base.py +282 -0
  1176. vllm/worker/multi_step_hpu_worker.py +123 -0
  1177. vllm/worker/multi_step_model_runner.py +911 -0
  1178. vllm/worker/multi_step_neuron_model_runner.py +84 -0
  1179. vllm/worker/multi_step_neuronx_distributed_model_runner.py +63 -0
  1180. vllm/worker/multi_step_tpu_worker.py +108 -0
  1181. vllm/worker/multi_step_worker.py +197 -0
  1182. vllm/worker/neuron_model_runner.py +460 -0
  1183. vllm/worker/neuron_worker.py +193 -0
  1184. vllm/worker/neuronx_distributed_model_runner.py +294 -0
  1185. vllm/worker/pooling_model_runner.py +211 -0
  1186. vllm/worker/tpu_model_runner.py +909 -0
  1187. vllm/worker/tpu_worker.py +337 -0
  1188. vllm/worker/utils.py +53 -0
  1189. vllm/worker/worker.py +577 -0
  1190. vllm/worker/worker_base.py +646 -0
  1191. vllm/worker/xpu_model_runner.py +606 -0
  1192. vllm/worker/xpu_worker.py +186 -0
  1193. vllm_cpu_amxbf16-0.9.1.dist-info/METADATA +305 -0
  1194. vllm_cpu_amxbf16-0.9.1.dist-info/RECORD +1197 -0
  1195. vllm_cpu_amxbf16-0.9.1.dist-info/WHEEL +5 -0
  1196. vllm_cpu_amxbf16-0.9.1.dist-info/entry_points.txt +5 -0
  1197. vllm_cpu_amxbf16-0.9.1.dist-info/top_level.txt +1 -0
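The single hunk that follows appears, judging from its Habana-specific copyright header and imports, to correspond to vllm/worker/hpu_model_runner.py (listed above with +2320 lines). Purely as an illustrative sketch -- not content shipped in the wheel -- the bucket-rounding and list-padding helpers defined near the top of that hunk can be exercised like this; the sample values are hypothetical:

    # Sketch only: round_up/pad_list as defined in the hunk below, plus sample calls.
    def round_up(value: int, k: int):
        # Round value up to the next multiple of k (the bucket size).
        return (value + k - 1) // k * k

    def pad_list(input, k, v):
        # Pad the list with v until its length is a multiple of k.
        input_len = len(input)
        target_len = round_up(input_len, k)
        padding = target_len - input_len
        return input + [v] * padding

    assert round_up(13, 4) == 16                      # 13 falls into the 16-wide bucket
    assert pad_list([7, 8, 9], 4, 0) == [7, 8, 9, 0]  # padded to the bucket boundary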
@@ -0,0 +1,2320 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+
4
+ ###############################################################################
5
+ # Copyright (C) 2024 Habana Labs, Ltd. an Intel Company
6
+ ###############################################################################
7
+
8
+ import collections
9
+ import contextlib
10
+ import dataclasses
11
+ import functools
12
+ import gc
13
+ import itertools
14
+ import math
15
+ import os
16
+ import time
17
+ from array import array
18
+ from enum import Enum, IntEnum
19
+ from typing import (TYPE_CHECKING, Any, Callable, Dict, List, NamedTuple,
20
+ Optional, Set, Tuple, Type, TypeVar, Union)
21
+
22
+ import habana_frameworks.torch as htorch
23
+ import habana_frameworks.torch.internal.bridge_config as bc
24
+ import torch
25
+ import torch.nn as nn
26
+ import vllm_hpu_extension.environment as environment
27
+ from vllm_hpu_extension.bucketing.common import get_bucketing_context
28
+ from vllm_hpu_extension.ops import LoraMask as LoraMask
29
+ from vllm_hpu_extension.profiler import (HabanaHighLevelProfiler,
30
+ HabanaMemoryProfiler, format_bytes)
31
+
32
+ import vllm.envs as envs
33
+ from vllm.attention import AttentionMetadata, get_attn_backend
34
+ from vllm.config import DeviceConfig, VllmConfig
35
+ from vllm.distributed import broadcast_tensor_dict
36
+ from vllm.distributed.parallel_state import get_world_group
37
+ from vllm.forward_context import set_forward_context
38
+ from vllm.logger import init_logger
39
+ from vllm.lora.layers import LoRAMapping
40
+ from vllm.lora.request import LoRARequest
41
+ from vllm.lora.worker_manager import LRUCacheWorkerLoRAManager
42
+ from vllm.model_executor import SamplingMetadata
43
+ from vllm.model_executor.layers.layernorm import RMSNorm
44
+ from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler
45
+ from vllm.model_executor.layers.vocab_parallel_embedding import (
46
+ VocabParallelEmbedding)
47
+ from vllm.model_executor.model_loader import get_model
48
+ from vllm.model_executor.sampling_metadata import SequenceGroupToSample
49
+ from vllm.multimodal import BatchedTensorInputs, MultiModalKwargs
50
+ from vllm.sampling_params import SamplingParams
51
+ from vllm.sequence import (CompletionSequenceGroupOutput, IntermediateTensors,
52
+ Logprob, SequenceData, SequenceGroupMetadata,
53
+ SequenceOutput)
54
+ from vllm.utils import (bind_kv_cache, is_pin_memory_available,
55
+ make_tensor_with_pad)
56
+ from vllm.worker.model_runner_base import (
57
+ ModelRunnerBase, ModelRunnerInputBase,
58
+ _add_attn_metadata_broadcastable_dict,
59
+ _add_sampling_metadata_broadcastable_dict,
60
+ _init_attn_metadata_from_tensor_dict,
61
+ _init_sampling_metadata_from_tensor_dict)
62
+
63
+ if TYPE_CHECKING:
64
+ from vllm.attention.backends.abstract import AttentionBackend
65
+
66
+ logger = init_logger(__name__)
67
+
68
+ _TYPE_CACHE = {}
69
+ # These values are assumed to be zero in several places.
70
+ # Use caution when updating them!
71
+ _PAD_SLOT_ID = 0
72
+ _PAD_BLOCK_ID = 0
73
+
74
+ LORA_WARMUP_RANK = 8
75
+
76
+ DUMMY_TOKEN_ID = -1
77
+
78
+
79
+ class PhaseType(Enum):
80
+ PREFILL = 'prefill'
81
+ PREFIX_PREFILL = 'prefix_prefill'
82
+ DECODE = 'decode'
83
+
84
+
85
+ def subtuple(obj: object,
86
+ typename: str,
87
+ to_copy: List[str],
88
+ to_override: Optional[Dict[str, object]] = None):
89
+ if obj is None:
90
+ return None
91
+ if to_override is None:
92
+ to_override = {}
93
+ fields = set(to_copy) | set(to_override.keys())
94
+ if type(obj) is dict:
95
+ values = {key: obj[key] for key in fields if key in obj}
96
+ else:
97
+ values = {f: to_override.get(f, getattr(obj, f)) for f in fields}
98
+ if typename not in _TYPE_CACHE:
99
+ _TYPE_CACHE[typename] = collections.namedtuple(typename,
100
+ ' '.join(fields))
101
+ return _TYPE_CACHE[typename](**values)
102
+
103
+
104
+ def round_up(value: int, k: int):
105
+ return (value + k - 1) // k * k
106
+
107
+
108
+ def align_workers(value, op):
109
+ group = get_world_group().cpu_group
110
+ world_size = torch.distributed.get_world_size()
111
+ if world_size <= 1:
112
+ return value
113
+ value_t = torch.tensor(value, device='cpu')
114
+ torch.distributed.all_reduce(value_t, op=op, group=group)
115
+ return value_t.item()
116
+
117
+
118
+ def setup_profiler():
119
+ schedule = torch.profiler.schedule(wait=0, warmup=2, active=1, repeat=1)
120
+ DEVICE = 'hpu'
121
+ activities = [torch.profiler.ProfilerActivity.CPU]
122
+ activities.extend([torch.profiler.ProfilerActivity.HPU] if DEVICE ==
123
+ 'hpu' else [])
124
+ #from habana_frameworks.torch.activity_profiler import DebugActivity
125
+ #debug_activities=[DebugActivity.BRIDGE_FUNCTION_CALLS]
126
+
127
+ profiler = torch.profiler.profile(
128
+ schedule=schedule,
129
+ activities=activities,
130
+ #debug_activities=debug_activities,
131
+ on_trace_ready=torch.profiler.tensorboard_trace_handler('.',
132
+ use_gzip=True),
133
+ record_shapes=False,
134
+ with_stack=True)
135
+ return profiler
136
+
137
+
138
+ def pad_list(input, k, v):
139
+ input_len = len(input)
140
+ target_len = round_up(input_len, k)
141
+ padding = target_len - input_len
142
+ return input + [v] * padding
143
+
144
+
145
+ def gather_list(input, indices, v):
146
+ return [input[i] if i is not None else v for i in indices]
147
+
148
+
149
+ def flatten(in_list):
150
+ return list(itertools.chain(*in_list))
151
+
152
+
153
+ def precompute_indices_and_offsets(block_size, slot_mapping, is_prompt):
154
+ slot_mapping = slot_mapping.flatten()
155
+ indices = torch.div(slot_mapping, block_size, rounding_mode="floor")
156
+ if is_prompt:
157
+ indices = indices.unflatten(0, (-1, block_size))[:, 0]
158
+ offsets = None
159
+ else:
160
+ offsets = torch.fmod(slot_mapping, block_size)
161
+ return indices, offsets
162
+
163
+
164
+ def modify_decoder_layer(module: torch.nn.Module, suffix="DecoderLayer"):
165
+ if module.__class__.__name__.endswith(suffix):
166
+
167
+ def forward_hook(module, args, output):
168
+ htorch.core.mark_step()
169
+ return output
170
+
171
+ module.register_forward_hook(forward_hook)
172
+
173
+ for child_name, child_module in module.named_children():
174
+ modify_decoder_layer(child_module)
175
+
176
+
177
+ class HpuModelAdapter:
178
+
179
+ def __init__(self, model, vllm_config):
180
+ self.model = model
181
+ self.sampler = get_sampler()
182
+ self.prefill_use_fusedsdpa = os.getenv('VLLM_PROMPT_USE_FUSEDSDPA',
183
+ '0').lower() in ['1', 'true']
184
+ self.vllm_config = vllm_config
185
+ self.block_size = vllm_config.cache_config.block_size
186
+ self.dtype = vllm_config.model_config.dtype
187
+ enforce_eager = vllm_config.model_config.enforce_eager
188
+
189
+ if not htorch.utils.internal.is_lazy() and not enforce_eager:
190
+ if os.getenv('VLLM_REGIONAL_COMPILATION',
191
+ 'true').lower() == 'true':
192
+ self.regional_compilation_layers_list = [
193
+ RMSNorm, VocabParallelEmbedding
194
+ ]
195
+ self._regional_compilation(self.model)
196
+ else:
197
+ self.model = torch.compile(self.model,
198
+ backend='hpu_backend',
199
+ dynamic=False)
200
+
201
+ def _regional_compilation(self,
202
+ module,
203
+ parent_module=None,
204
+ module_name=None):
205
+ if isinstance(module, torch.nn.ModuleList):
206
+ for children_name, children_module in module.named_children():
207
+ self._compile_region(module, children_name, children_module)
208
+ elif any(
209
+ isinstance(module, layer)
210
+ for layer in self.regional_compilation_layers_list):
211
+ self._compile_region(parent_module, module_name, module)
212
+ else:
213
+ for children_name, children_module in module.named_children():
214
+ self._regional_compilation(children_module, module,
215
+ children_name)
216
+
217
+ def _compile_region(self, model, name, module):
218
+ module = torch.compile(module, backend='hpu_backend', dynamic=False)
219
+ setattr(model, name, module)
220
+
221
+ def _set_attn_bias(self, attn_metadata, batch_size, seq_len, device,
222
+ dtype):
223
+ if (attn_metadata is None
224
+ or (self.prefill_use_fusedsdpa \
225
+ and attn_metadata.block_list is None)
226
+ or not attn_metadata.is_prompt):
227
+ return attn_metadata
228
+
229
+ prefill_metadata = attn_metadata
230
+
231
+ seq_lens_t = prefill_metadata.seq_lens_tensor
232
+ context_lens_t = prefill_metadata.context_lens_tensor
233
+ query_lens_t = seq_lens_t - context_lens_t
234
+
235
+ block_list = attn_metadata.block_list
236
+ max_context_len = (block_list.size(-1) //
237
+ batch_size if block_list is not None else 0)
238
+ max_context_len = max_context_len * self.block_size
239
+ past_mask = torch.arange(0,
240
+ max_context_len,
241
+ dtype=torch.int32,
242
+ device=device)
243
+ past_mask = (past_mask.view(1, -1).expand(batch_size, -1).ge(
244
+ context_lens_t.view(-1, 1)).view(batch_size, 1, -1).expand(
245
+ batch_size, seq_len, -1).view(batch_size, 1, seq_len, -1))
246
+
247
+ len_mask = (torch.arange(0, seq_len, device=device,
248
+ dtype=torch.int32).view(1, seq_len).ge(
249
+ query_lens_t.unsqueeze(-1)).view(
250
+ batch_size, 1, 1, seq_len))
251
+ causal_mask = torch.triu(torch.ones((batch_size, 1, seq_len, seq_len),
252
+ device=device,
253
+ dtype=torch.bool),
254
+ diagonal=1)
255
+ mask = causal_mask.logical_or(len_mask)
256
+ mask = torch.concat((past_mask, mask), dim=-1)
257
+ attn_bias = (torch.zeros_like(mask, dtype=dtype).masked_fill_(
258
+ mask, -math.inf))
259
+ attn_metadata = prefill_metadata._replace(attn_bias=attn_bias)
260
+ return attn_metadata
261
+
262
+ def _set_block_mapping(self, metadata, batch_size, device, dtype):
263
+ mask = torch.arange(0,
264
+ self.block_size,
265
+ device=device,
266
+ dtype=torch.int32).unsqueeze(0)
267
+ mask = mask >= metadata.block_usage.unsqueeze(-1)
268
+ attn_bias = (torch.zeros_like(mask, dtype=dtype).masked_fill_(
269
+ mask, -math.inf))
270
+ if os.environ.get('VLLM_USE_FAKE_HPU',
271
+ '0') == '0' and htorch.utils.internal.is_lazy():
272
+ block_mapping = torch.nn.functional.one_hot(metadata.block_groups,
273
+ num_classes=batch_size)
274
+ else:
275
+ # Unfortunately one_hot on CPU/torch.compile mode/eager mode
276
+ # doesn't handle out of bounds classes so we need to convert
277
+ # all negative values to 0 (block_mapping) or bs (block_groups)
278
+ block_groups = metadata.block_groups.to(torch.long)
279
+ block_mapping = torch.nn.functional.relu(block_groups)
280
+ block_mapping = torch.nn.functional.one_hot(block_mapping,
281
+ num_classes=batch_size)
282
+ oob_values = block_groups.lt(0)
283
+ block_mapping.masked_fill_(oob_values.unsqueeze(-1), 0)
284
+ block_groups.masked_fill_(oob_values, batch_size)
285
+ metadata = metadata._replace(block_groups=block_groups)
286
+ block_mapping = block_mapping.to(dtype)
287
+ metadata = metadata._replace(block_mapping=block_mapping,
288
+ attn_bias=attn_bias)
289
+ return metadata
290
+
291
+ def _update_metadata(self, attn_metadata, batch_size, seq_len, device,
292
+ dtype):
293
+ if attn_metadata.is_prompt:
294
+ meta = attn_metadata
295
+ attn_metadata = self._set_attn_bias(meta, batch_size, seq_len,
296
+ device, dtype)
297
+ else:
298
+ meta = attn_metadata
299
+ attn_metadata = self._set_block_mapping(meta, batch_size, device,
300
+ dtype)
301
+ return attn_metadata
302
+
303
+ def forward(self, *args, **kwargs):
304
+ kwargs = kwargs.copy()
305
+ selected_token_indices = kwargs.pop('selected_token_indices')
306
+ if 'warmup_mode' in kwargs:
307
+ kwargs.pop('warmup_mode')
308
+ virtual_engine = 0
309
+ if 'virtual_engine' in kwargs:
310
+ virtual_engine = kwargs.pop('virtual_engine')
311
+ input_ids = kwargs['input_ids']
312
+ attn_metadata = self._update_metadata(kwargs.pop('attn_metadata'),
313
+ input_ids.size(0),
314
+ input_ids.size(1),
315
+ input_ids.device, self.dtype)
316
+ LoraMask.setLoraMask(kwargs.pop('lora_mask'))
317
+ with set_forward_context(attn_metadata, self.vllm_config,
318
+ virtual_engine):
319
+ hidden_states = self.model(*args, **kwargs)
320
+ hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
321
+ hidden_states = hidden_states.index_select(0,
322
+ selected_token_indices)
323
+ return hidden_states
324
+
325
+ def compute_logits(self, *args, **kwargs):
326
+ return self.model.compute_logits(*args, **kwargs)
327
+
328
+ def sample(self, *args, **kwargs):
329
+ return self.sampler(*args, **kwargs)
330
+
331
+
332
+ class PreparePromptMetadata(NamedTuple):
333
+ input_tokens: torch.Tensor
334
+ input_positions: List[List[int]]
335
+ attn_metadata: Optional[AttentionMetadata]
336
+ seq_lens: List[int]
337
+ query_lens: List[int]
338
+ lora_index_mapping: List[List[int]]
339
+ lora_prompt_mapping: List[List[int]]
340
+ lora_requests: Set[LoRARequest]
341
+ multi_modal_kwargs: Optional[Dict[str, BatchedTensorInputs]]
342
+ slot_mapping: List[List[int]]
343
+ lora_ids: List[int]
344
+
345
+ @classmethod
346
+ def empty(cls):
347
+ return PreparePromptMetadata(input_tokens=[],
348
+ input_positions=[],
349
+ attn_metadata=None,
350
+ seq_lens=[],
351
+ query_lens=[],
352
+ lora_index_mapping=[],
353
+ lora_prompt_mapping=[],
354
+ lora_requests=set(),
355
+ multi_modal_kwargs=None,
356
+ slot_mapping=[],
357
+ lora_ids=[])
358
+
359
+
360
+ class PrepareDecodeMetadata(NamedTuple):
361
+ input_tokens: torch.Tensor
362
+ input_positions: List[List[int]]
363
+ attn_metadata: Optional[AttentionMetadata]
364
+ lora_index_mapping: List[List[int]]
365
+ lora_prompt_mapping: List[List[int]]
366
+ lora_requests: Set[LoRARequest]
367
+ slot_mapping: List[List[int]]
368
+ lora_ids: List[int]
369
+
370
+ @classmethod
371
+ def empty(cls):
372
+ return PrepareDecodeMetadata(input_tokens=[],
373
+ input_positions=[],
374
+ attn_metadata=None,
375
+ lora_index_mapping=[],
376
+ lora_prompt_mapping=[],
377
+ lora_requests=set(),
378
+ slot_mapping=[],
379
+ lora_ids=[])
380
+
381
+
382
+ # How batches are constructed.
383
+ class BatchType(IntEnum):
384
+ # Every batch is prefill.
385
+ PREFILL = 0
386
+ # Every batch is decode.
387
+ DECODE = 1
388
+ # Batch is a mixture of prefill and decode.
389
+ MIXED = 2
390
+
391
+
392
+ TModelInputForHPU = TypeVar('TModelInputForHPU', bound="ModelInputForHPU")
393
+
394
+
395
+ @dataclasses.dataclass(frozen=True)
396
+ class ModelInputForHPU(ModelRunnerInputBase):
397
+ """
398
+ This base class contains metadata needed for the base model forward pass
399
+ but not metadata for possible additional steps, e.g., sampling. Model
400
+ runners that run additional steps should subclass this method to add
401
+ additional fields.
402
+ """
403
+ input_tokens: Optional[torch.Tensor] = None
404
+ input_positions: Optional[torch.Tensor] = None
405
+ seq_lens: Optional[List[int]] = None
406
+ query_lens: Optional[List[int]] = None
407
+ lora_mapping: Optional["LoRAMapping"] = None
408
+ lora_requests: Optional[Set[LoRARequest]] = None
409
+ attn_metadata: Optional["AttentionMetadata"] = None
410
+ multi_modal_kwargs: Optional[Dict[str, torch.Tensor]] = None
411
+ real_batch_size: Optional[int] = None
412
+ batch_size_padded: Optional[int] = None
413
+ virtual_engine: int = 0
414
+ lora_ids: Optional[List[int]] = None
415
+ async_callback: Optional[Callable] = None
416
+ is_first_multi_step: bool = True
417
+ is_last_step: bool = True
418
+
419
+ def as_broadcastable_tensor_dict(self) -> Dict[str, Any]:
420
+ tensor_dict = {
421
+ "input_tokens": self.input_tokens,
422
+ "input_positions": self.input_positions,
423
+ "lora_requests": self.lora_requests,
424
+ "lora_mapping": self.lora_mapping,
425
+ "multi_modal_kwargs": self.multi_modal_kwargs,
426
+ "real_batch_size": self.real_batch_size,
427
+ "batch_size_padded": self.batch_size_padded,
428
+ "virtual_engine": self.virtual_engine,
429
+ "lora_ids": self.lora_ids,
430
+ "is_first_multi_step": self.is_first_multi_step,
431
+ "is_last_step": self.is_last_step,
432
+ }
433
+ _add_attn_metadata_broadcastable_dict(tensor_dict, self.attn_metadata)
434
+ return tensor_dict
435
+
436
+ @classmethod
437
+ def from_broadcasted_tensor_dict(
438
+ cls: Type[TModelInputForHPU],
439
+ tensor_dict: Dict[str, Any],
440
+ attn_backend: Optional["AttentionBackend"] = None,
441
+ ) -> TModelInputForHPU:
442
+ if attn_backend is not None:
443
+ tensor_dict = _init_attn_metadata_from_tensor_dict(
444
+ attn_backend, tensor_dict)
445
+ return cls(**tensor_dict)
446
+
447
+
448
+ @dataclasses.dataclass(frozen=True)
449
+ class ModelInputForHPUWithSamplingMetadata(ModelInputForHPU):
450
+ """
451
+ Used by the ModelRunner.
452
+ """
453
+ sampling_metadata: Optional["SamplingMetadata"] = None
454
+ # Used for speculative decoding. We do not broadcast it because it is only
455
+ # used by the driver worker.
456
+ is_prompt: Optional[bool] = None
457
+
458
+ def as_broadcastable_tensor_dict(self) -> Dict[str, Any]:
459
+ tensor_dict = {
460
+ "input_tokens": self.input_tokens,
461
+ "input_positions": self.input_positions,
462
+ "lora_requests": self.lora_requests,
463
+ "lora_mapping": self.lora_mapping,
464
+ "multi_modal_kwargs": self.multi_modal_kwargs,
465
+ "lora_ids": self.lora_ids,
466
+ }
467
+ _add_attn_metadata_broadcastable_dict(tensor_dict, self.attn_metadata)
468
+ _add_sampling_metadata_broadcastable_dict(tensor_dict,
469
+ self.sampling_metadata)
470
+ return tensor_dict
471
+
472
+ @classmethod
473
+ def from_broadcasted_tensor_dict(
474
+ cls,
475
+ tensor_dict: Dict[str, Any],
476
+ attn_backend: Optional["AttentionBackend"] = None,
477
+ ) -> "ModelInputForHPUWithSamplingMetadata":
478
+ tensor_dict = _init_sampling_metadata_from_tensor_dict(tensor_dict)
479
+ # FIXME(kzawora): this fails for whatever reason - why?
480
+ if attn_backend is not None:
481
+ tensor_dict = _init_attn_metadata_from_tensor_dict(
482
+ attn_backend, tensor_dict)
483
+ return cls(**tensor_dict)
484
+
485
+
486
+ class HPUModelRunnerBase(ModelRunnerBase[TModelInputForHPU]):
487
+ """
488
+ Helper class for shared methods between HPU model runners.
489
+ """
490
+ _model_input_cls: Type[TModelInputForHPU]
491
+
492
+ def __init__(
493
+ self,
494
+ vllm_config: VllmConfig,
495
+ is_driver_worker: bool = False,
496
+ return_hidden_states: bool = False,
497
+ ):
498
+ ModelRunnerBase.__init__(self, vllm_config=vllm_config)
499
+ environment.set_model_config(self.model_config)
500
+ self.is_driver_worker = is_driver_worker
501
+ self.return_hidden_states = return_hidden_states
502
+
503
+ self.sliding_window = (self.model_config.get_sliding_window()
504
+ if self.model_config is not None else None)
505
+ self.device_config = (self.device_config if self.device_config
506
+ is not None else DeviceConfig())
507
+ self.device = self.device_config.device
508
+ self.enforce_eager = self.model_config.enforce_eager
509
+ self.max_num_seqs = self.scheduler_config.max_num_seqs
510
+ # NOTE(kzawora): Change that to scheduler_config.max_num_prefill_seqs
511
+ # once padding-aware scheduling gets merged
512
+ self.max_num_prefill_seqs = 64
513
+ self.max_model_len = self.scheduler_config.max_model_len
514
+ self.max_num_batched_tokens = \
515
+ self.scheduler_config.max_num_batched_tokens
516
+ self.block_size = self.cache_config.block_size
517
+
518
+ self.pin_memory = is_pin_memory_available()
519
+ self.kv_cache_dtype = self.cache_config.cache_dtype
520
+
521
+ self.attn_backend = get_attn_backend(
522
+ self.model_config.get_head_size(),
523
+ self.model_config.dtype,
524
+ self.kv_cache_dtype,
525
+ self.block_size,
526
+ self.model_config.is_attention_free,
527
+ )
528
+
529
+ # Lazy initialization
530
+ self.lora_manager: LRUCacheWorkerLoRAManager = None
531
+ self.model: torch.nn.Module = None
532
+ self.inc_initialized_successfully = False
533
+
534
+ # Profiler stats
535
+ self.profiler = HabanaHighLevelProfiler()
536
+ self.profiler_counter_helper = HabanaProfilerCounterHelper()
537
+ self.seen_configs: set = set()
538
+ self._mem_margin: Optional[int] = None
539
+ HPUBucketingContext = get_bucketing_context()
540
+ self.bucketing_ctx = HPUBucketingContext(self.max_num_seqs,
541
+ self.max_num_prefill_seqs,
542
+ self.block_size,
543
+ self.max_num_batched_tokens,
544
+ False, self.max_model_len)
545
+ self.graphed_buckets: Set[Any] = set()
546
+ self._set_gc_threshold()
547
+ if self.vllm_config.cache_config.enable_prefix_caching:
548
+ os.environ.setdefault("VLLM_CONTIGUOUS_PA", "False")
549
+ assert os.environ.get(
550
+ "VLLM_CONTIGUOUS_PA",
551
+ "").lower() != "true", "Contiguous PA doesn't support APC"
552
+ self.use_contiguous_pa = envs.VLLM_USE_HPU_CONTIGUOUS_CACHE_FETCH
553
+
554
+ # For multi-step scheduling
555
+ self.cached_step_outputs: List[torch.Tensor] = []
556
+ # For delayed sampling
557
+ self.cached_step_inputs: List[
558
+ ModelInputForHPUWithSamplingMetadata] = []
559
+
560
+ def _set_gc_threshold(self) -> None:
561
+ # Read https://docs.python.org/3/library/gc.html#gc.set_threshold
562
+ # for comprehensive description of gc generations.
563
+ # We can either use VLLM_GC_THR_GEN[0-2] (this has higher priority)
564
+ # to set particular generation threshold or use simpler
565
+ # VLLM_GC_THR_MULTIPLIER to multiply default values.
566
+ default_gc_thrs = list(gc.get_threshold())
567
+ requested_gc_thrs = [0] * len(default_gc_thrs)
568
+ for i in range(len(default_gc_thrs)):
569
+ requested_gc_thrs[i] = int(
570
+ os.environ.get(f'VLLM_GC_THR_GEN{i}', default_gc_thrs[i]))
571
+ if requested_gc_thrs == default_gc_thrs:
572
+ gc_thr_multiplier = int(os.environ.get('VLLM_GC_THR_MULTIPLIER',
573
+ 2))
574
+ requested_gc_thrs = [
575
+ t * gc_thr_multiplier for t in default_gc_thrs
576
+ ]
577
+ gc.set_threshold(*requested_gc_thrs)
578
+
579
+ self.skip_warmup = os.environ.get('VLLM_SKIP_WARMUP',
580
+ 'false').lower() == 'true'
581
+
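The GC tuning above can be reproduced outside the runner; a minimal sketch of the multiplier path, assuming no VLLM_GC_THR_GEN* overrides are set and CPython's default thresholds:

    import gc
    import os

    default_gc_thrs = list(gc.get_threshold())            # typically [700, 10, 10]
    multiplier = int(os.environ.get('VLLM_GC_THR_MULTIPLIER', 2))
    # With the default multiplier of 2 this yields (1400, 20, 20), i.e. the
    # collector runs roughly half as often during warmup and graph capture.
    gc.set_threshold(*[t * multiplier for t in default_gc_thrs])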
582
+ def load_model(self) -> None:
583
+ import habana_frameworks.torch.core as htcore
584
+ if self.model_config.quantization == 'inc' or \
585
+ self.model_config.quantization == 'fp8':
586
+ htcore.hpu_set_env()
587
+ with HabanaMemoryProfiler() as m:
588
+ with HabanaMemoryProfiler() as m_getmodel:
589
+ self.model = get_model(vllm_config=self.vllm_config)
590
+ msg = ("Pre-loading model weights on "
591
+ f"{next(self.model.parameters()).device} "
592
+ f"took {m_getmodel.get_summary_string()}")
593
+ logger.info(msg)
594
+
595
+ if self.lora_config:
596
+ assert hasattr(self.model, "embedding_modules"
597
+ ), "Model does not have embedding_modules"
598
+ assert hasattr(
599
+ self.model, "embedding_padding_modules"
600
+ ), "Model does not have embedding_padding_modules"
601
+ assert not self.lora_config.bias_enabled, \
602
+ "Bias support in LoRA is not enabled in HPU yet."
603
+ assert not self.lora_config.fully_sharded_loras, \
604
+ "Fully sharded LoRAs is not enabled in HPU yet."
605
+
606
+ # Use get_text_config() in case of multimodal models
607
+ text_config = self.model_config.hf_config.get_text_config()
608
+
609
+ self.lora_manager = LRUCacheWorkerLoRAManager(
610
+ self.scheduler_config.max_num_seqs,
611
+ self.scheduler_config.max_num_batched_tokens,
612
+ self.vocab_size,
613
+ self.lora_config,
614
+ self.device,
615
+ self.model.embedding_modules,
616
+ self.model.embedding_padding_modules,
617
+ max_position_embeddings=text_config.
618
+ max_position_embeddings,
619
+ )
620
+ self.model = self.lora_manager.create_lora_manager(self.model)
621
+
622
+ if self.model_config.quantization == 'inc':
623
+ logger.info("Preparing model with INC..")
624
+ with HabanaMemoryProfiler() as m_inc:
625
+ from neural_compressor.torch.quantization import (
626
+ FP8Config, convert, prepare)
627
+ config = FP8Config.from_json_file(
628
+ os.getenv("QUANT_CONFIG", ""))
629
+ if config.measure:
630
+ self.model = prepare(self.model, config)
631
+ elif config.quantize:
632
+ self.model = convert(self.model, config)
633
+ htcore.hpu_initialize(self.model,
634
+ mark_only_scales_as_const=True)
635
+ self.inc_initialized_successfully = True
636
+ logger.info("Preparing model with INC took %s",
637
+ m_inc.get_summary_string())
638
+ else:
639
+ self.model = self.model.to("hpu")
640
+ htcore.mark_step()
641
+ modify_decoder_layer(self.model)
642
+ torch.hpu.synchronize()
643
+
644
+ with HabanaMemoryProfiler() as m_wrap:
645
+ self.model = _maybe_wrap_in_hpu_graph(
646
+ self.model, vllm_config=self.vllm_config)
647
+ msg = f"Wrapping in HPU Graph took {m_wrap.get_summary_string()}"
648
+ logger.info(msg)
649
+
650
+ self.model_memory_usage = m.consumed_device_memory
651
+ msg = f"Loading model weights took in total {m.get_summary_string()}"
652
+ logger.info(msg)
653
+
654
+ def _add_dummy_seq(self, seq_group_metadata_list, is_prompt):
655
+ real_batch_size = len(seq_group_metadata_list)
656
+ batch_size_padded = self.bucketing_ctx.get_padded_batch_size(
657
+ real_batch_size, is_prompt)
658
+ batch_size_padding = batch_size_padded - real_batch_size
659
+
660
+ seq_group_metadata_list = seq_group_metadata_list.copy()
661
+
662
+ if batch_size_padding > 0:
663
+ dummy_seq_group_metadata = self.create_dummy_seq_group_metadata(
664
+ 0, 0, is_prompt)
665
+ seq_group_metadata_list.extend(dummy_seq_group_metadata
666
+ for _ in range(batch_size_padding))
667
+ return seq_group_metadata_list, real_batch_size, batch_size_padded
668
+
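A toy sketch of the padding above; the next-power-of-two rule used here is only a hypothetical stand-in for whatever get_padded_batch_size actually returns:

    real_batch_size = 5
    # hypothetical bucketing rule: round up to the next power of two
    batch_size_padded = 1 << (real_batch_size - 1).bit_length()   # 8
    batch_size_padding = batch_size_padded - real_batch_size      # 3 dummy groups appended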
669
+ def _maybe_wrap_in_hpu_graph(self, *args, **kwargs):
670
+ return htorch.hpu.wrap_in_hpu_graph(
671
+ HpuModelAdapter(*args, **kwargs), disable_tensor_cache=True
672
+ ) if htorch.utils.internal.is_lazy() else HpuModelAdapter(
673
+ *args, **kwargs)
674
+
675
+ def get_model(self) -> nn.Module:
676
+ return self.model
677
+
678
+ def _use_graphs(self, batch_size, seq_len, is_prompt):
679
+ if self.enforce_eager:
680
+ return False
681
+ if self.skip_warmup:
682
+ return True
683
+ return (batch_size, seq_len, is_prompt) in self.graphed_buckets
684
+
685
+ def _is_valid_bucket(self, bucket):
686
+ return bucket[0] * bucket[1] <= self.max_num_batched_tokens
687
+
688
+ def _prepare_prompt(
689
+ self,
690
+ seq_group_metadata_list: List[SequenceGroupMetadata],
691
+ ) -> PreparePromptMetadata:
692
+ input_tokens: List[List[int]] = []
693
+ input_positions: List[List[int]] = []
694
+ slot_mapping: List[List[int]] = []
695
+ lora_index_mapping: List[List[int]] = []
696
+ lora_prompt_mapping: List[List[int]] = []
697
+ lora_requests: Set[LoRARequest] = set()
698
+
699
+ seq_lens: List[int] = []
700
+ context_lens: List[int] = []
701
+ query_lens: List[int] = []
702
+ prefix_block_tables: List[List[int]] = []
703
+ multi_modal_kwargs_list: List[MultiModalKwargs] = []
704
+
705
+ if len(seq_group_metadata_list) == 0:
706
+ return PreparePromptMetadata.empty()
707
+
708
+ for seq_group_metadata in seq_group_metadata_list:
709
+ assert seq_group_metadata.is_prompt
710
+ seq_ids = list(seq_group_metadata.seq_data.keys())
711
+ assert len(seq_ids) == 1
712
+ seq_id = seq_ids[0]
713
+
714
+ computed_block_nums = seq_group_metadata.computed_block_nums
715
+ if (self.scheduler_config is not None
716
+ and self.scheduler_config.chunked_prefill_enabled
717
+ and not (computed_block_nums is None
718
+ or computed_block_nums == [])):
719
+ raise RuntimeError(
720
+ "chunked prefill cannot be used with prefix caching "
721
+ "now.")
722
+
723
+ token_chunk_size = seq_group_metadata.token_chunk_size
724
+ seq_data = seq_group_metadata.seq_data[seq_id]
725
+ context_len = seq_data.get_num_computed_tokens()
726
+ # We should use get_len here because in case of preemption
727
+ # it contains output tokens.
728
+ seq_len = min(seq_data.get_len(), context_len + token_chunk_size)
729
+ prompt_tokens = seq_data.get_token_ids()[context_len:seq_len]
730
+ seq_lens.append(seq_len)
731
+
732
+ # NOTE: This only works for oooooooxxx style attention.
733
+ if computed_block_nums is not None and len(
734
+ computed_block_nums) > 0 and self.sliding_window is None:
735
+ # Prefix is not supported with sliding_window
736
+ context_len = len(computed_block_nums) * self.block_size
737
+ if context_len == seq_len \
738
+ and self.vllm_config.cache_config.enable_prefix_caching:
739
+ # Fully cached prompt - compute only last token
740
+ context_len = context_len - 1
741
+ prompt_tokens = prompt_tokens[context_len:]
742
+ prefix_block_tables.append(computed_block_nums)
743
+ elif self.scheduler_config.chunked_prefill_enabled:
744
+ if seq_group_metadata.block_tables is not None:
745
+ # Prefill has chunked before.
746
+ block_table = seq_group_metadata.block_tables[seq_id]
747
+ prefix_block_tables.append(block_table)
748
+ else:
749
+ # The first prefill.
750
+ prefix_block_tables.append([])
751
+ else:
752
+ prefix_block_tables.append([])
753
+ # Right now, prefill start is always 0. However, this
754
+ # assumption can be changed once chunked prefill is introduced.
755
+ assert context_len == 0
756
+
757
+ # actual prompt lens
758
+ context_lens.append(context_len)
759
+ query_lens.append(seq_len - context_len)
760
+ input_tokens.append(prompt_tokens)
761
+ # NOTE(woosuk): Here we assume that the first token in the prompt
762
+ # is always the first token in the sequence.
763
+ input_positions.append(list(range(context_len, seq_len)))
764
+
765
+ mm_kwargs = seq_group_metadata.multi_modal_data
766
+ if mm_kwargs:
767
+ multi_modal_kwargs_list.append(mm_kwargs)
768
+
769
+ if seq_group_metadata.block_tables is None:
770
+ # During memory profiling, the block tables are not initialized
771
+ # yet. In this case, we just use a dummy slot mapping.
772
+ slot_mapping.append([_PAD_SLOT_ID] * seq_len)
773
+ continue
774
+
775
+ # Compute the slot mapping.
776
+ slot_mapping.append([])
777
+ block_table = seq_group_metadata.block_tables[seq_id]
778
+
779
+ # Mask the [0, start_idx) tokens of the prompt with _PAD_SLOT_ID,
780
+ # where start_idx is max(0, seq_len - sliding_window).
781
+ # For example, if the prompt len is 10, sliding window is 8, and
782
+ # block size is 4, the first two tokens are masked and the slot
783
+ # mapping will be [-1, -1, 2, 3, 4, 5, 6, 7, 0, 1].
784
+ start_idx = 0
785
+ if self.sliding_window is not None:
786
+ assert context_len == 0, (
787
+ "Prefix caching is currently not supported with "
788
+ "sliding window attention")
789
+ start_idx = max(0, seq_len - self.sliding_window)
790
+ for i in range(context_len, seq_len):
791
+ if i < start_idx:
792
+ slot_mapping[-1].append(_PAD_SLOT_ID)
793
+ continue
794
+
795
+ block_number = block_table[i // self.block_size]
796
+ block_offset = i % self.block_size
797
+ slot = block_number * self.block_size + block_offset
798
+ slot_mapping[-1].append(slot)
799
+
800
+ max_query_len = max(query_lens)
801
+ sum_query_len = sum(query_lens)
802
+ real_num_seqs = len(query_lens)
803
+ assert max_query_len > 0
804
+
805
+ max_prompt_len = max(
806
+ self.bucketing_ctx.get_padded_prompt_seq_len(max_query_len),
807
+ self.block_size)
808
+
809
+ lora_ids: List[int] = []
810
+ for seq_group_metadata, context_len in zip(seq_group_metadata_list,
811
+ context_lens):
812
+ lora_id = seq_group_metadata.lora_int_id
813
+ lora_ids.append(lora_id)
814
+
815
+ if lora_id > 0:
816
+ lora_requests.add(seq_group_metadata.lora_request)
817
+
818
+ lora_index_mapping += [lora_id] * max_prompt_len
819
+ lora_prompt_mapping.extend(
820
+ [lora_id] *
821
+ (max_prompt_len
822
+ if seq_group_metadata.sampling_params.prompt_logprobs else 1))
823
+
824
+ if any(context_lens):
825
+ assert not self.scheduler_config.chunked_prefill_enabled
826
+ # prefix caching
827
+
828
+ max_num_block = max(len(bt) for bt in prefix_block_tables)
829
+ prefix_block_list = list(
830
+ itertools.chain.from_iterable(
831
+ bt if len(bt) == max_num_block else bt +
832
+ ([_PAD_BLOCK_ID] * (max_num_block - len(bt)))
833
+ for bt in prefix_block_tables))
834
+
835
+ pad_len = len(prefix_block_list)
836
+ prefix_block_list = pad_list(prefix_block_list, pad_len,
837
+ _PAD_BLOCK_ID)
838
+
839
+ prefix_block_list_tensor = torch.tensor(prefix_block_list,
840
+ dtype=torch.long,
841
+ device=self.device)
842
+ else:
843
+ prefix_block_list_tensor = None
844
+
845
+ input_tokens = make_tensor_with_pad(input_tokens,
846
+ max_len=max_prompt_len,
847
+ pad=0,
848
+ dtype=torch.long,
849
+ device=self.device)
850
+
851
+ input_positions = make_tensor_with_pad(input_positions,
852
+ max_len=max_prompt_len,
853
+ pad=0,
854
+ dtype=torch.long,
855
+ device=self.device)
856
+
857
+ slot_mapping = make_tensor_with_pad(slot_mapping,
858
+ max_len=max_prompt_len,
859
+ pad=_PAD_SLOT_ID,
860
+ dtype=torch.long,
861
+ device=self.device)
862
+
863
+ seq_lens_tensor = torch.tensor(seq_lens,
864
+ dtype=torch.long,
865
+ device=self.device)
866
+
867
+ context_lens_tensor = torch.tensor(context_lens,
868
+ dtype=torch.long,
869
+ device=self.device)
870
+
871
+ block_indices, block_offsets = precompute_indices_and_offsets(
872
+ self.block_size, slot_mapping, True)
873
+ attn_metadata = self.attn_backend.make_metadata(
874
+ is_prompt=True,
875
+ block_list=prefix_block_list_tensor,
876
+ block_mapping=None,
877
+ block_usage=None,
878
+ block_indices=block_indices,
879
+ block_offsets=block_offsets,
880
+ block_groups=None,
881
+ attn_bias=None,
882
+ seq_lens_tensor=seq_lens_tensor,
883
+ context_lens_tensor=context_lens_tensor,
884
+ num_prefills=real_num_seqs,
885
+ num_prefill_tokens=sum_query_len,
886
+ num_decode_tokens=0,
887
+ slot_mapping=slot_mapping,
888
+ multi_modal_placeholder_index_maps=
889
+ None, # FIXME(kzawora): multi-modality will not work here
890
+ enable_kv_scales_calculation=False,
891
+ )
892
+ multi_modal_kwargs = MultiModalKwargs.batch(multi_modal_kwargs_list)
893
+
894
+ return PreparePromptMetadata(input_tokens=input_tokens,
895
+ input_positions=input_positions,
896
+ attn_metadata=attn_metadata,
897
+ seq_lens=seq_lens,
898
+ query_lens=query_lens,
899
+ lora_index_mapping=lora_index_mapping,
900
+ lora_prompt_mapping=lora_prompt_mapping,
901
+ lora_requests=lora_requests,
902
+ multi_modal_kwargs=multi_modal_kwargs,
903
+ slot_mapping=slot_mapping,
904
+ lora_ids=lora_ids)
905
+
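The sliding-window masking walked through in the comment above can be checked in isolation; a standalone sketch assuming block_size=4, sliding_window=8, a 10-token prompt, a hypothetical block_table of [0, 1, 0], and -1 standing in for _PAD_SLOT_ID:

    PAD_SLOT_ID = -1
    block_size, sliding_window, seq_len = 4, 8, 10
    block_table = [0, 1, 0]
    start_idx = max(0, seq_len - sliding_window)      # 2 -> first two tokens masked
    slot_mapping = [
        PAD_SLOT_ID if i < start_idx
        else block_table[i // block_size] * block_size + i % block_size
        for i in range(seq_len)
    ]
    assert slot_mapping == [-1, -1, 2, 3, 4, 5, 6, 7, 0, 1]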
906
+ def _prepare_decode(
907
+ self,
908
+ seq_group_metadata_list: List[SequenceGroupMetadata],
909
+ output=None,
910
+ ) -> PrepareDecodeMetadata:
911
+ input_tokens: List[List[int]] = []
912
+ input_positions: List[List[int]] = []
913
+ slot_mapping: List[List[int]] = []
914
+ seq_lens: List[int] = []
915
+ block_tables: List[List[int]] = []
916
+ lora_index_mapping: List[List[int]] = []
917
+ lora_prompt_mapping: List[List[int]] = []
918
+ lora_requests: Set[LoRARequest] = set()
919
+
920
+ if len(seq_group_metadata_list) == 0:
921
+ return PrepareDecodeMetadata.empty()
922
+ lora_ids: List[int] = []
923
+
924
+ dummy_slots = itertools.cycle(
925
+ range(_PAD_SLOT_ID, _PAD_SLOT_ID + self.block_size))
926
+
927
+ for seq_group_metadata in seq_group_metadata_list:
928
+ assert not seq_group_metadata.is_prompt
929
+ assert seq_group_metadata.token_chunk_size == 1
930
+
931
+ seq_ids = list(seq_group_metadata.seq_data.keys())
932
+ lora_id = seq_group_metadata.lora_int_id
933
+ lora_ids.append(lora_id)
934
+
935
+ if lora_id > 0:
936
+ lora_requests.add(seq_group_metadata.lora_request)
937
+
938
+ for seq_id in seq_ids:
939
+ seq_data = seq_group_metadata.seq_data[seq_id]
940
+ if output is None:
941
+ generation_token = seq_data.get_last_token_id()
942
+ input_tokens.append([generation_token])
943
+
944
+ seq_len = seq_data.get_len()
945
+ position = seq_len - 1
946
+ input_positions.append([position])
947
+
948
+ seq_len = seq_len if self.sliding_window is None else min(
949
+ seq_len, self.sliding_window)
950
+ seq_lens.append(seq_len)
951
+
952
+ block_table = seq_group_metadata.block_tables[seq_id]
953
+ num_fully_occupied_blocks = position // self.block_size
954
+ block_table = block_table[:num_fully_occupied_blocks + 1]
955
+
956
+ if len(block_table) == 0:
957
+ block_number = _PAD_BLOCK_ID
958
+ else:
959
+ block_number = block_table[position // self.block_size]
960
+ if block_number == _PAD_BLOCK_ID:
961
+ slot = next(dummy_slots)
962
+ else:
963
+ block_offset = position % self.block_size
964
+ slot = block_number * self.block_size + block_offset
965
+ slot_mapping.append([slot])
966
+ lora_index_mapping.append(lora_id)
967
+ lora_prompt_mapping.append(lora_id)
968
+
969
+ if self.sliding_window is not None:
970
+ sliding_window_blocks = (self.sliding_window //
971
+ self.block_size)
972
+ block_table = block_table[-sliding_window_blocks:]
973
+ block_tables.append(block_table)
974
+
975
+ if output is None:
976
+ input_tokens = torch.tensor(input_tokens,
977
+ dtype=torch.long,
978
+ device=self.device)
979
+ else:
980
+ real_batch_size = len(seq_group_metadata_list)
981
+ input_tokens = output[:real_batch_size]
982
+
983
+ input_positions = torch.tensor(input_positions,
984
+ dtype=torch.long,
985
+ device=self.device)
986
+
987
+ num_decode_tokens = sum(seq_lens)
988
+
989
+ last_block_usage = [
990
+ slot[0] % self.block_size + 1 for slot in slot_mapping
991
+ ]
992
+ block_groups = [[i] * len(bt) for i, bt in enumerate(block_tables)]
993
+ block_usage = [[self.block_size] * (len(bt) - 1) + [lbu]
994
+ for bt, lbu in zip(block_tables, last_block_usage)
995
+ if bt]
996
+
997
+ block_list = flatten(block_tables)
998
+ block_groups = flatten(block_groups)
999
+ block_usage = flatten(block_usage)
1000
+
1001
+ assert len(block_list) == len(block_groups)
1002
+ assert len(block_list) == len(block_usage)
1003
+
1004
+ padding_fn = None
1005
+ if self.use_contiguous_pa:
1006
+ block_bucket_size = max(max(block_list) + 1, len(block_list))
1007
+ block_bucket_size = self.bucketing_ctx.get_padded_decode_num_blocks(
1008
+ block_bucket_size)
1009
+ indices: List[Any]
1010
+ indices = [None] * block_bucket_size
1011
+ for i, bid in enumerate(block_list):
1012
+ indices[bid] = i
1013
+ padding_fn = lambda tensor, pad_value: gather_list(
1014
+ tensor, indices, pad_value)
1015
+ else:
1016
+ block_bucket_size = \
1017
+ self.bucketing_ctx.get_padded_decode_num_blocks(
1018
+ len(block_list))
1019
+ padding_fn = lambda tensor, pad_value: pad_list(
1020
+ tensor, block_bucket_size, pad_value)
1021
+
1022
+ block_list = padding_fn(block_list, _PAD_BLOCK_ID)
1023
+ block_groups = padding_fn(block_groups, -1)
1024
+ block_usage = padding_fn(block_usage, 1)
1025
+
1026
+ block_list = torch.tensor(block_list,
1027
+ dtype=torch.int,
1028
+ device=self.device)
1029
+ block_groups = torch.tensor(block_groups,
1030
+ dtype=torch.int,
1031
+ device=self.device)
1032
+ block_usage = torch.tensor(block_usage,
1033
+ dtype=self.model_config.dtype,
1034
+ device=self.device)
1035
+ slot_mapping = torch.tensor(slot_mapping,
1036
+ dtype=torch.long,
1037
+ device=self.device)
1038
+
1039
+ block_indices, block_offsets = precompute_indices_and_offsets(
1040
+ self.block_size, slot_mapping, False)
1041
+
1042
+ attn_metadata = self.attn_backend.make_metadata(
1043
+ is_prompt=False,
1044
+ block_list=block_list,
1045
+ block_mapping=None,
1046
+ block_usage=block_usage,
1047
+ block_indices=block_indices,
1048
+ block_offsets=block_offsets,
1049
+ block_groups=block_groups,
1050
+ attn_bias=None,
1051
+ seq_lens_tensor=None,
1052
+ context_lens_tensor=None,
1053
+ num_prefills=0,
1054
+ num_prefill_tokens=0,
1055
+ num_decode_tokens=num_decode_tokens,
1056
+ slot_mapping=slot_mapping,
1057
+ multi_modal_placeholder_index_maps=None,
1058
+ enable_kv_scales_calculation=False,
1059
+ )
1060
+ return PrepareDecodeMetadata(input_tokens=input_tokens,
1061
+ input_positions=input_positions,
1062
+ attn_metadata=attn_metadata,
1063
+ lora_index_mapping=lora_index_mapping,
1064
+ lora_prompt_mapping=lora_prompt_mapping,
1065
+ lora_requests=lora_requests,
1066
+ slot_mapping=slot_mapping,
1067
+ lora_ids=lora_ids)
1068
+
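A standalone sketch of the block_groups/block_usage bookkeeping above, using hypothetical block tables for two decoding sequences whose last slots fall at offsets 1 and 3 within their final blocks (block_size=4):

    block_size = 4
    block_tables = [[7, 2], [5]]
    last_block_usage = [2, 4]        # slot % block_size + 1 per sequence
    block_groups = [[i] * len(bt) for i, bt in enumerate(block_tables)]
    block_usage = [[block_size] * (len(bt) - 1) + [lbu]
                   for bt, lbu in zip(block_tables, last_block_usage) if bt]
    flatten = lambda lists: [x for lst in lists for x in lst]
    assert flatten(block_tables) == [7, 2, 5]     # block_list
    assert flatten(block_groups) == [0, 0, 1]     # owning sequence per block
    assert flatten(block_usage) == [4, 2, 4]      # valid tokens per block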
1069
+ def prepare_input_tensors(
1070
+ self,
1071
+ seq_group_metadata_list: List[SequenceGroupMetadata],
1072
+ ) -> Tuple[TModelInputForHPU, SamplingMetadata]:
1073
+ if len(seq_group_metadata_list) == 0:
1074
+ return self._model_input_cls(), None
1075
+
1076
+ input_tokens = None
1077
+ input_positions = None
1078
+ lora_mapping = None
1079
+ lora_requests = None
1080
+ multi_modal_kwargs = None
1081
+ batch_type = None
1082
+ seq_lens = None
1083
+ query_lens = None
1084
+ real_batch_size = None
1085
+ batch_size_padded = None
1086
+
1087
+ self.event_start = self.profiler.get_timestamp_us()
1088
+ is_prompt = seq_group_metadata_list[0].is_prompt
1089
+ base_event_name = 'prompt' if is_prompt else 'decode'
1090
+ self.profiler.start('internal', base_event_name)
1091
+
1092
+ seq_group_metadata_list, real_batch_size, batch_size_padded = (
1093
+ self._add_dummy_seq(seq_group_metadata_list, is_prompt))
1094
+
1095
+ prefill_reqs = []
1096
+ decode_reqs = []
1097
+ for seq_group_meta in seq_group_metadata_list:
1098
+ if seq_group_meta.is_prompt:
1099
+ prefill_reqs.append(seq_group_meta)
1100
+ else:
1101
+ decode_reqs.append(seq_group_meta)
1102
+
1103
+ # Prepare input tensors.
1104
+ (
1105
+ input_tokens,
1106
+ input_positions,
1107
+ prefill_attn_metadata,
1108
+ seq_lens,
1109
+ query_lens,
1110
+ lora_index_mapping,
1111
+ lora_prompt_mapping,
1112
+ lora_requests,
1113
+ multi_modal_kwargs,
1114
+ slot_mapping,
1115
+ lora_ids,
1116
+ ) = self._prepare_prompt(prefill_reqs)
1117
+ (
1118
+ decode_input_tokens,
1119
+ decode_input_positions,
1120
+ decode_attn_metadata,
1121
+ decode_lora_index_mapping,
1122
+ decode_lora_prompt_mapping,
1123
+ decode_lora_requests,
1124
+ decode_slot_mapping,
1125
+ decode_lora_ids,
1126
+ ) = self._prepare_decode(decode_reqs)
1127
+ sampling_metadata = SamplingMetadata.prepare(seq_group_metadata_list,
1128
+ seq_lens, query_lens,
1129
+ self.device,
1130
+ self.pin_memory)
1131
+
1132
+ if not self.scheduler_config.chunked_prefill_enabled:
1133
+ assert len(prefill_reqs) == 0 or len(decode_reqs) == 0
1134
+
1135
+ num_prefills = len(seq_lens)
1136
+ num_prefill_tokens = len(input_tokens)
1137
+ num_decode_tokens = len(decode_input_tokens)
1138
+
1139
+ # NOTE(kzawora): Here we diverge from GPU code - we don't
1140
+ # support mixed batches, so we either use decode or prefill
1141
+ # inputs, without coalescing.
1142
+ assert (num_prefills == 0 and num_decode_tokens > 0) or (
1143
+ num_prefills > 0
1144
+ and num_decode_tokens == 0), "HPU does not support mixed batches!"
1145
+ if num_decode_tokens > 0:
1146
+ input_tokens = decode_input_tokens
1147
+ input_positions = decode_input_positions
1148
+ slot_mapping = decode_slot_mapping
1149
+ lora_index_mapping = decode_lora_index_mapping
1150
+ lora_prompt_mapping = decode_lora_prompt_mapping
1151
+ lora_requests = decode_lora_requests
1152
+ lora_ids = decode_lora_ids
1153
+
1154
+ # FIXME: We need to adjust selected_token_indices to accommodate
1155
+ # for padding
1156
+ max_len = input_tokens.size(1)
1157
+ paddings = [max_len - q for q in query_lens]
1158
+ paddings = [0] + paddings[:-1]
1159
+ paddings = list(itertools.accumulate(paddings))
1160
+ paddings_prompt_logprobs = []
1161
+ for i, seq_group_metadata in enumerate(seq_group_metadata_list):
1162
+ if seq_group_metadata.sampling_params.prompt_logprobs is not None \
1163
+ and seq_group_metadata.is_prompt:
1164
+ paddings_prompt_logprobs += ([paddings[i]] * seq_lens[i])
1165
+ paddings = torch.tensor(
1166
+ paddings_prompt_logprobs if paddings_prompt_logprobs else paddings,
1167
+ dtype=sampling_metadata.selected_token_indices.dtype,
1168
+ device=sampling_metadata.selected_token_indices.device)
1169
+ sampling_metadata.selected_token_indices.add_(paddings)
1170
+
1171
+ if self.lora_config:
1172
+ lora_mapping = LoRAMapping(
1173
+ **dict(index_mapping=lora_index_mapping,
1174
+ prompt_mapping=lora_prompt_mapping,
1175
+ is_prefill=(num_prefills > 0)))
1176
+ else:
1177
+ lora_mapping = None
1178
+
1179
+ if (prefill_attn_metadata is not None
1180
+ and decode_attn_metadata is not None):
1181
+ batch_type = BatchType.MIXED
1182
+ raise NotImplementedError("Mixed batch is not supported on HPU")
1183
+ elif prefill_attn_metadata is not None:
1184
+ batch_type = BatchType.PREFILL
1185
+ else:
1186
+ batch_type = BatchType.DECODE
1187
+
1188
+ metadata_dict = {
1189
+ "input_tokens": input_tokens,
1190
+ "input_positions": input_positions,
1191
+ "selected_token_indices": sampling_metadata.selected_token_indices,
1192
+ "lora_requests": lora_requests,
1193
+ "lora_mapping": lora_mapping,
1194
+ "multi_modal_kwargs": multi_modal_kwargs,
1195
+ "num_prefill_tokens": num_prefill_tokens,
1196
+ "num_decode_tokens": num_decode_tokens,
1197
+ "slot_mapping": slot_mapping,
1198
+ "num_prefills": num_prefills,
1199
+ "batch_type": batch_type,
1200
+ "seq_lens": seq_lens,
1201
+ "query_lens": query_lens
1202
+ }
1203
+ if prefill_attn_metadata is not None:
1204
+ metadata_dict.update(prefill_attn_metadata.asdict_zerocopy())
1205
+ else:
1206
+ assert decode_attn_metadata is not None
1207
+ metadata_dict.update(decode_attn_metadata.asdict_zerocopy())
1208
+
1209
+ attn_metadata = prefill_attn_metadata if \
1210
+ prefill_attn_metadata is not None else decode_attn_metadata
1211
+
1212
+ return self._model_input_cls(input_tokens=input_tokens,
1213
+ seq_lens=seq_lens,
1214
+ query_lens=query_lens,
1215
+ input_positions=input_positions,
1216
+ attn_metadata=attn_metadata,
1217
+ lora_requests=lora_requests,
1218
+ lora_mapping=lora_mapping,
1219
+ multi_modal_kwargs=multi_modal_kwargs,
1220
+ real_batch_size=real_batch_size,
1221
+ batch_size_padded=batch_size_padded,
1222
+ lora_ids=lora_ids), \
1223
+ sampling_metadata
1224
+
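A small sketch of the selected_token_indices shift computed above, with hypothetical query_lens of [3, 5] padded to a max_len of 8:

    import itertools

    max_len, query_lens = 8, [3, 5]
    paddings = [max_len - q for q in query_lens]        # [5, 3]
    paddings = [0] + paddings[:-1]                      # no shift for the first sequence
    paddings = list(itertools.accumulate(paddings))     # [0, 5]
    # every selected token index of sequence i is shifted by the padding
    # inserted in front of it, here 0 for sequence 0 and 5 for sequence 1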
1225
+ def _seq_len(self, attn_metadata):
1226
+ if attn_metadata.num_prefills != 0:
1227
+ return attn_metadata.slot_mapping.size(1)
1228
+ else:
1229
+ return attn_metadata.block_list.numel()
1230
+
1231
+ def trim_attn_metadata(self, metadata: AttentionMetadata) -> object:
1232
+ # NOTE(kzawora): To anyone working on this in the future:
1233
+ # Trimming metadata is required when using HPUGraphs.
1234
+ # Attention metadata is going to be hashed by PT bridge, and
1235
+ # appropriate HPUGraphs will be matched based on all inputs' hash.
1236
+
1237
+ # Before you put more keys in here, make sure you know their
1238
+ # value type and make sure you know how it's going to be hashed.
1239
+ # You can find that information in input_hash function
1240
+ # in habana_frameworks/torch/hpu/graphs.py. You can also hash
1241
+ # it manually with torch.hpu.graphs.input_hash(attention_metadata)
1242
+
1243
+ # If you use primitive types here - they will get hashed based
1244
+ # on their value. You *will* get lots of excessive graph captures
1245
+ # (and an OOM eventually) if you decide to put something like
1246
+ # seq_len int here.
1247
+ # If you absolutely need a scalar, put it in a tensor. Tensors
1248
+ # get hashed using their metadata, not their values:
1249
+ # input_hash(torch.tensor(123)) == input_hash(torch.tensor(321))
1250
+ # input_hash(123) != input_hash(321)
1251
+ # input_hash("abc") != input_hash("cba")
1252
+ attention_metadata = subtuple(metadata, 'TrimmedAttentionMetadata', [
1253
+ 'attn_bias',
1254
+ 'seq_lens_tensor',
1255
+ 'context_lens_tensor',
1256
+ 'block_list',
1257
+ 'block_mapping',
1258
+ 'block_usage',
1259
+ 'slot_mapping',
1260
+ 'is_prompt',
1261
+ 'block_indices',
1262
+ 'block_offsets',
1263
+ 'block_groups',
1264
+ ])
1265
+ return attention_metadata
1266
+
1267
+ def create_dummy_seq_group_metadata(self,
1268
+ group_id,
1269
+ seq_len,
1270
+ is_prompt,
1271
+ lora_request=None):
1272
+ sampling_params = SamplingParams(temperature=0)
1273
+ num_blocks = math.ceil(seq_len / self.block_size)
1274
+ seq_len = max(seq_len, 1)
1275
+ if is_prompt:
1276
+ input_len = seq_len
1277
+ output_len = 0
1278
+ block_tables = None
1279
+ else:
1280
+ input_len = seq_len - 1
1281
+ output_len = 1
1282
+ block_tables = {group_id: [_PAD_BLOCK_ID] * num_blocks}
1283
+ prompt_token_ids = [0] * input_len
1284
+ output_token_ids = [1] * output_len
1285
+ prompt_token_ids_array = array('l', prompt_token_ids) # noqa: F821
1286
+ seq_data = SequenceData(prompt_token_ids_array)
1287
+ seq_data.output_token_ids = output_token_ids
1288
+ return SequenceGroupMetadata(request_id=str(group_id),
1289
+ is_prompt=(output_len == 0),
1290
+ seq_data={group_id: seq_data},
1291
+ sampling_params=sampling_params,
1292
+ block_tables=block_tables,
1293
+ lora_request=lora_request)
1294
+
1295
+ def profile_run(self) -> None:
1296
+ num_layers = self.model_config.get_num_layers(self.parallel_config)
1297
+ kv_caches = [None] * num_layers
1298
+ bind_kv_cache(
1299
+ self.vllm_config.compilation_config.static_forward_context,
1300
+ [kv_caches])
1301
+ _, max_seq_len = self.bucketing_ctx.get_max_prompt_shape()
1302
+ max_batch_size = min(self.max_num_seqs,
1303
+ self.max_num_batched_tokens // max_seq_len)
1304
+ self.warmup_scenario(max_batch_size, max_seq_len, True, kv_caches,
1305
+ False, True)
1306
+ return
1307
+
1308
+ def warmup_scenario(self,
1309
+ batch_size,
1310
+ seq_len,
1311
+ is_prompt,
1312
+ kv_caches,
1313
+ is_pt_profiler_run=False,
1314
+ is_lora_profile_run=False) -> None:
1315
+ use_graphs = self._use_graphs(batch_size, seq_len, is_prompt)
1316
+ scenario_name = ("warmup_"
1317
+ f"{'prompt' if is_prompt else 'decode'}_"
1318
+ f"bs{batch_size}_"
1319
+ f"seq{seq_len}_"
1320
+ f"graphs{'T' if use_graphs else 'F'}")
1321
+ # This represents the maximum number of different requests
1322
+ # that will have unique loras, and therefore the max amount of memory
1323
+ # consumption. Create dummy lora request copies from the lora request
1324
+ # passed in, which contains a lora from the lora warmup path.
1325
+ dummy_lora_requests: List[LoRARequest] = []
1326
+ dummy_lora_requests_per_seq: List[LoRARequest] = []
1327
+ if self.lora_config and is_lora_profile_run:
1328
+ assert self.lora_manager is not None
1329
+ with self.lora_manager.dummy_lora_cache():
1330
+ for idx in range(self.lora_config.max_loras):
1331
+ lora_id = idx + 1
1332
+ dummy_lora_request = LoRARequest(
1333
+ lora_name=f"warmup_{lora_id}",
1334
+ lora_int_id=lora_id,
1335
+ lora_local_path="/not/a/real/path",
1336
+ )
1337
+ self.lora_manager.add_dummy_lora(dummy_lora_request,
1338
+ rank=LORA_WARMUP_RANK)
1339
+ dummy_lora_requests.append(dummy_lora_request)
1340
+ dummy_lora_requests_per_seq = [
1341
+ dummy_lora_requests[idx % len(dummy_lora_requests)]
1342
+ for idx in range(batch_size)
1343
+ ]
1344
+ self.profiler.start('internal', scenario_name)
1345
+ times = 3 if use_graphs or is_pt_profiler_run else 1
1346
+ if is_prompt:
1347
+ seqs = [
1348
+ self.create_dummy_seq_group_metadata(
1349
+ i,
1350
+ seq_len,
1351
+ is_prompt,
1352
+ lora_request=dummy_lora_requests_per_seq[i]
1353
+ if dummy_lora_requests_per_seq else None)
1354
+ for i in range(batch_size)
1355
+ ]
1356
+ else:
1357
+ # FIXME: seq_len is actually number of blocks
1358
+ blocks = [seq_len // batch_size for _ in range(batch_size)]
1359
+ blocks[0] += seq_len % batch_size
1360
+ seqs = [
1361
+ self.create_dummy_seq_group_metadata(
1362
+ i,
1363
+ b * self.block_size - 1,
1364
+ is_prompt,
1365
+ lora_request=dummy_lora_requests_per_seq[i]
1366
+ if dummy_lora_requests_per_seq else None)
1367
+ for i, b in enumerate(blocks)
1368
+ ]
1369
+ torch.hpu.synchronize()
1370
+ profiler = None
1371
+ if is_pt_profiler_run and self.is_driver_worker:
1372
+ profiler = setup_profiler()
1373
+ profiler.start()
1374
+ for _ in range(times):
1375
+ inputs = self.prepare_model_input(seqs)
1376
+ is_single_step = \
1377
+ self.vllm_config.scheduler_config.num_scheduler_steps == 1
1378
+ if is_prompt or is_single_step:
1379
+ self.execute_model(inputs, None, warmup_mode=True)
1380
+ else: # decode with multi-step
1381
+ inputs = dataclasses.replace(inputs,
1382
+ is_first_multi_step=True,
1383
+ is_last_step=False)
1384
+ self.execute_model(inputs,
1385
+ None,
1386
+ warmup_mode=True,
1387
+ num_steps=2,
1388
+ seqs=seqs)
1389
+ inputs = dataclasses.replace(inputs,
1390
+ is_first_multi_step=False,
1391
+ is_last_step=True)
1392
+ self.execute_model(inputs,
1393
+ None,
1394
+ warmup_mode=True,
1395
+ num_steps=2,
1396
+ seqs=seqs)
1397
+ torch.hpu.synchronize()
1398
+ if profiler:
1399
+ profiler.step()
1400
+ if profiler:
1401
+ profiler.stop()
1402
+ self.profiler.end()
1403
+ gc.collect()
1404
+
1405
+ def remove_all_loras(self):
1406
+ if not self.lora_manager:
1407
+ raise RuntimeError("LoRA is not enabled.")
1408
+ self.lora_manager.remove_all_adapters()
1409
+
1410
+ def set_active_loras(self, lora_requests: Set[LoRARequest],
1411
+ lora_mapping: LoRAMapping) -> None:
1412
+ if not self.lora_manager:
1413
+ raise RuntimeError("LoRA is not enabled.")
1414
+ self.lora_manager.set_active_adapters(lora_requests, lora_mapping)
1415
+
1416
+ def add_lora(self, lora_request: LoRARequest) -> bool:
1417
+ if not self.lora_manager:
1418
+ raise RuntimeError("LoRA is not enabled.")
1419
+ return self.lora_manager.add_adapter(lora_request)
1420
+
1421
+ def remove_lora(self, lora_id: int) -> bool:
1422
+ if not self.lora_manager:
1423
+ raise RuntimeError("LoRA is not enabled.")
1424
+ return self.lora_manager.remove_adapter(lora_id)
1425
+
1426
+ def pin_lora(self, lora_id: int) -> bool:
1427
+ if not self.lora_manager:
1428
+ raise RuntimeError("LoRA is not enabled.")
1429
+ return self.lora_manager.pin_adapter(lora_id)
1430
+
1431
+ def list_loras(self) -> Set[int]:
1432
+ if not self.lora_manager:
1433
+ raise RuntimeError("LoRA is not enabled.")
1434
+ return self.lora_manager.list_adapters()
1435
+
1436
+ def log_warmup(self, phase, i, max_i, batch_size, seq_len):
1437
+ free_mem = format_bytes(
1438
+ HabanaMemoryProfiler.current_free_device_memory())
1439
+ dim = "num_blocks"
1440
+ if phase == "Prompt":
1441
+ dim = "seq_len"
1442
+ msg = (f"[Warmup][{phase}][{i+1}/{max_i}] "
1443
+ f"batch_size:{batch_size} "
1444
+ f"{dim}:{seq_len} "
1445
+ f"free_mem:{free_mem}")
1446
+ logger.info(msg)
1447
+
1448
+ def warmup_all_buckets(self, buckets, is_prompt, kv_caches):
1449
+ for i, (batch_size, seq_len) in enumerate(reversed(buckets)):
1450
+ self.log_warmup('Prompt' if is_prompt else 'Decode', i,
1451
+ len(buckets), batch_size, seq_len)
1452
+ self.warmup_scenario(batch_size, seq_len, is_prompt, kv_caches)
1453
+
1454
+ def warmup_graphs(self,
1455
+ strategy,
1456
+ buckets,
1457
+ is_prompt,
1458
+ kv_caches,
1459
+ available_mem,
1460
+ starting_mem=0,
1461
+ total_batch_seq=0.001):
1462
+ total_mem = starting_mem
1463
+ idx = 0
1464
+ phase = f'Graph/{"Prompt" if is_prompt else "Decode"}'
1465
+ num_candidates = len(buckets)
1466
+ ordering : Union[Callable[[Any], Tuple[Any, Any]], \
1467
+ Callable[[Any], Tuple[Any, Any, Any]]]
1468
+ if strategy == 'min_tokens':
1469
+ ordering = lambda b: (b[0] * b[1], b[1], b[0])
1470
+ elif strategy == 'max_bs':
1471
+ ordering = lambda b: (-b[0], b[1])
1472
+ else:
1473
+ raise NotImplementedError(
1474
+ f'Unsupported graph allocation strategy: {strategy}')
1475
+ buckets = list(sorted(buckets, key=ordering))
1476
+ captured_all = True
1477
+ for idx, (batch_size, seq_len) in enumerate(buckets):
1478
+ # Graph memory usage is proportional to seq dimension in a batch
1479
+ batch_seq = batch_size * seq_len if is_prompt else batch_size
1480
+ mem_estimate = batch_seq / total_batch_seq * total_mem
1481
+ if mem_estimate >= available_mem:
1482
+ captured_all = False
1483
+ continue
1484
+ graphed_bucket = (batch_size, seq_len, is_prompt)
1485
+ if graphed_bucket in self.graphed_buckets:
1486
+ continue
1487
+ self.graphed_buckets.add(graphed_bucket)
1488
+ self.log_warmup(phase, idx, num_candidates, batch_size, seq_len)
1489
+ with HabanaMemoryProfiler() as mem_prof:
1490
+ self.warmup_scenario(batch_size, seq_len, is_prompt, kv_caches)
1491
+ used_mem = align_workers(mem_prof.consumed_device_memory,
1492
+ torch.distributed.ReduceOp.MAX)
1493
+ available_mem -= used_mem
1494
+ total_mem += used_mem
1495
+ total_batch_seq += batch_seq
1496
+
1497
+ return total_mem, total_batch_seq, captured_all
1498
+
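The two capture orderings above can be compared on a few hypothetical (batch_size, seq_len) buckets; a minimal sketch:

    buckets = [(1, 128), (4, 128), (2, 256)]
    min_tokens = sorted(buckets, key=lambda b: (b[0] * b[1], b[1], b[0]))
    max_bs = sorted(buckets, key=lambda b: (-b[0], b[1]))
    assert min_tokens == [(1, 128), (4, 128), (2, 256)]   # cheapest graphs first
    assert max_bs == [(4, 128), (2, 256), (1, 128)]       # largest batch sizes first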
1499
+ def log_graph_warmup_summary(self, buckets, is_prompt, total_mem):
1500
+ num_candidates = len(buckets)
1501
+ phase = f'Graph/{"Prompt" if is_prompt else "Decode"}'
1502
+ graphed = list(c[:2] for c in self.graphed_buckets
1503
+ if c[2] == is_prompt)
1504
+ if num_candidates == 0:
1505
+ num_candidates = 1
1506
+ msg = (f'{phase} captured:{len(graphed)} '
1507
+ f'({100 * len(graphed) / num_candidates:.1f}%) '
1508
+ f'used_mem:{format_bytes(total_mem)} '
1509
+ f'buckets:{sorted(list(graphed))}')
1510
+ logger.info(msg)
1511
+
1512
+ @torch.inference_mode()
1513
+ def warmup_model(self, kv_caches: List[torch.Tensor]) -> None:
1514
+ max_blocks = kv_caches[0][0].size(0)
1515
+ self.bucketing_ctx.generate_decode_buckets(max_blocks)
1516
+ if profile := os.environ.get('VLLM_PT_PROFILE', None):
1517
+ phase, bs, seq_len, graph = profile.split('_')
1518
+ is_prompt = phase == 'prompt'
1519
+ graphs = graph == 't'
1520
+ if graphs:
1521
+ self.graphed_buckets.add((int(bs), int(seq_len), is_prompt))
1522
+ self.warmup_scenario(int(bs), int(seq_len), is_prompt, kv_caches,
1523
+ True)
1524
+ raise AssertionError("Finished profiling")
1525
+ if not htorch.utils.internal.is_lazy() and not self.enforce_eager:
1526
+ cache_size_limit = 1 + 3 * (
1527
+ len(self.bucketing_ctx.prompt_buckets) +
1528
+ len(self.bucketing_ctx.decode_buckets))
1529
+ torch._dynamo.config.cache_size_limit = max(
1530
+ cache_size_limit, torch._dynamo.config.cache_size_limit)
1531
+ # Multiply by 8 to follow the original default ratio between
1532
+ # the cache_size_limit and accumulated_cache_size_limit
1533
+ torch._dynamo.config.accumulated_cache_size_limit = max(
1534
+ cache_size_limit * 8,
1535
+ torch._dynamo.config.accumulated_cache_size_limit)
1536
+ if self.skip_warmup:
1537
+ logger.info("Skipping warmup...")
1538
+ return
1539
+ self.profiler.start('internal', 'warmup')
1540
+ start_mem = HabanaMemoryProfiler.current_device_memory_usage()
1541
+ start_time = time.perf_counter()
1542
+
1543
+ compile_only_mode_context = functools.partial(bc.env_setting,
1544
+ "PT_COMPILE_ONLY_MODE",
1545
+ True)
1546
+ can_use_compile_only_mode = True
1547
+ try:
1548
+ with compile_only_mode_context():
1549
+ pass
1550
+ logger.debug("Using PT_COMPILE_ONLY_MODE.")
1551
+ except KeyError:
1552
+ can_use_compile_only_mode = False
1553
+ logger.warning('Cannot use PT_COMPILE_ONLY_MODE. '
1554
+ 'Warmup time will be negatively impacted. '
1555
+ 'Please update Gaudi Software Suite.')
1556
+ with compile_only_mode_context(
1557
+ ) if can_use_compile_only_mode else contextlib.nullcontext():
1558
+ self.warmup_all_buckets(self.bucketing_ctx.prompt_buckets, True,
1559
+ kv_caches)
1560
+ self.warmup_all_buckets(self.bucketing_ctx.decode_buckets, False,
1561
+ kv_caches)
1562
+
1563
+ if not self.enforce_eager and htorch.utils.internal.is_lazy():
1564
+ assert self.mem_margin is not None, \
1565
+ ("HabanaWorker.determine_num_available_blocks needs "
1566
+ "to be called before warming up the model.")
1567
+ free_mem = HabanaMemoryProfiler.current_free_device_memory()
1568
+ graph_free_mem = free_mem - self.mem_margin
1569
+ graph_free_mem = align_workers(graph_free_mem,
1570
+ torch.distributed.ReduceOp.MIN)
1571
+ prompt_graph_mem_ratio = float(
1572
+ os.environ.get('VLLM_GRAPH_PROMPT_RATIO', '0.3'))
1573
+ prompt_available_memory = (prompt_graph_mem_ratio *
1574
+ graph_free_mem)
1575
+ decode_available_memory = (graph_free_mem -
1576
+ prompt_available_memory)
1577
+ msg = (
1578
+ f"Using {format_bytes(graph_free_mem)}"
1579
+ f"/{format_bytes(free_mem)} "
1580
+ "of free device memory for HPUGraphs, "
1581
+ f"{format_bytes(prompt_available_memory)} for prompt and "
1582
+ f"{format_bytes(decode_available_memory)} for decode "
1583
+ f"(VLLM_GRAPH_PROMPT_RATIO={prompt_graph_mem_ratio})")
1584
+ logger.info(msg)
1585
+ prompt_strategy = os.environ.get('VLLM_GRAPH_PROMPT_STRATEGY',
1586
+ 'min_tokens')
1587
+ decode_strategy = os.environ.get('VLLM_GRAPH_DECODE_STRATEGY',
1588
+ 'max_bs')
1589
+ mem_post_prompt, prompt_batch_seq, prompt_captured_all = \
1590
+ self.warmup_graphs(
1591
+ prompt_strategy, self.bucketing_ctx.prompt_buckets,
1592
+ True, kv_caches, prompt_available_memory)
1593
+ mem_post_decode, decode_batch_seq, decode_captured_all = \
1594
+ self.warmup_graphs(
1595
+ decode_strategy, self.bucketing_ctx.decode_buckets,
1596
+ False, kv_caches, decode_available_memory)
1597
+
1598
+ # Not all prompt buckets were captured, but all decode buckets
1599
+ # were captured and we have some free graph-allocated space
1600
+ # left. Let's try to use it for capturing more prompt buckets.
1601
+ if (mem_post_decode + mem_post_prompt < graph_free_mem
1602
+ and not prompt_captured_all and decode_captured_all):
1603
+ mem_post_prompt, _, prompt_captured_all = (
1604
+ self.warmup_graphs(
1605
+ prompt_strategy, self.bucketing_ctx.prompt_buckets,
1606
+ True, kv_caches,
1607
+ graph_free_mem - mem_post_prompt - mem_post_decode,
1608
+ mem_post_prompt, prompt_batch_seq))
1609
+
1610
+ # Not all decode buckets were captured, but all prompt buckets
1611
+ # were captured and we have some free graph-allocated space
1612
+ # left. Let's try to use it for capturing more decode buckets.
1613
+ if mem_post_decode + mem_post_prompt < graph_free_mem \
1614
+ and not decode_captured_all \
1615
+ and prompt_captured_all:
1616
+ mem_post_decode, _, _ = self.warmup_graphs(
1617
+ decode_strategy, self.bucketing_ctx.decode_buckets,
1618
+ False, kv_caches,
1619
+ graph_free_mem - mem_post_prompt - mem_post_decode,
1620
+ mem_post_decode, decode_batch_seq)
1621
+
1622
+ self.log_graph_warmup_summary(
1623
+ self.bucketing_ctx.prompt_buckets, True, mem_post_prompt)
1624
+ self.log_graph_warmup_summary(
1625
+ self.bucketing_ctx.decode_buckets, False, mem_post_decode)
1626
+
1627
+ end_time = time.perf_counter()
1628
+ end_mem = HabanaMemoryProfiler.current_device_memory_usage()
1629
+ elapsed_time = end_time - start_time
1630
+ msg = (
1631
+ f"Warmup finished in {elapsed_time:.0f} secs, "
1632
+ f"allocated {format_bytes(end_mem - start_mem)} of device memory")
1633
+ logger.info(msg)
1634
+ self.profiler.end()
1635
+
1636
+ @property
1637
+ def vocab_size(self) -> int:
1638
+ return self.model_config.get_vocab_size()
1639
+
1640
+ @property
1641
+ def mem_margin(self) -> Optional[int]:
1642
+ return self._mem_margin
1643
+
1644
+ @mem_margin.setter
1645
+ def mem_margin(self, value):
1646
+ self._mem_margin = value
1647
+
1648
+
1649
+ def _maybe_wrap_in_hpu_graph(*args, **kwargs):
1650
+ return htorch.hpu.wrap_in_hpu_graph(
1651
+ HpuModelAdapter(*args, **kwargs), disable_tensor_cache=True
1652
+ ) if htorch.utils.internal.is_lazy() else HpuModelAdapter(*args, **kwargs)
1653
+
1654
+
1655
+ class HabanaProfilerCounterHelper:
1656
+
1657
+ def __init__(self):
1658
+ self.niter = 0
1659
+ self.average_real_throughput = None
1660
+ self.logged_once = False
1661
+ self.real_seq_lens = []
1662
+ self.prompt_seq_lens = []
1663
+
1664
+ def capture_seq_group_metadata_stats(self, seq_group_metadata_list):
1665
+ self.real_seq_lens = [
1666
+ len(seq_data.prompt_token_ids) + len(seq_data.output_token_ids)
1667
+ for seq_group_metadata in seq_group_metadata_list
1668
+ for seq_data in seq_group_metadata.seq_data.values()
1669
+ ]
1670
+ self.prompt_seq_lens = [
1671
+ len(seq_data.prompt_token_ids)
1672
+ for seq_group_metadata in seq_group_metadata_list
1673
+ for seq_data in seq_group_metadata.seq_data.values()
1674
+ ]
1675
+
1676
+ def get_counter_dict(self, cache_config, duration, seq_len,
1677
+ batch_size_padded, real_batch_size, is_prompt):
1678
+ throughput = batch_size_padded / (duration / 1e6)
1679
+ throughput_effective = real_batch_size / (duration / 1e6)
1680
+
1681
+ real_max_seq_len = max(self.real_seq_lens)
1682
+ real_num_tokens = sum(self.real_seq_lens)
1683
+ padded_num_tokens = batch_size_padded * seq_len
1684
+ batch_token_utilization = real_num_tokens / padded_num_tokens
1685
+ if self.average_real_throughput is None:
1686
+ self.average_real_throughput = throughput_effective
1687
+ else: # https://www.heikohoffmann.de/htmlthesis/node134.html
1688
+ self.average_real_throughput = self.average_real_throughput + 1 / (
1689
+ self.niter + 1) * (throughput_effective -
1690
+ self.average_real_throughput)
1691
+ phase = "prompt" if is_prompt else "decode"
1692
+ counters = {
1693
+ f'{phase}_bucket_batch_size': batch_size_padded,
1694
+ f'{phase}_batch_size': real_batch_size,
1695
+ f'{phase}_bucket_seq_len': seq_len,
1696
+ f'{phase}_seq_len': real_max_seq_len,
1697
+ f'{phase}_bucket_gen_throughput': throughput,
1698
+ f'{phase}_real_gen_throughput': throughput_effective,
1699
+ f'{phase}_batch_token_utilization': batch_token_utilization,
1700
+ 'average_real_throughput': self.average_real_throughput,
1701
+ 'engine_iteration': self.niter,
1702
+ }
1703
+ self.niter += 1
1704
+ if is_prompt:
1705
+ prompt_bucket_in_throughput = (seq_len * batch_size_padded) / (
1706
+ duration / 1e6)
1707
+ prompt_real_in_throughput = sum(
1708
+ self.prompt_seq_lens) / (duration / 1e6)
1709
+ counters[
1710
+ f'{phase}_bucket_in_throughput'] = prompt_bucket_in_throughput
1711
+ counters[f'{phase}_real_in_throughput'] = prompt_real_in_throughput
1712
+
1713
+ # KV cache might not be created yet (e.g. for profiling run)
1714
+ if cache_config.num_gpu_blocks is not None and \
1715
+ cache_config.num_gpu_blocks != 0:
1716
+ cache_num_blocks_used = [
1717
+ math.ceil(sl / cache_config.block_size)
1718
+ for sl in self.real_seq_lens
1719
+ ]
1720
+ cache_total_num_blocks_used = sum(cache_num_blocks_used)
1721
+ num_cache_blocks = cache_config.num_gpu_blocks
1722
+ cache_total_num_free_blocks = \
1723
+ num_cache_blocks - cache_total_num_blocks_used
1724
+ cache_computed_utilization = \
1725
+ cache_total_num_blocks_used / num_cache_blocks
1726
+ max_blocks_per_seq = math.ceil(seq_len / cache_config.block_size)
1727
+ batch_block_utilization = cache_total_num_blocks_used / (
1728
+ batch_size_padded * max_blocks_per_seq)
1729
+ counters['cache_num_blocks_used'] = cache_total_num_blocks_used
1730
+ counters['cache_num_free_blocks'] = cache_total_num_free_blocks
1731
+ counters['cache_computed_utilization'] = cache_computed_utilization
1732
+ counters[
1733
+ f'{phase}_batch_block_utilization'] = batch_block_utilization
1734
+ if not self.logged_once:
1735
+ counters['const_cache_num_blocks'] = cache_config.num_gpu_blocks
1736
+ counters[
1737
+ 'const_gpu_memory_utilization'] = \
1738
+ cache_config.gpu_memory_utilization
1739
+ counters['const_block_size'] = cache_config.block_size
1740
+ self.logged_once = True
1741
+ return counters
1742
+
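The incremental average used for average_real_throughput above is a standard running mean; a quick self-contained check:

    values = [10.0, 30.0, 50.0]
    average = None
    for niter, x in enumerate(values):
        average = x if average is None else average + (x - average) / (niter + 1)
    assert abs(average - sum(values) / len(values)) < 1e-9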
1743
+
1744
+ def unwrap_model(model):
1745
+ if isinstance(model, torch._dynamo.eval_frame.OptimizedModule):
1746
+ return unwrap_model(model._orig_mod)
1747
+ else:
1748
+ model = list(vars(model)['_modules'].values())[0]
1749
+ modules = list(vars(model)['_modules'].values())
1750
+ return modules
1751
+
1752
+
1753
+ class HPUModelRunner(HPUModelRunnerBase[ModelInputForHPUWithSamplingMetadata]):
1754
+ """
1755
+ HPU model runner with sampling step.
1756
+ """
1757
+ _model_input_cls: Type[ModelInputForHPUWithSamplingMetadata] = (
1758
+ ModelInputForHPUWithSamplingMetadata)
1759
+
1760
+ def make_model_input_from_broadcasted_tensor_dict(
1761
+ self,
1762
+ tensor_dict: Dict[str, Any],
1763
+ ) -> ModelInputForHPUWithSamplingMetadata:
1764
+ return (
1765
+ ModelInputForHPUWithSamplingMetadata.from_broadcasted_tensor_dict(
1766
+ tensor_dict,
1767
+ attn_backend=self.attn_backend,
1768
+ ))
1769
+
1770
+ @torch.inference_mode()
1771
+ def prepare_model_input(
1772
+ self,
1773
+ seq_group_metadata_list: List[SequenceGroupMetadata],
1774
+ virtual_engine: int = 0,
1775
+ finished_requests_ids: Optional[List[str]] = None
1776
+ ) -> ModelInputForHPUWithSamplingMetadata:
1777
+ """Prepare the model input based on a given sequence group, including
1778
+ metadata for the sampling step.
1779
+ The API assumes seq_group_metadata_list is sorted by prefill -> decode.
1780
+ The result tensors and data structure also batches input in prefill
1781
+ -> decode order. For example,
1782
+ - input_tokens[:num_prefill_tokens] contains prefill tokens.
1783
+ - input_tokens[num_prefill_tokens:] contains decode tokens.
1784
+ If an HPU graph is required, this API automatically pads inputs.
1785
+ """
1786
+ with self.profiler.record_event('internal', 'prepare_input_tensors'):
1787
+ assert seq_group_metadata_list is not None
1788
+ if self.profiler.enabled:
1789
+ self.profiler_counter_helper.capture_seq_group_metadata_stats(
1790
+ seq_group_metadata_list=seq_group_metadata_list)
1791
+ model_input, sampling_metadata = self.prepare_input_tensors(
1792
+ seq_group_metadata_list)
1793
+ assert model_input.attn_metadata is not None
1794
+ is_prompt = model_input.attn_metadata.is_prompt
1795
+
1796
+ return dataclasses.replace(model_input,
1797
+ sampling_metadata=sampling_metadata,
1798
+ is_prompt=is_prompt,
1799
+ virtual_engine=virtual_engine)
1800
+
1801
+ def finish_measurements(self):
1802
+ from neural_compressor.torch.quantization import finalize_calibration
1803
+ finalize_calibration(self.model.model)
1804
+
1805
+ def _num_blocks(self, attn_metadata):
1806
+ if attn_metadata.block_list is None:
1807
+ return 0
1808
+ return attn_metadata.block_list.numel()
1809
+
1810
+ def _phase(self, attn_metadata):
1811
+ phase_type: PhaseType
1812
+ is_prompt = attn_metadata.is_prompt
1813
+ is_prefix_prefill = is_prompt and attn_metadata.block_list is not None
1814
+ if is_prompt and is_prefix_prefill:
1815
+ phase_type = PhaseType.PREFIX_PREFILL
1816
+ elif is_prompt and not is_prefix_prefill:
1817
+ phase_type = PhaseType.PREFILL
1818
+ elif not is_prompt:
1819
+ phase_type = PhaseType.DECODE
1820
+ else:
1821
+ raise ValueError("Unrecognized pass type, likely due to malformed "
1822
+ "attention metadata")
1823
+ return phase_type
1824
+
1825
+ def _check_config(self, batch_size, seq_len, attn_metadata, warmup_mode):
1826
+ is_prefix_caching = self.vllm_config.cache_config.enable_prefix_caching
1827
+ cfg: Optional[tuple] = None
1828
+ assert cfg is None, "Configs changed between 2D and 3D"
1829
+ if is_prefix_caching:
1830
+ phase = self._phase(attn_metadata)
1831
+ num_blocks = self._num_blocks(attn_metadata)
1832
+ cfg = (batch_size, seq_len, num_blocks, phase)
1833
+ else:
1834
+ phase = 'prompt' if attn_metadata.is_prompt else 'decode'
1835
+ cfg = (batch_size, seq_len, phase)
1836
+ seen = cfg in self.seen_configs
1837
+ self.seen_configs.add(cfg)
1838
+ if not seen and not warmup_mode:
1839
+ logger.warning("Configuration: %s was not warmed-up!",
1840
+ (phase.value, batch_size, seq_len,
1841
+ num_blocks) if is_prefix_caching else
1842
+ (phase, batch_size, seq_len))
1843
+
1844
+ def create_lora_mask(self, input_tokens: torch.Tensor, lora_ids: List[int],
1845
+ is_prompt: bool):
1846
+ '''
1847
+ This is a helper function to create the mask for lora computations.
1848
+ Lora Mask is needed to ensure we match the correct lora weights
1849
+ for the request.
1850
+ For Prompt phase we have
1851
+ lora_mask with shape (batch_size * seq_len, max_loras * max_rank)
1852
+ lora_logits_mask with shape (batch_size, max_loras * max_rank)
1853
+ For Decode phase we have both
1854
+ lora_mask and lora_logits_mask with shape
1855
+ (batch_size, max_loras * max_rank)
1856
+ '''
1857
+ lora_mask: torch.Tensor = None
1858
+ lora_logits_mask: torch.Tensor = None
1859
+ lora_index = 0
1860
+
1861
+ if self.lora_config:
1862
+ if is_prompt:
1863
+ lora_mask = torch.zeros(
1864
+ input_tokens.shape[0] * input_tokens.shape[1],
1865
+ (self.lora_config.max_loras) *\
1866
+ self.lora_config.max_lora_rank,
1867
+ dtype=self.lora_config.lora_dtype)
1868
+ lora_logits_mask = torch.zeros(
1869
+ input_tokens.shape[0], (self.lora_config.max_loras) *
1870
+ self.lora_config.max_lora_rank,
1871
+ dtype=self.lora_config.lora_dtype)
1872
+
1873
+ ones = torch.ones(input_tokens.shape[1],
1874
+ self.lora_config.max_lora_rank,
1875
+ dtype=self.lora_config.lora_dtype)
1876
+ logit_ones = torch.ones(1,
1877
+ self.lora_config.max_lora_rank,
1878
+ dtype=self.lora_config.lora_dtype)
1879
+
1880
+ for i in range(len(lora_ids)):
1881
+ if lora_ids[i] == 0:
1882
+ continue
1883
+ lora_index = self.lora_manager._adapter_manager.\
1884
+ lora_index_to_id.index(lora_ids[i])
1885
+ start_row = i * input_tokens.shape[1]
1886
+ end_row = start_row + input_tokens.shape[1]
1887
+ start_col = lora_index * self.lora_config.max_lora_rank
1888
+ end_col = start_col + self.lora_config.max_lora_rank
1889
+ lora_mask[start_row:end_row, start_col:end_col] = ones
1890
+ lora_logits_mask[i, start_col:end_col] = logit_ones
1891
+ lora_mask = lora_mask.to('hpu')
1892
+ lora_logits_mask = lora_logits_mask.to('hpu')
1893
+ else:
1894
+ lora_mask = torch.zeros(input_tokens.shape[0],
1895
+ (self.lora_config.max_loras) *
1896
+ self.lora_config.max_lora_rank,
1897
+ dtype=self.lora_config.lora_dtype)
1898
+ ones = torch.ones(1,
1899
+ self.lora_config.max_lora_rank,
1900
+ dtype=self.lora_config.lora_dtype)
1901
+ for i in range(len(lora_ids)):
1902
+ if lora_ids[i] == 0:
1903
+ continue
1904
+ lora_index = self.lora_manager._adapter_manager.\
1905
+ lora_index_to_id.index(lora_ids[i])
1906
+ start_pos = lora_index * self.lora_config.max_lora_rank
1907
+ end_pos = start_pos + self.lora_config.max_lora_rank
1908
+ lora_mask[i, start_pos:end_pos] = ones
1909
+ lora_mask = lora_mask.to('hpu')
1910
+ lora_logits_mask = lora_mask
1911
+
1912
+ return lora_mask, lora_logits_mask
1913
+
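# --- Illustrative sketch (not part of the diff) ---
# The docstring above describes the prompt-phase LoRA mask layout: one row per
# token and one column block per (LoRA slot, rank) pair, with a block of ones
# marking which adapter each request's tokens should use. A stripped-down
# construction with made-up sizes (these numbers and names are examples, not
# values used by vLLM) looks like this:

import torch

batch_size, seq_len = 2, 4       # two requests, four tokens each
max_loras, max_rank = 3, 8       # assumed adapter-slot capacity
# LoRA slot index per request; in the code above an id of 0 means
# "no adapter", here both requests use a real slot.
request_slots = [1, 2]

lora_mask = torch.zeros(batch_size * seq_len, max_loras * max_rank)
lora_logits_mask = torch.zeros(batch_size, max_loras * max_rank)

for i, slot in enumerate(request_slots):
    rows = slice(i * seq_len, (i + 1) * seq_len)
    cols = slice(slot * max_rank, (slot + 1) * max_rank)
    lora_mask[rows, cols] = 1.0          # every token of request i
    lora_logits_mask[i, cols] = 1.0      # one row per request for logits

assert lora_mask.shape == (batch_size * seq_len, max_loras * max_rank)
assert int(lora_mask.sum()) == batch_size * seq_len * max_rank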
1914
+ def _get_seq_ids(self, model_input):
1915
+ return ([
1916
+ sg.seq_ids[0] for sg in model_input.sampling_metadata.seq_groups
1917
+ ])
1918
+
1919
+ def _pad_to_max_num_seqs(self, tensor, value):
1920
+ padding_needed = self.max_num_seqs - tensor.size(0)
1921
+ if padding_needed:
1922
+ padding = torch.full((padding_needed, *tensor.shape[1:]),
1923
+ value,
1924
+ device=tensor.device,
1925
+ dtype=tensor.dtype)
1926
+ tensor = torch.cat([tensor, padding])
1927
+ return tensor
1928
+
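# --- Illustrative sketch (not part of the diff) ---
# The helper above pads a per-sequence tensor up to the scheduler's
# max_num_seqs so that downstream shapes stay static across steps; for sampled
# token ids the padding value is a dummy token id. A standalone version with
# assumed sizes (max_num_seqs=4, dummy id -1, both invented for the example):

import torch

MAX_NUM_SEQS = 4        # assumed scheduler limit
DUMMY_TOKEN_ID = -1     # assumed padding value


def pad_to_max_num_seqs(tensor: torch.Tensor, value: int) -> torch.Tensor:
    padding_needed = MAX_NUM_SEQS - tensor.size(0)
    if padding_needed > 0:
        padding = torch.full((padding_needed, *tensor.shape[1:]),
                             value,
                             device=tensor.device,
                             dtype=tensor.dtype)
        tensor = torch.cat([tensor, padding])
    return tensor


sampled = torch.tensor([[11], [22]])          # only two live sequences
padded = pad_to_max_num_seqs(sampled, DUMMY_TOKEN_ID)
assert padded.shape == (MAX_NUM_SEQS, 1)
assert padded[-1].item() == DUMMY_TOKEN_ID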
1929
+ @torch.inference_mode()
1930
+ def execute_model(
1931
+ self,
1932
+ model_input: ModelInputForHPUWithSamplingMetadata,
1933
+ kv_caches: List[torch.Tensor],
1934
+ intermediate_tensors: Optional[IntermediateTensors] = None,
1935
+ num_steps: int = 1,
1936
+ warmup_mode=False,
1937
+ seqs=None,
1938
+ ) -> Optional[Union[List[SamplerOutput], IntermediateTensors]]:
1939
+ VLLM_DELAYED_SAMPLING = envs.VLLM_HPU_USE_DELAYED_SAMPLING
1940
+ use_delayed_sampling = VLLM_DELAYED_SAMPLING and not warmup_mode
1941
+ assert not (use_delayed_sampling and num_steps != 1), \
1942
+ 'Delayed sampling is not compatible with MSS!'
1943
+ assert model_input.input_tokens is not None
1944
+ if use_delayed_sampling and not model_input.is_prompt and \
1945
+ self.is_driver_worker:
1946
+ num_cached = len(self.cached_step_outputs)
1947
+ assert num_cached > 0
1948
+ cur_seq_ids = self._get_seq_ids(model_input)
1949
+ cur_seq_id_pos = {
1950
+ sid: idx
1951
+ for idx, sid in enumerate(cur_seq_ids) if sid >= 0
1952
+ }
1953
+ htorch.core.mark_step()
1954
+ for i in range(num_cached):
1955
+ prev_seq_ids = self._get_seq_ids(self.cached_step_inputs[i])
1956
+ target_indices = [
1957
+ cur_seq_id_pos.get(psi, -1) for psi in prev_seq_ids
1958
+ ]
1959
+ padding = self.cached_step_outputs[i].size(0) - len(
1960
+ target_indices)
1961
+ target_indices.extend([-1] * padding)
1962
+ target_indices = torch.tensor(
1963
+ target_indices,
1964
+ device=model_input.input_tokens.device,
1965
+ dtype=model_input.input_tokens.dtype)
1966
+ model_input.input_tokens.index_copy_(
1967
+ 0, target_indices, self.cached_step_outputs[i])
1968
+ htorch.core.mark_step()
1969
+
1970
+ if not model_input.is_first_multi_step:
1971
+ if not model_input.is_last_step:
1972
+ # not first or last multi-step
1973
+ return []
1974
+ # last multi-step
1975
+ output = self._decode_sampler_outputs(
1976
+ model_input) if self.is_driver_worker else []
1977
+ torch.hpu.synchronize()
1978
+ if model_input.is_first_multi_step:
1979
+ # first multi-step
1980
+ if self.lora_config:
1981
+ assert model_input.lora_requests is not None
1982
+ assert model_input.lora_mapping is not None
1983
+ self.set_active_loras(model_input.lora_requests,
1984
+ model_input.lora_mapping)
1985
+ # Rank!=0 workers have is_prompt==None
1986
+ if use_delayed_sampling and not model_input.is_prompt and \
1987
+ model_input.input_tokens.size(1) == 1:
1988
+ if self.is_driver_worker:
1989
+ model_kwargs_broadcast_data = {
1990
+ "input_tokens": model_input.input_tokens
1991
+ }
1992
+ broadcast_tensor_dict(model_kwargs_broadcast_data, src=0)
1993
+ input_tokens = model_input.input_tokens
1994
+
1995
+ else:
1996
+ model_kwargs_broadcast_data = broadcast_tensor_dict(src=0)
1997
+ input_tokens = model_kwargs_broadcast_data["input_tokens"]
1998
+ else:
1999
+ input_tokens = model_input.input_tokens
2000
+ input_positions = model_input.input_positions
2001
+ attn_metadata = model_input.attn_metadata
2002
+ sampling_metadata = model_input.sampling_metadata
2003
+ real_batch_size = model_input.real_batch_size
2004
+ batch_size_padded = model_input.batch_size_padded
2005
+ assert input_tokens is not None
2006
+ assert input_positions is not None
2007
+ assert sampling_metadata is not None
2008
+ assert attn_metadata is not None
2009
+ is_prompt = attn_metadata.is_prompt
2010
+ assert is_prompt is not None
2011
+ batch_size = input_tokens.size(0)
2012
+ seq_len = self._seq_len(attn_metadata)
2013
+ use_graphs = self._use_graphs(batch_size, seq_len, is_prompt)
2014
+ self._check_config(batch_size, seq_len, attn_metadata, warmup_mode)
2015
+
2016
+ lora_mask: torch.Tensor = None
2017
+ lora_logits_mask: torch.Tensor = None
2018
+ if self.lora_config:
2019
+ assert model_input.lora_ids is not None
2020
+ lora_mask, lora_logits_mask = self.create_lora_mask(
2021
+ input_tokens, model_input.lora_ids,
2022
+ attn_metadata.is_prompt)
2023
+
2024
+ execute_model_kwargs = {
2025
+ "input_ids": input_tokens,
2026
+ "positions": input_positions,
2027
+ "attn_metadata": self.trim_attn_metadata(attn_metadata),
2028
+ "intermediate_tensors": intermediate_tensors,
2029
+ "lora_mask": lora_mask,
2030
+ "virtual_engine": model_input.virtual_engine,
2031
+ **(model_input.multi_modal_kwargs or {}),
2032
+ }
2033
+ if htorch.utils.internal.is_lazy():
2034
+ execute_model_kwargs.update(
2035
+ {"bypass_hpu_graphs": not use_graphs})
2036
+
2037
+ htorch.core.mark_step()
2038
+ if self.is_driver_worker:
2039
+ model_event_name = ("model_"
2040
+ f"{'prompt' if is_prompt else 'decode'}_"
2041
+ f"bs{batch_size}_"
2042
+ f"seq{seq_len}_"
2043
+ f"graphs{'T' if use_graphs else 'F'}")
2044
+ else:
2045
+ model_event_name = 'model_executable'
2046
+ if num_steps > 1 or use_delayed_sampling:
2047
+ # in case of multi-step scheduling
2048
+ # we only want to pythonize in the last step
2049
+ sampling_metadata.skip_sampler_cpu_output = True
2050
+ self.model.sampler.include_gpu_probs_tensor = True
2051
+ cache_orig_output_tokens_len: List[Dict] = []
2052
+
2053
+ def try_revert_dummy_output_tokens():
2054
+ if len(cache_orig_output_tokens_len) > 0:
2055
+ # Reuse the original output token ids length
2056
+ for i, seq_group_metadata in enumerate(
2057
+ seq_group_metadata_list):
2058
+ for j, data in seq_group_metadata.seq_data.items():
2059
+ orig_output_tokens_len = \
2060
+ cache_orig_output_tokens_len[i][j]
2061
+ data.output_token_ids = \
2062
+ data.output_token_ids[:orig_output_tokens_len]
2063
+
2064
+ for i in range(num_steps):
2065
+ if i != 0 and not self.is_driver_worker:
2066
+ broadcast_data = broadcast_tensor_dict(src=0)
2067
+ if 'early_exit' in broadcast_data and broadcast_data[
2068
+ 'early_exit']:
2069
+ return [output] if num_steps == 1 else []
2070
+ execute_model_kwargs.update({
2071
+ "input_ids":
2072
+ broadcast_data["input_ids"],
2073
+ "positions":
2074
+ broadcast_data["positions"],
2075
+ "attn_metadata":
2076
+ self.trim_attn_metadata(
2077
+ broadcast_data["attn_metadata"])
2078
+ })
2079
+ with self.profiler.record_event('internal', model_event_name):
2080
+ hidden_states = self.model.forward(
2081
+ **execute_model_kwargs,
2082
+ selected_token_indices=sampling_metadata.
2083
+ selected_token_indices)
2084
+
2085
+ if self.lora_config:
2086
+ LoraMask.setLoraMask(
2087
+ lora_logits_mask.index_select(
2088
+ 0, sampling_metadata.selected_token_indices))
2089
+
2090
+ # Compute the logits.
2091
+ with self.profiler.record_event(
2092
+ 'internal',
2093
+ ('compute_logits_'
2094
+ f'{"prompt" if is_prompt else "decode"}_bs'
2095
+ f'{batch_size}_'
2096
+ f'seq{seq_len}')):
2097
+ if num_steps == 1:
2098
+ sampling_metadata.selected_token_indices = None
2099
+ logits = self.model.compute_logits(hidden_states,
2100
+ sampling_metadata)
2101
+ htorch.core.mark_step()
2102
+ # Only perform sampling in the driver worker.
2103
+ if not self.is_driver_worker:
2104
+ continue
2105
+
2106
+ if use_delayed_sampling:
2107
+ fake_output = self._delayed_sampler_outputs(model_input)
2108
+
2109
+ with self.profiler.record_event(
2110
+ 'internal', ('sample_'
2111
+ f'{"prompt" if is_prompt else "decode"}_'
2112
+ f'bs{batch_size}_'
2113
+ f'seq{seq_len}')):
2114
+ output = self.model.sample(
2115
+ logits=logits,
2116
+ sampling_metadata=sampling_metadata,
2117
+ )
2118
+ if num_steps > 1:
2119
+ output = output.sampled_token_ids
2120
+ self.cached_step_outputs.append(output)
2121
+ if use_delayed_sampling and self.is_driver_worker:
2122
+ self._patch_prev_output()
2123
+ output = self._pad_to_max_num_seqs(
2124
+ output.sampled_token_ids, DUMMY_TOKEN_ID)
2125
+ self.cached_step_outputs.append(output)
2126
+ self.cached_step_inputs.append(model_input)
2127
+ htorch.core.mark_step()
2128
+ if model_input.async_callback is not None:
2129
+ model_input.async_callback()
2130
+ if i < num_steps - 1:
2131
+ if i == 0:
2132
+ if model_input.async_callback is not None:
2133
+ ctx = model_input.async_callback.keywords[ # type: ignore
2134
+ "ctx"]
2135
+ seq_group_metadata_list = \
2136
+ ctx.seq_group_metadata_list
2137
+ elif seqs is not None:
2138
+ seq_group_metadata_list = seqs
2139
+ else:
2140
+ raise RuntimeError(
2141
+ "seq_group_metadata_list is uninitialized")
2142
+ for i, seq_group_metadata in enumerate(
2143
+ seq_group_metadata_list):
2144
+ # Skip empty steps
2145
+ seq_group_metadata.state.current_step += (
2146
+ num_steps - 2)
2147
+ # Cache the original output token ids
2148
+ cache_orig_output_tokens_len.append({})
2149
+ for j, data in seq_group_metadata.seq_data.items():
2150
+ cache_orig_output_tokens_len[i][j] = \
2151
+ len(data.output_token_ids)
2152
+ for seq_group_metadata in seq_group_metadata_list:
2153
+ for data in seq_group_metadata.seq_data.values():
2154
+ max_output_len = sampling_metadata.seq_groups[
2155
+ 0].sampling_params.max_tokens
2156
+ if len(data.output_token_ids) < max_output_len - 1:
2157
+ # add a placeholder for prepare_decode
2158
+ # arbitrary value, this could be any token
2159
+ dummy_token = (540, )
2160
+ data.output_token_ids += (dummy_token)
2161
+ else:
2162
+ broadcast_tensor_dict({'early_exit': True},
2163
+ src=0)
2164
+ if num_steps == 1:
2165
+ return [output]
2166
+ else:
2167
+ try_revert_dummy_output_tokens()
2168
+ return []
2169
+
2170
+ result = self._prepare_decode(seq_group_metadata_list,
2171
+ output=output)
2172
+ execute_model_kwargs.update({
2173
+ "input_ids":
2174
+ result.input_tokens,
2175
+ "positions":
2176
+ result.input_positions,
2177
+ "attn_metadata":
2178
+ self.trim_attn_metadata(result.attn_metadata)
2179
+ })
2180
+ model_kwargs_broadcast_data = {
2181
+ "input_ids": result.input_tokens,
2182
+ "positions": result.input_positions,
2183
+ "attn_metadata": vars(result.attn_metadata)
2184
+ }
2185
+ broadcast_tensor_dict(model_kwargs_broadcast_data, src=0)
2186
+ else:
2187
+ try_revert_dummy_output_tokens()
2188
+
2189
+ if self.is_driver_worker and self.profiler.enabled:
2190
+ # Stop recording 'execute_model' event
2191
+ self.profiler.end()
2192
+ event_end = self.profiler.get_timestamp_us()
2193
+ counters = self.profiler_counter_helper.get_counter_dict(
2194
+ cache_config=self.cache_config,
2195
+ duration=event_end - self.event_start,
2196
+ seq_len=seq_len,
2197
+ batch_size_padded=batch_size_padded,
2198
+ real_batch_size=real_batch_size,
2199
+ is_prompt=is_prompt)
2200
+ self.profiler.record_counter(self.event_start, counters)
2201
+ if num_steps == 1:
2202
+ if self.return_hidden_states:
2203
+ # we only need to pass the hidden states of the most recent token
2204
+ assert model_input.sampling_metadata is not None
2205
+ if model_input.is_prompt:
2206
+ output.prefill_hidden_states = hidden_states
2207
+ output.hidden_states = hidden_states
2208
+ if use_delayed_sampling:
2209
+ if self.is_driver_worker:
2210
+ return [fake_output]
2211
+ else:
2212
+ return []
2213
+
2214
+ return [output] if self.is_driver_worker else []
2215
+ else:
2216
+ return []
2217
+ return output if type(output) is list else [output]
2218
+
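# --- Illustrative sketch (not part of the diff) ---
# The trickiest part of execute_model above is delayed sampling: token ids
# sampled in step N are only written into the *next* step's input_tokens, by
# mapping each previously cached sequence id onto its position in the new
# batch and scattering with index_copy_. A self-contained toy version (the
# sequence ids and sizes are invented; here every previous sequence is still
# present in the new batch):

import torch

# Tokens sampled for sequences [7, 9] during the previous step.
prev_seq_ids = [7, 9]
prev_sampled = torch.tensor([[101], [202]])

# Current step batch order: sequence 9 first, then 7, then a new sequence 11
# whose input token (5) must stay untouched.
cur_seq_ids = [9, 7, 11]
input_tokens = torch.tensor([[0], [0], [5]])

cur_seq_id_pos = {sid: idx for idx, sid in enumerate(cur_seq_ids)}
target_indices = torch.tensor(
    [cur_seq_id_pos.get(sid, -1) for sid in prev_seq_ids])

# Copy row k of prev_sampled into row target_indices[k] of input_tokens.
input_tokens.index_copy_(0, target_indices, prev_sampled)

assert input_tokens.tolist() == [[202], [101], [5]]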
2219
+ def _delayed_sampler_outputs(self, model_input):
2220
+ next_token_ids = [[DUMMY_TOKEN_ID]] * len(
2221
+ model_input.sampling_metadata.seq_groups)
2222
+ sampler_output = self._make_decode_output(
2223
+ next_token_ids, model_input.sampling_metadata.seq_groups)
2224
+ return sampler_output
2225
+
2226
+ def _decode_sampler_outputs(self, model_input):
2227
+ use_async_out_proc = model_input.async_callback is not None
2228
+ sampler_outputs = []
2229
+ num_outputs = len(self.cached_step_outputs)
2230
+ for i in range(num_outputs):
2231
+ next_token_ids = self.cached_step_outputs.pop(0)
2232
+ next_token_ids = next_token_ids.cpu().tolist()
2233
+ sampler_output = self._make_decode_output(
2234
+ next_token_ids, model_input.sampling_metadata.seq_groups)
2235
+ sampler_outputs.append(sampler_output)
2236
+
2237
+ if i < num_outputs - 1 and use_async_out_proc:
2238
+ assert model_input.async_callback is not None
2239
+ ctx = model_input.async_callback.keywords[ # type: ignore
2240
+ "ctx"]
2241
+ ctx.append_output(
2242
+ outputs=[sampler_output],
2243
+ seq_group_metadata_list=ctx.seq_group_metadata_list,
2244
+ scheduler_outputs=ctx.scheduler_outputs,
2245
+ is_async=False,
2246
+ is_last_step=False,
2247
+ is_first_step_output=False)
2248
+ model_input.async_callback()
2249
+
2250
+ if use_async_out_proc:
2251
+ return [sampler_outputs[-1]]
2252
+ else:
2253
+ return sampler_outputs
2254
+
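# --- Illustrative sketch (not part of the diff) ---
# _decode_sampler_outputs drains the FIFO of token-id tensors cached during
# multi-step execution and turns each one into a per-step output, streaming
# all but the last step through the async output processor. Reduced to plain
# Python (the queue contents and the "emit" callback are made up):

from collections import deque

cached_step_outputs = deque([[11, 12], [21, 22], [31, 32]])  # 3 steps, 2 seqs


def drain(cached, emit):
    """Pop every cached step; stream all but the last one via emit()."""
    results = []
    num_outputs = len(cached)
    for i in range(num_outputs):
        step_tokens = cached.popleft()
        results.append(step_tokens)
        if i < num_outputs - 1:
            emit(step_tokens)          # intermediate steps go out early
    return results[-1]                 # caller only returns the final step


streamed = []
last = drain(cached_step_outputs, streamed.append)
assert streamed == [[11, 12], [21, 22]]
assert last == [31, 32]
assert not cached_step_outputs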
2255
+ def _make_decode_output(
2256
+ self,
2257
+ next_token_ids: List[List[int]],
2258
+ seq_groups: List[SequenceGroupToSample],
2259
+ ) -> SamplerOutput:
2260
+ zero_logprob = Logprob(0.0)
2261
+ sampler_outputs = []
2262
+ batch_idx = 0
2263
+ for seq_group in seq_groups:
2264
+ seq_ids = seq_group.seq_ids
2265
+ seq_outputs = []
2266
+ for seq_id in seq_ids:
2267
+ next_token_id = next_token_ids[batch_idx][0]
2268
+ seq_outputs.append(
2269
+ SequenceOutput(seq_id, next_token_id,
2270
+ {next_token_id: zero_logprob}))
2271
+ batch_idx += 1
2272
+ sampler_outputs.append(
2273
+ CompletionSequenceGroupOutput(seq_outputs, None))
2274
+ return SamplerOutput(sampler_outputs)
2275
+
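# --- Illustrative sketch (not part of the diff) ---
# _make_decode_output walks a flat list of sampled token ids with a running
# batch index so that each sequence group receives its own slice. The same
# indexing scheme with plain tuples instead of vLLM's output classes (all
# values here are invented for the example):

next_token_ids = [[10], [20], [30]]   # one sampled id per sequence, flat
seq_groups = [[100], [200, 201]]      # sequence ids grouped per request

outputs = []
batch_idx = 0
for seq_ids in seq_groups:
    group = []
    for seq_id in seq_ids:
        # The sampled ids are flat across the whole batch, so a running
        # batch_idx maps each sequence to its own entry.
        group.append((seq_id, next_token_ids[batch_idx][0]))
        batch_idx += 1
    outputs.append(group)

assert outputs == [[(100, 10)], [(200, 20), (201, 30)]]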
2276
+ def shutdown_inc(self):
2277
+ can_finalize_inc = False
2278
+ from contextlib import suppress
2279
+ with suppress(AttributeError):
2280
+ can_finalize_inc = (self.model_config.quantization == 'inc') and \
2281
+ (self.model.model is not None) and \
2282
+ self.inc_initialized_successfully and \
2283
+ not getattr(self, "_is_inc_finalized", False)
2284
+ if can_finalize_inc:
2285
+ from neural_compressor.torch.quantization import (
2286
+ finalize_calibration)
2287
+ finalize_calibration(self.model.model)
2288
+ self._is_inc_finalized = True
2289
+
2290
+ def __del__(self):
2291
+ self.shutdown_inc()
2292
+
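# --- Illustrative sketch (not part of the diff) ---
# shutdown_inc above is written so it can run safely from __del__: attribute
# errors from a partially constructed object are swallowed with
# contextlib.suppress, and a flag keeps the finalization idempotent. The same
# guard pattern in isolation (the print is a stand-in for the real
# finalize_calibration call, and the class is invented for the example):

from contextlib import suppress
from typing import Optional


class Worker:

    def __init__(self, quantization: Optional[str] = None) -> None:
        self.quantization = quantization

    def shutdown(self) -> None:
        can_finalize = False
        with suppress(AttributeError):
            # self.quantization may be missing if __init__ never finished.
            can_finalize = (self.quantization == "inc"
                            and not getattr(self, "_finalized", False))
        if can_finalize:
            print("finalizing calibration")  # stand-in for the real call
            self._finalized = True

    def __del__(self) -> None:
        self.shutdown()


w = Worker(quantization="inc")
w.shutdown()  # finalizes once
w.shutdown()  # second call is a no-op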
2293
+ def _patch_prev_output(self):
2294
+ assert len(self.cached_step_inputs) == len(self.cached_step_outputs), \
2295
+ f'''Inputs and outputs are out of sync!
2296
+ {len(self.cached_step_inputs)} vs {len(self.cached_step_outputs)}'''
2297
+ if len(self.cached_step_inputs) == 0:
2298
+ return
2299
+ model_input = self.cached_step_inputs.pop(0)
2300
+ delayed_output = self.cached_step_outputs.pop(0).cpu().squeeze(
2301
+ -1).tolist()
2302
+ ctx = model_input.async_callback.keywords["ctx"] # type: ignore
2303
+ # If there's no output to patch with (usually because we're starting a
2304
+ # new request after all previous requests have completed), return early.
2305
+ if len(ctx.output_queue) == 0:
2306
+ return
2307
+ assert len(
2308
+ ctx.output_queue) == 1, 'There should be exactly 1 output waiting!'
2309
+ output_data = ctx.output_queue[0]
2310
+ assert len(output_data.outputs) == 1
2311
+ for fake_out, real_out in zip(output_data.outputs[0], delayed_output):
2312
+ fake_out.samples[0].output_token = real_out
2313
+ for sg, real_out in zip(output_data.seq_group_metadata_list,
2314
+ delayed_output):
2315
+ assert len(sg.seq_data) == 1
2316
+ seq_data = list(sg.seq_data.values())[0]
2317
+ # This is a hack: assigning output_token_ids would trigger a cache
2318
+ # recomputation, so we only patch the last token in the cached arrays.
2319
+ seq_data.output_token_ids_array[-1] = real_out
2320
+ seq_data._cached_all_token_ids[-1] = real_out
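# --- Illustrative sketch (not part of the diff) ---
# _patch_prev_output fixes up the previous step's results once the real
# sampled token ids are available: the dummy ids that were reported while
# sampling was still in flight get overwritten in place with the delayed
# values. A toy version over plain dictionaries (DUMMY_TOKEN_ID and the
# record layout are invented for the example):

DUMMY_TOKEN_ID = -1

# What the engine handed out optimistically for two sequences last step.
pending_outputs = [
    {"seq_id": 0, "output_token": DUMMY_TOKEN_ID},
    {"seq_id": 1, "output_token": DUMMY_TOKEN_ID},
]
# Real token ids that finished computing one step later.
delayed_output = [1234, 5678]

for fake_out, real_token in zip(pending_outputs, delayed_output):
    fake_out["output_token"] = real_token   # patch in place, no reallocation

assert [o["output_token"] for o in pending_outputs] == [1234, 5678]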