vllm-cpu-amxbf16 0.9.1 (cp312-cp312-manylinux_2_17_x86_64.whl)

This diff shows the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
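As a rough illustration of how a file-level diff like this can be reproduced locally, the sketch below compares the member inventories of two wheel archives using only Python's standard library. The 0.9.0 baseline filename is an assumption for illustration, and the sketch reports uncompressed byte sizes, whereas the listing below reports per-file line counts for text files.

import zipfile

# A wheel is a zip archive, so its file inventory can be read directly.
# Both wheels are assumed to have been fetched beforehand, e.g. with
# `pip download vllm-cpu-amxbf16==0.9.0` and `==0.9.1` (illustrative).
OLD = "vllm_cpu_amxbf16-0.9.0-cp312-cp312-manylinux_2_17_x86_64.whl"
NEW = "vllm_cpu_amxbf16-0.9.1-cp312-cp312-manylinux_2_17_x86_64.whl"

def file_sizes(wheel_path: str) -> dict[str, int]:
    # Map each archive member to its uncompressed size in bytes.
    with zipfile.ZipFile(wheel_path) as zf:
        return {info.filename: info.file_size for info in zf.infolist()}

old, new = file_sizes(OLD), file_sizes(NEW)
added = sorted(set(new) - set(old))
removed = sorted(set(old) - set(new))
changed = sorted(p for p in set(old) & set(new) if old[p] != new[p])

print(f"Files changed ({len(added) + len(removed) + len(changed)})")
for path in added:
    # Newly added files show their full size as added, nothing removed.
    print(f"  {path} +{new[path]} -0")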
Files changed (1197)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +53 -0
  3. vllm/_custom_ops.py +1828 -0
  4. vllm/_ipex_ops.py +244 -0
  5. vllm/_version.py +34 -0
  6. vllm/adapter_commons/__init__.py +0 -0
  7. vllm/adapter_commons/layers.py +16 -0
  8. vllm/adapter_commons/models.py +106 -0
  9. vllm/adapter_commons/request.py +26 -0
  10. vllm/adapter_commons/utils.py +93 -0
  11. vllm/adapter_commons/worker_manager.py +39 -0
  12. vllm/assets/__init__.py +0 -0
  13. vllm/assets/audio.py +45 -0
  14. vllm/assets/base.py +41 -0
  15. vllm/assets/image.py +34 -0
  16. vllm/assets/video.py +115 -0
  17. vllm/attention/__init__.py +20 -0
  18. vllm/attention/backends/__init__.py +0 -0
  19. vllm/attention/backends/abstract.py +308 -0
  20. vllm/attention/backends/blocksparse_attn.py +461 -0
  21. vllm/attention/backends/cpu_mla.py +307 -0
  22. vllm/attention/backends/dual_chunk_flash_attn.py +1498 -0
  23. vllm/attention/backends/flash_attn.py +1003 -0
  24. vllm/attention/backends/flashinfer.py +1104 -0
  25. vllm/attention/backends/flashmla.py +244 -0
  26. vllm/attention/backends/hpu_attn.py +313 -0
  27. vllm/attention/backends/ipex_attn.py +398 -0
  28. vllm/attention/backends/mla/__init__.py +0 -0
  29. vllm/attention/backends/mla/common.py +1385 -0
  30. vllm/attention/backends/pallas.py +351 -0
  31. vllm/attention/backends/placeholder_attn.py +400 -0
  32. vllm/attention/backends/rocm_aiter_mla.py +435 -0
  33. vllm/attention/backends/rocm_flash_attn.py +975 -0
  34. vllm/attention/backends/torch_sdpa.py +703 -0
  35. vllm/attention/backends/triton_mla.py +115 -0
  36. vllm/attention/backends/utils.py +610 -0
  37. vllm/attention/backends/xformers.py +802 -0
  38. vllm/attention/layer.py +468 -0
  39. vllm/attention/ops/__init__.py +0 -0
  40. vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
  41. vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +433 -0
  42. vllm/attention/ops/blocksparse_attention/interface.py +239 -0
  43. vllm/attention/ops/blocksparse_attention/utils.py +246 -0
  44. vllm/attention/ops/chunked_prefill_paged_decode.py +368 -0
  45. vllm/attention/ops/flashmla.py +116 -0
  46. vllm/attention/ops/hpu_paged_attn.py +88 -0
  47. vllm/attention/ops/ipex_attn.py +195 -0
  48. vllm/attention/ops/merge_attn_states.py +43 -0
  49. vllm/attention/ops/nki_flash_attn.py +906 -0
  50. vllm/attention/ops/paged_attn.py +256 -0
  51. vllm/attention/ops/prefix_prefill.py +902 -0
  52. vllm/attention/ops/rocm_aiter_mla.py +100 -0
  53. vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
  54. vllm/attention/ops/triton_decode_attention.py +674 -0
  55. vllm/attention/ops/triton_flash_attention.py +979 -0
  56. vllm/attention/ops/triton_merge_attn_states.py +97 -0
  57. vllm/attention/ops/triton_unified_attention.py +334 -0
  58. vllm/attention/selector.py +187 -0
  59. vllm/attention/utils/fa_utils.py +55 -0
  60. vllm/beam_search.py +87 -0
  61. vllm/benchmarks/__init__.py +0 -0
  62. vllm/benchmarks/datasets.py +1185 -0
  63. vllm/benchmarks/endpoint_request_func.py +381 -0
  64. vllm/benchmarks/latency.py +168 -0
  65. vllm/benchmarks/serve.py +1135 -0
  66. vllm/benchmarks/throughput.py +609 -0
  67. vllm/benchmarks/utils.py +70 -0
  68. vllm/collect_env.py +820 -0
  69. vllm/compilation/__init__.py +0 -0
  70. vllm/compilation/activation_quant_fusion.py +89 -0
  71. vllm/compilation/backends.py +563 -0
  72. vllm/compilation/base_piecewise_backend.py +72 -0
  73. vllm/compilation/collective_fusion.py +127 -0
  74. vllm/compilation/compiler_interface.py +544 -0
  75. vllm/compilation/counter.py +38 -0
  76. vllm/compilation/cuda_piecewise_backend.py +214 -0
  77. vllm/compilation/decorators.py +250 -0
  78. vllm/compilation/fix_functionalization.py +191 -0
  79. vllm/compilation/fusion.py +618 -0
  80. vllm/compilation/fx_utils.py +62 -0
  81. vllm/compilation/inductor_pass.py +115 -0
  82. vllm/compilation/monitor.py +39 -0
  83. vllm/compilation/multi_output_match.py +109 -0
  84. vllm/compilation/noop_elimination.py +137 -0
  85. vllm/compilation/pass_manager.py +78 -0
  86. vllm/compilation/sequence_parallelism.py +268 -0
  87. vllm/compilation/torch25_custom_graph_pass.py +42 -0
  88. vllm/compilation/vllm_inductor_pass.py +67 -0
  89. vllm/compilation/wrapper.py +135 -0
  90. vllm/config.py +4746 -0
  91. vllm/connections.py +174 -0
  92. vllm/core/__init__.py +0 -0
  93. vllm/core/block/__init__.py +0 -0
  94. vllm/core/block/block_table.py +399 -0
  95. vllm/core/block/common.py +371 -0
  96. vllm/core/block/cpu_gpu_block_allocator.py +441 -0
  97. vllm/core/block/interfaces.py +319 -0
  98. vllm/core/block/naive_block.py +466 -0
  99. vllm/core/block/prefix_caching_block.py +1135 -0
  100. vllm/core/block/utils.py +28 -0
  101. vllm/core/block_manager.py +521 -0
  102. vllm/core/evictor.py +157 -0
  103. vllm/core/interfaces.py +135 -0
  104. vllm/core/placeholder_block_space_manager.py +100 -0
  105. vllm/core/scheduler.py +2093 -0
  106. vllm/device_allocator/__init__.py +0 -0
  107. vllm/device_allocator/cumem.py +281 -0
  108. vllm/distributed/__init__.py +6 -0
  109. vllm/distributed/communication_op.py +41 -0
  110. vllm/distributed/device_communicators/__init__.py +0 -0
  111. vllm/distributed/device_communicators/all2all.py +264 -0
  112. vllm/distributed/device_communicators/base_device_communicator.py +260 -0
  113. vllm/distributed/device_communicators/cpu_communicator.py +145 -0
  114. vllm/distributed/device_communicators/cuda_communicator.py +176 -0
  115. vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
  116. vllm/distributed/device_communicators/custom_all_reduce.py +304 -0
  117. vllm/distributed/device_communicators/custom_all_reduce_utils.py +259 -0
  118. vllm/distributed/device_communicators/hpu_communicator.py +46 -0
  119. vllm/distributed/device_communicators/neuron_communicator.py +20 -0
  120. vllm/distributed/device_communicators/pynccl.py +218 -0
  121. vllm/distributed/device_communicators/pynccl_wrapper.py +341 -0
  122. vllm/distributed/device_communicators/shm_broadcast.py +585 -0
  123. vllm/distributed/device_communicators/tpu_communicator.py +103 -0
  124. vllm/distributed/device_communicators/xpu_communicator.py +55 -0
  125. vllm/distributed/kv_events.py +356 -0
  126. vllm/distributed/kv_transfer/README.md +29 -0
  127. vllm/distributed/kv_transfer/__init__.py +12 -0
  128. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  129. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  130. vllm/distributed/kv_transfer/kv_connector/base.py +128 -0
  131. vllm/distributed/kv_transfer/kv_connector/factory.py +128 -0
  132. vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +99 -0
  133. vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +203 -0
  134. vllm/distributed/kv_transfer/kv_connector/simple_connector.py +329 -0
  135. vllm/distributed/kv_transfer/kv_connector/utils.py +108 -0
  136. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
  137. vllm/distributed/kv_transfer/kv_connector/v1/base.py +283 -0
  138. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +134 -0
  139. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +201 -0
  140. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1030 -0
  141. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +384 -0
  142. vllm/distributed/kv_transfer/kv_connector_agent.py +77 -0
  143. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  144. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
  145. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
  146. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
  147. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  148. vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
  149. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +280 -0
  150. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
  151. vllm/distributed/kv_transfer/kv_transfer_state.py +71 -0
  152. vllm/distributed/parallel_state.py +1296 -0
  153. vllm/distributed/tpu_distributed_utils.py +177 -0
  154. vllm/distributed/utils.py +536 -0
  155. vllm/engine/__init__.py +0 -0
  156. vllm/engine/arg_utils.py +1708 -0
  157. vllm/engine/async_llm_engine.py +1200 -0
  158. vllm/engine/async_timeout.py +173 -0
  159. vllm/engine/llm_engine.py +2097 -0
  160. vllm/engine/metrics.py +629 -0
  161. vllm/engine/metrics_types.py +94 -0
  162. vllm/engine/multiprocessing/__init__.py +148 -0
  163. vllm/engine/multiprocessing/client.py +681 -0
  164. vllm/engine/multiprocessing/engine.py +460 -0
  165. vllm/engine/output_processor/__init__.py +0 -0
  166. vllm/engine/output_processor/interfaces.py +75 -0
  167. vllm/engine/output_processor/multi_step.py +216 -0
  168. vllm/engine/output_processor/single_step.py +145 -0
  169. vllm/engine/output_processor/stop_checker.py +131 -0
  170. vllm/engine/output_processor/util.py +28 -0
  171. vllm/engine/protocol.py +317 -0
  172. vllm/entrypoints/__init__.py +0 -0
  173. vllm/entrypoints/api_server.py +178 -0
  174. vllm/entrypoints/chat_utils.py +1299 -0
  175. vllm/entrypoints/cli/__init__.py +0 -0
  176. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  177. vllm/entrypoints/cli/benchmark/base.py +39 -0
  178. vllm/entrypoints/cli/benchmark/latency.py +30 -0
  179. vllm/entrypoints/cli/benchmark/main.py +54 -0
  180. vllm/entrypoints/cli/benchmark/serve.py +30 -0
  181. vllm/entrypoints/cli/benchmark/throughput.py +30 -0
  182. vllm/entrypoints/cli/collect_env.py +35 -0
  183. vllm/entrypoints/cli/main.py +65 -0
  184. vllm/entrypoints/cli/openai.py +205 -0
  185. vllm/entrypoints/cli/run_batch.py +62 -0
  186. vllm/entrypoints/cli/serve.py +328 -0
  187. vllm/entrypoints/cli/types.py +25 -0
  188. vllm/entrypoints/launcher.py +147 -0
  189. vllm/entrypoints/llm.py +1544 -0
  190. vllm/entrypoints/logger.py +50 -0
  191. vllm/entrypoints/openai/__init__.py +0 -0
  192. vllm/entrypoints/openai/api_server.py +1387 -0
  193. vllm/entrypoints/openai/cli_args.py +315 -0
  194. vllm/entrypoints/openai/logits_processors.py +90 -0
  195. vllm/entrypoints/openai/protocol.py +1913 -0
  196. vllm/entrypoints/openai/run_batch.py +463 -0
  197. vllm/entrypoints/openai/serving_chat.py +1221 -0
  198. vllm/entrypoints/openai/serving_classification.py +160 -0
  199. vllm/entrypoints/openai/serving_completion.py +592 -0
  200. vllm/entrypoints/openai/serving_embedding.py +201 -0
  201. vllm/entrypoints/openai/serving_engine.py +986 -0
  202. vllm/entrypoints/openai/serving_models.py +315 -0
  203. vllm/entrypoints/openai/serving_pooling.py +232 -0
  204. vllm/entrypoints/openai/serving_score.py +433 -0
  205. vllm/entrypoints/openai/serving_tokenization.py +157 -0
  206. vllm/entrypoints/openai/serving_transcription.py +424 -0
  207. vllm/entrypoints/openai/tool_parsers/__init__.py +23 -0
  208. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
  209. vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
  210. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
  211. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
  212. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +371 -0
  213. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
  214. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
  215. vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
  216. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +267 -0
  217. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
  218. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
  219. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
  220. vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
  221. vllm/entrypoints/score_utils.py +50 -0
  222. vllm/entrypoints/ssl.py +75 -0
  223. vllm/entrypoints/utils.py +233 -0
  224. vllm/env_override.py +41 -0
  225. vllm/envs.py +944 -0
  226. vllm/executor/__init__.py +0 -0
  227. vllm/executor/executor_base.py +401 -0
  228. vllm/executor/mp_distributed_executor.py +244 -0
  229. vllm/executor/msgspec_utils.py +30 -0
  230. vllm/executor/multiproc_worker_utils.py +313 -0
  231. vllm/executor/ray_distributed_executor.py +701 -0
  232. vllm/executor/ray_utils.py +399 -0
  233. vllm/executor/uniproc_executor.py +139 -0
  234. vllm/forward_context.py +179 -0
  235. vllm/inputs/__init__.py +41 -0
  236. vllm/inputs/data.py +331 -0
  237. vllm/inputs/parse.py +151 -0
  238. vllm/inputs/preprocess.py +909 -0
  239. vllm/inputs/registry.py +237 -0
  240. vllm/jsontree.py +80 -0
  241. vllm/logger.py +212 -0
  242. vllm/logging_utils/__init__.py +8 -0
  243. vllm/logging_utils/dump_input.py +85 -0
  244. vllm/logging_utils/formatter.py +18 -0
  245. vllm/logits_process.py +119 -0
  246. vllm/lora/__init__.py +0 -0
  247. vllm/lora/fully_sharded_layers.py +355 -0
  248. vllm/lora/layers.py +1285 -0
  249. vllm/lora/lora.py +199 -0
  250. vllm/lora/models.py +818 -0
  251. vllm/lora/ops/__init__.py +0 -0
  252. vllm/lora/ops/torch_ops/__init__.py +16 -0
  253. vllm/lora/ops/torch_ops/lora_ops.py +119 -0
  254. vllm/lora/ops/triton_ops/__init__.py +12 -0
  255. vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
  256. vllm/lora/ops/triton_ops/lora_expand_op.py +290 -0
  257. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
  258. vllm/lora/ops/triton_ops/lora_shrink_op.py +244 -0
  259. vllm/lora/ops/triton_ops/utils.py +120 -0
  260. vllm/lora/ops/xla_ops/__init__.py +7 -0
  261. vllm/lora/ops/xla_ops/lora_ops.py +145 -0
  262. vllm/lora/peft_helper.py +136 -0
  263. vllm/lora/punica_wrapper/__init__.py +10 -0
  264. vllm/lora/punica_wrapper/punica_base.py +485 -0
  265. vllm/lora/punica_wrapper/punica_cpu.py +349 -0
  266. vllm/lora/punica_wrapper/punica_gpu.py +290 -0
  267. vllm/lora/punica_wrapper/punica_hpu.py +145 -0
  268. vllm/lora/punica_wrapper/punica_selector.py +20 -0
  269. vllm/lora/punica_wrapper/punica_tpu.py +405 -0
  270. vllm/lora/punica_wrapper/utils.py +164 -0
  271. vllm/lora/request.py +99 -0
  272. vllm/lora/resolver.py +85 -0
  273. vllm/lora/utils.py +240 -0
  274. vllm/lora/worker_manager.py +259 -0
  275. vllm/model_executor/__init__.py +16 -0
  276. vllm/model_executor/custom_op.py +152 -0
  277. vllm/model_executor/guided_decoding/__init__.py +181 -0
  278. vllm/model_executor/guided_decoding/guidance_decoding.py +63 -0
  279. vllm/model_executor/guided_decoding/guidance_logits_processors.py +104 -0
  280. vllm/model_executor/guided_decoding/guided_fields.py +41 -0
  281. vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +67 -0
  282. vllm/model_executor/guided_decoding/outlines_decoding.py +155 -0
  283. vllm/model_executor/guided_decoding/outlines_logits_processors.py +284 -0
  284. vllm/model_executor/guided_decoding/utils.py +242 -0
  285. vllm/model_executor/guided_decoding/xgrammar_decoding.py +426 -0
  286. vllm/model_executor/layers/__init__.py +0 -0
  287. vllm/model_executor/layers/activation.py +369 -0
  288. vllm/model_executor/layers/fused_moe/__init__.py +54 -0
  289. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +125 -0
  290. vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +117 -0
  291. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  292. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  293. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  294. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  295. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  296. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  297. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  298. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  299. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  300. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  301. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  302. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  303. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  304. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  305. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  306. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  307. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  308. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  309. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  310. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  311. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  312. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  313. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  314. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  315. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  316. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  317. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  318. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  319. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  320. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  321. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  322. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  323. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  324. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  325. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  326. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  327. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  328. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  329. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  330. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  331. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  332. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  333. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  334. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  335. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  336. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  337. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  338. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  339. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  454. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  455. vllm/model_executor/layers/fused_moe/cutlass_moe.py +461 -0
  456. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +240 -0
  457. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +240 -0
  458. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +186 -0
  459. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +775 -0
  460. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +232 -0
  461. vllm/model_executor/layers/fused_moe/fused_moe.py +1724 -0
  462. vllm/model_executor/layers/fused_moe/layer.py +1535 -0
  463. vllm/model_executor/layers/fused_moe/modular_kernel.py +446 -0
  464. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +243 -0
  465. vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
  466. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +190 -0
  467. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  468. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +159 -0
  469. vllm/model_executor/layers/fused_moe/prepare_finalize.py +69 -0
  470. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +421 -0
  471. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +117 -0
  472. vllm/model_executor/layers/fused_moe/utils.py +98 -0
  473. vllm/model_executor/layers/layernorm.py +288 -0
  474. vllm/model_executor/layers/lightning_attn.py +652 -0
  475. vllm/model_executor/layers/linear.py +1524 -0
  476. vllm/model_executor/layers/logits_processor.py +197 -0
  477. vllm/model_executor/layers/mamba/__init__.py +0 -0
  478. vllm/model_executor/layers/mamba/mamba2_metadata.py +125 -0
  479. vllm/model_executor/layers/mamba/mamba_mixer.py +245 -0
  480. vllm/model_executor/layers/mamba/mamba_mixer2.py +616 -0
  481. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  482. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +105 -0
  483. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
  484. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +262 -0
  485. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +589 -0
  486. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +751 -0
  487. vllm/model_executor/layers/mamba/ops/ssd_combined.py +232 -0
  488. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +206 -0
  489. vllm/model_executor/layers/pooler.py +350 -0
  490. vllm/model_executor/layers/quantization/__init__.py +157 -0
  491. vllm/model_executor/layers/quantization/aqlm.py +376 -0
  492. vllm/model_executor/layers/quantization/auto_round.py +310 -0
  493. vllm/model_executor/layers/quantization/awq.py +194 -0
  494. vllm/model_executor/layers/quantization/awq_marlin.py +519 -0
  495. vllm/model_executor/layers/quantization/awq_triton.py +320 -0
  496. vllm/model_executor/layers/quantization/base_config.py +151 -0
  497. vllm/model_executor/layers/quantization/bitblas.py +461 -0
  498. vllm/model_executor/layers/quantization/bitsandbytes.py +396 -0
  499. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  500. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +668 -0
  501. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1260 -0
  502. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +24 -0
  503. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +358 -0
  504. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  505. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
  506. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +93 -0
  507. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +178 -0
  508. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
  509. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +150 -0
  510. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
  511. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
  512. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
  513. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  514. vllm/model_executor/layers/quantization/deepspeedfp.py +195 -0
  515. vllm/model_executor/layers/quantization/experts_int8.py +196 -0
  516. vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
  517. vllm/model_executor/layers/quantization/fp8.py +906 -0
  518. vllm/model_executor/layers/quantization/gguf.py +565 -0
  519. vllm/model_executor/layers/quantization/gptq.py +278 -0
  520. vllm/model_executor/layers/quantization/gptq_bitblas.py +445 -0
  521. vllm/model_executor/layers/quantization/gptq_marlin.py +648 -0
  522. vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
  523. vllm/model_executor/layers/quantization/hqq_marlin.py +332 -0
  524. vllm/model_executor/layers/quantization/ipex_quant.py +250 -0
  525. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  526. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +90 -0
  527. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +83 -0
  528. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
  529. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +300 -0
  530. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
  531. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +120 -0
  532. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +131 -0
  533. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
  534. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +87 -0
  535. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +120 -0
  536. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
  537. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
  538. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +105 -0
  539. vllm/model_executor/layers/quantization/kv_cache.py +139 -0
  540. vllm/model_executor/layers/quantization/marlin.py +261 -0
  541. vllm/model_executor/layers/quantization/modelopt.py +737 -0
  542. vllm/model_executor/layers/quantization/moe_wna16.py +449 -0
  543. vllm/model_executor/layers/quantization/neuron_quant.py +76 -0
  544. vllm/model_executor/layers/quantization/ptpc_fp8.py +127 -0
  545. vllm/model_executor/layers/quantization/qqq.py +275 -0
  546. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  547. vllm/model_executor/layers/quantization/quark/quark.py +441 -0
  548. vllm/model_executor/layers/quantization/quark/quark_moe.py +237 -0
  549. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  550. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  551. vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +126 -0
  552. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +146 -0
  553. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
  554. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  555. vllm/model_executor/layers/quantization/schema.py +86 -0
  556. vllm/model_executor/layers/quantization/torchao.py +161 -0
  557. vllm/model_executor/layers/quantization/tpu_int8.py +121 -0
  558. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  559. vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
  560. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +208 -0
  561. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  562. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  563. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  564. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  565. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  566. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  567. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  568. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  569. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  570. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  571. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  572. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  573. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  574. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  575. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  576. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  577. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  578. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  579. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  580. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  581. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  582. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  583. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  584. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  585. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  586. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  587. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  588. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  589. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  590. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  591. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  592. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  593. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  594. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  595. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  596. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  597. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  598. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  599. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  600. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  601. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  602. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  603. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  604. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  605. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  606. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  607. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  608. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  609. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  610. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  611. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  612. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  613. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  614. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  615. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  616. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  617. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  618. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  619. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  620. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  621. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  622. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  623. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  624. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  625. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  626. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  627. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  628. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  629. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  630. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  631. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  632. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  633. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  634. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  635. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  636. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  637. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  638. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  639. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  640. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  641. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  642. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  643. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  644. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  645. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  646. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  647. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  648. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  649. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  650. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  651. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  652. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  653. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  654. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  655. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  656. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  657. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  658. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  659. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  660. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  661. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  662. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  663. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  664. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  665. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  666. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  667. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  668. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  669. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  670. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  671. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  672. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  673. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  674. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  675. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  676. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  677. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  678. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  679. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  680. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  681. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  682. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  683. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  684. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  685. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  686. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  687. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  688. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  689. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  690. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  691. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  722. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  723. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  724. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  725. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  726. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  727. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  728. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  729. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  730. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  731. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  732. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  733. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  734. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  735. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  736. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  737. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  738. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  739. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  740. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  741. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  742. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  743. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  744. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  745. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  746. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  747. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  748. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  749. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  750. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  751. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  752. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  753. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  754. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  755. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  756. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  757. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  758. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  759. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  760. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  761. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  762. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  763. vllm/model_executor/layers/quantization/utils/fp8_utils.py +618 -0
  764. vllm/model_executor/layers/quantization/utils/gptq_utils.py +95 -0
  765. vllm/model_executor/layers/quantization/utils/int8_utils.py +485 -0
  766. vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
  767. vllm/model_executor/layers/quantization/utils/machete_utils.py +33 -0
  768. vllm/model_executor/layers/quantization/utils/marlin_utils.py +476 -0
  769. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +283 -0
  770. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +325 -0
  771. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
  772. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
  773. vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +126 -0
  774. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +45 -0
  775. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +104 -0
  776. vllm/model_executor/layers/quantization/utils/quant_utils.py +573 -0
  777. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +405 -0
  778. vllm/model_executor/layers/rejection_sampler.py +406 -0
  779. vllm/model_executor/layers/resampler.py +270 -0
  780. vllm/model_executor/layers/rotary_embedding.py +1862 -0
  781. vllm/model_executor/layers/sampler.py +1204 -0
  782. vllm/model_executor/layers/spec_decode_base_sampler.py +259 -0
  783. vllm/model_executor/layers/typical_acceptance_sampler.py +166 -0
  784. vllm/model_executor/layers/utils.py +95 -0
  785. vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
  786. vllm/model_executor/model_loader/__init__.py +76 -0
  787. vllm/model_executor/model_loader/base_loader.py +43 -0
  788. vllm/model_executor/model_loader/bitsandbytes_loader.py +570 -0
  789. vllm/model_executor/model_loader/default_loader.py +282 -0
  790. vllm/model_executor/model_loader/dummy_loader.py +27 -0
  791. vllm/model_executor/model_loader/gguf_loader.py +120 -0
  792. vllm/model_executor/model_loader/neuron.py +476 -0
  793. vllm/model_executor/model_loader/neuronx_distributed.py +685 -0
  794. vllm/model_executor/model_loader/runai_streamer_loader.py +109 -0
  795. vllm/model_executor/model_loader/sharded_state_loader.py +201 -0
  796. vllm/model_executor/model_loader/tensorizer.py +600 -0
  797. vllm/model_executor/model_loader/tensorizer_loader.py +123 -0
  798. vllm/model_executor/model_loader/tpu.py +112 -0
  799. vllm/model_executor/model_loader/utils.py +302 -0
  800. vllm/model_executor/model_loader/weight_utils.py +782 -0
  801. vllm/model_executor/models/__init__.py +28 -0
  802. vllm/model_executor/models/adapters.py +248 -0
  803. vllm/model_executor/models/aimv2.py +246 -0
  804. vllm/model_executor/models/arctic.py +559 -0
  805. vllm/model_executor/models/aria.py +657 -0
  806. vllm/model_executor/models/aya_vision.py +466 -0
  807. vllm/model_executor/models/baichuan.py +474 -0
  808. vllm/model_executor/models/bamba.py +543 -0
  809. vllm/model_executor/models/bart.py +938 -0
  810. vllm/model_executor/models/bert.py +523 -0
  811. vllm/model_executor/models/bert_with_rope.py +769 -0
  812. vllm/model_executor/models/blip.py +339 -0
  813. vllm/model_executor/models/blip2.py +718 -0
  814. vllm/model_executor/models/bloom.py +373 -0
  815. vllm/model_executor/models/chameleon.py +1136 -0
  816. vllm/model_executor/models/chatglm.py +478 -0
  817. vllm/model_executor/models/clip.py +407 -0
  818. vllm/model_executor/models/commandr.py +472 -0
  819. vllm/model_executor/models/constant_size_cache.py +137 -0
  820. vllm/model_executor/models/dbrx.py +472 -0
  821. vllm/model_executor/models/deepseek.py +486 -0
  822. vllm/model_executor/models/deepseek_mtp.py +269 -0
  823. vllm/model_executor/models/deepseek_v2.py +843 -0
  824. vllm/model_executor/models/deepseek_vl2.py +648 -0
  825. vllm/model_executor/models/eagle.py +260 -0
  826. vllm/model_executor/models/exaone.py +551 -0
  827. vllm/model_executor/models/fairseq2_llama.py +154 -0
  828. vllm/model_executor/models/falcon.py +510 -0
  829. vllm/model_executor/models/falcon_h1.py +685 -0
  830. vllm/model_executor/models/florence2.py +1103 -0
  831. vllm/model_executor/models/fuyu.py +389 -0
  832. vllm/model_executor/models/gemma.py +425 -0
  833. vllm/model_executor/models/gemma2.py +425 -0
  834. vllm/model_executor/models/gemma3.py +533 -0
  835. vllm/model_executor/models/gemma3_mm.py +709 -0
  836. vllm/model_executor/models/glm.py +23 -0
  837. vllm/model_executor/models/glm4.py +305 -0
  838. vllm/model_executor/models/glm4v.py +648 -0
  839. vllm/model_executor/models/gpt2.py +328 -0
  840. vllm/model_executor/models/gpt_bigcode.py +335 -0
  841. vllm/model_executor/models/gpt_j.py +339 -0
  842. vllm/model_executor/models/gpt_neox.py +332 -0
  843. vllm/model_executor/models/granite.py +493 -0
  844. vllm/model_executor/models/granite_speech.py +779 -0
  845. vllm/model_executor/models/granitemoe.py +437 -0
  846. vllm/model_executor/models/granitemoehybrid.py +586 -0
  847. vllm/model_executor/models/granitemoeshared.py +341 -0
  848. vllm/model_executor/models/gritlm.py +224 -0
  849. vllm/model_executor/models/grok1.py +546 -0
  850. vllm/model_executor/models/h2ovl.py +546 -0
  851. vllm/model_executor/models/idefics2_vision_model.py +389 -0
  852. vllm/model_executor/models/idefics3.py +776 -0
  853. vllm/model_executor/models/interfaces.py +572 -0
  854. vllm/model_executor/models/interfaces_base.py +164 -0
  855. vllm/model_executor/models/intern_vit.py +480 -0
  856. vllm/model_executor/models/internlm2.py +455 -0
  857. vllm/model_executor/models/internlm2_ve.py +147 -0
  858. vllm/model_executor/models/internvl.py +1418 -0
  859. vllm/model_executor/models/jais.py +373 -0
  860. vllm/model_executor/models/jamba.py +592 -0
  861. vllm/model_executor/models/kimi_vl.py +577 -0
  862. vllm/model_executor/models/llama.py +644 -0
  863. vllm/model_executor/models/llama4.py +532 -0
  864. vllm/model_executor/models/llama_eagle.py +165 -0
  865. vllm/model_executor/models/llama_eagle3.py +263 -0
  866. vllm/model_executor/models/llava.py +866 -0
  867. vllm/model_executor/models/llava_next.py +586 -0
  868. vllm/model_executor/models/llava_next_video.py +471 -0
  869. vllm/model_executor/models/llava_onevision.py +956 -0
  870. vllm/model_executor/models/mamba.py +273 -0
  871. vllm/model_executor/models/mamba2.py +308 -0
  872. vllm/model_executor/models/mamba_cache.py +76 -0
  873. vllm/model_executor/models/medusa.py +219 -0
  874. vllm/model_executor/models/mimo.py +192 -0
  875. vllm/model_executor/models/mimo_mtp.py +285 -0
  876. vllm/model_executor/models/minicpm.py +592 -0
  877. vllm/model_executor/models/minicpm3.py +230 -0
  878. vllm/model_executor/models/minicpm_eagle.py +391 -0
  879. vllm/model_executor/models/minicpmo.py +759 -0
  880. vllm/model_executor/models/minicpmv.py +1287 -0
  881. vllm/model_executor/models/minimax_cache.py +36 -0
  882. vllm/model_executor/models/minimax_text_01.py +1301 -0
  883. vllm/model_executor/models/minimax_vl_01.py +364 -0
  884. vllm/model_executor/models/mistral3.py +604 -0
  885. vllm/model_executor/models/mixtral.py +488 -0
  886. vllm/model_executor/models/mixtral_quant.py +453 -0
  887. vllm/model_executor/models/mllama.py +1624 -0
  888. vllm/model_executor/models/mllama4.py +938 -0
  889. vllm/model_executor/models/mlp_speculator.py +206 -0
  890. vllm/model_executor/models/modernbert.py +331 -0
  891. vllm/model_executor/models/module_mapping.py +72 -0
  892. vllm/model_executor/models/molmo.py +1568 -0
  893. vllm/model_executor/models/moonvit.py +630 -0
  894. vllm/model_executor/models/mpt.py +331 -0
  895. vllm/model_executor/models/nemotron.py +508 -0
  896. vllm/model_executor/models/nemotron_h.py +573 -0
  897. vllm/model_executor/models/nemotron_nas.py +484 -0
  898. vllm/model_executor/models/nvlm_d.py +216 -0
  899. vllm/model_executor/models/olmo.py +389 -0
  900. vllm/model_executor/models/olmo2.py +414 -0
  901. vllm/model_executor/models/olmoe.py +468 -0
  902. vllm/model_executor/models/opt.py +412 -0
  903. vllm/model_executor/models/orion.py +349 -0
  904. vllm/model_executor/models/ovis.py +567 -0
  905. vllm/model_executor/models/paligemma.py +398 -0
  906. vllm/model_executor/models/persimmon.py +344 -0
  907. vllm/model_executor/models/phi.py +356 -0
  908. vllm/model_executor/models/phi3.py +19 -0
  909. vllm/model_executor/models/phi3_small.py +465 -0
  910. vllm/model_executor/models/phi3v.py +723 -0
  911. vllm/model_executor/models/phi4mm.py +1246 -0
  912. vllm/model_executor/models/phi4mm_audio.py +1233 -0
  913. vllm/model_executor/models/phi4mm_utils.py +1884 -0
  914. vllm/model_executor/models/phimoe.py +665 -0
  915. vllm/model_executor/models/pixtral.py +1316 -0
  916. vllm/model_executor/models/plamo2.py +738 -0
  917. vllm/model_executor/models/prithvi_geospatial_mae.py +232 -0
  918. vllm/model_executor/models/qwen.py +362 -0
  919. vllm/model_executor/models/qwen2.py +497 -0
  920. vllm/model_executor/models/qwen2_5_omni_thinker.py +904 -0
  921. vllm/model_executor/models/qwen2_5_vl.py +1166 -0
  922. vllm/model_executor/models/qwen2_audio.py +410 -0
  923. vllm/model_executor/models/qwen2_moe.py +540 -0
  924. vllm/model_executor/models/qwen2_rm.py +132 -0
  925. vllm/model_executor/models/qwen2_vl.py +1405 -0
  926. vllm/model_executor/models/qwen3.py +321 -0
  927. vllm/model_executor/models/qwen3_moe.py +535 -0
  928. vllm/model_executor/models/qwen_vl.py +785 -0
  929. vllm/model_executor/models/registry.py +622 -0
  930. vllm/model_executor/models/roberta.py +276 -0
  931. vllm/model_executor/models/siglip.py +524 -0
  932. vllm/model_executor/models/skyworkr1v.py +951 -0
  933. vllm/model_executor/models/smolvlm.py +52 -0
  934. vllm/model_executor/models/solar.py +506 -0
  935. vllm/model_executor/models/stablelm.py +343 -0
  936. vllm/model_executor/models/starcoder2.py +356 -0
  937. vllm/model_executor/models/tarsier.py +643 -0
  938. vllm/model_executor/models/telechat2.py +140 -0
  939. vllm/model_executor/models/teleflm.py +79 -0
  940. vllm/model_executor/models/transformers.py +508 -0
  941. vllm/model_executor/models/ultravox.py +656 -0
  942. vllm/model_executor/models/utils.py +731 -0
  943. vllm/model_executor/models/vision.py +147 -0
  944. vllm/model_executor/models/whisper.py +747 -0
  945. vllm/model_executor/models/zamba2.py +1009 -0
  946. vllm/model_executor/parameter.py +459 -0
  947. vllm/model_executor/pooling_metadata.py +72 -0
  948. vllm/model_executor/sampling_metadata.py +597 -0
  949. vllm/model_executor/utils.py +77 -0
  950. vllm/multimodal/__init__.py +33 -0
  951. vllm/multimodal/audio.py +106 -0
  952. vllm/multimodal/base.py +219 -0
  953. vllm/multimodal/hasher.py +118 -0
  954. vllm/multimodal/image.py +97 -0
  955. vllm/multimodal/inputs.py +876 -0
  956. vllm/multimodal/parse.py +461 -0
  957. vllm/multimodal/processing.py +1895 -0
  958. vllm/multimodal/profiling.py +258 -0
  959. vllm/multimodal/registry.py +331 -0
  960. vllm/multimodal/utils.py +436 -0
  961. vllm/multimodal/video.py +198 -0
  962. vllm/outputs.py +512 -0
  963. vllm/platforms/__init__.py +291 -0
  964. vllm/platforms/cpu.py +266 -0
  965. vllm/platforms/cuda.py +526 -0
  966. vllm/platforms/hpu.py +106 -0
  967. vllm/platforms/interface.py +538 -0
  968. vllm/platforms/neuron.py +150 -0
  969. vllm/platforms/rocm.py +435 -0
  970. vllm/platforms/tpu.py +216 -0
  971. vllm/platforms/xpu.py +156 -0
  972. vllm/plugins/__init__.py +94 -0
  973. vllm/plugins/lora_resolvers/README.md +15 -0
  974. vllm/plugins/lora_resolvers/__init__.py +0 -0
  975. vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
  976. vllm/pooling_params.py +54 -0
  977. vllm/profiler/__init__.py +0 -0
  978. vllm/profiler/layerwise_profile.py +375 -0
  979. vllm/profiler/utils.py +148 -0
  980. vllm/prompt_adapter/__init__.py +0 -0
  981. vllm/prompt_adapter/layers.py +83 -0
  982. vllm/prompt_adapter/models.py +358 -0
  983. vllm/prompt_adapter/request.py +37 -0
  984. vllm/prompt_adapter/utils.py +98 -0
  985. vllm/prompt_adapter/worker_manager.py +179 -0
  986. vllm/py.typed +2 -0
  987. vllm/reasoning/__init__.py +15 -0
  988. vllm/reasoning/abs_reasoning_parsers.py +192 -0
  989. vllm/reasoning/deepseek_r1_reasoning_parser.py +173 -0
  990. vllm/reasoning/granite_reasoning_parser.py +363 -0
  991. vllm/reasoning/qwen3_reasoning_parser.py +151 -0
  992. vllm/sampling_params.py +602 -0
  993. vllm/scalar_type.py +347 -0
  994. vllm/scripts.py +15 -0
  995. vllm/sequence.py +1568 -0
  996. vllm/spec_decode/__init__.py +0 -0
  997. vllm/spec_decode/batch_expansion.py +506 -0
  998. vllm/spec_decode/draft_model_runner.py +349 -0
  999. vllm/spec_decode/interfaces.py +99 -0
  1000. vllm/spec_decode/medusa_worker.py +138 -0
  1001. vllm/spec_decode/metrics.py +213 -0
  1002. vllm/spec_decode/mlp_speculator_worker.py +94 -0
  1003. vllm/spec_decode/mqa_scorer.py +160 -0
  1004. vllm/spec_decode/multi_step_worker.py +423 -0
  1005. vllm/spec_decode/ngram_worker.py +196 -0
  1006. vllm/spec_decode/proposer_worker_base.py +59 -0
  1007. vllm/spec_decode/smaller_tp_proposer_worker.py +196 -0
  1008. vllm/spec_decode/spec_decode_worker.py +1326 -0
  1009. vllm/spec_decode/target_model_runner.py +45 -0
  1010. vllm/spec_decode/top1_proposer.py +275 -0
  1011. vllm/spec_decode/util.py +277 -0
  1012. vllm/test_utils.py +130 -0
  1013. vllm/third_party/__init__.py +0 -0
  1014. vllm/third_party/pynvml.py +6140 -0
  1015. vllm/tracing.py +131 -0
  1016. vllm/transformers_utils/__init__.py +24 -0
  1017. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1018. vllm/transformers_utils/chat_templates/registry.py +60 -0
  1019. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1020. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1021. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1022. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1023. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1024. vllm/transformers_utils/config.py +887 -0
  1025. vllm/transformers_utils/configs/__init__.py +61 -0
  1026. vllm/transformers_utils/configs/arctic.py +207 -0
  1027. vllm/transformers_utils/configs/chatglm.py +72 -0
  1028. vllm/transformers_utils/configs/cohere2.py +195 -0
  1029. vllm/transformers_utils/configs/dbrx.py +280 -0
  1030. vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
  1031. vllm/transformers_utils/configs/eagle.py +85 -0
  1032. vllm/transformers_utils/configs/exaone.py +190 -0
  1033. vllm/transformers_utils/configs/falcon.py +90 -0
  1034. vllm/transformers_utils/configs/h2ovl.py +16 -0
  1035. vllm/transformers_utils/configs/internvl.py +54 -0
  1036. vllm/transformers_utils/configs/jais.py +238 -0
  1037. vllm/transformers_utils/configs/kimi_vl.py +37 -0
  1038. vllm/transformers_utils/configs/medusa.py +63 -0
  1039. vllm/transformers_utils/configs/minimax_text_01.py +70 -0
  1040. vllm/transformers_utils/configs/minimax_vl_01.py +71 -0
  1041. vllm/transformers_utils/configs/mllama.py +31 -0
  1042. vllm/transformers_utils/configs/mlp_speculator.py +68 -0
  1043. vllm/transformers_utils/configs/moonvit.py +33 -0
  1044. vllm/transformers_utils/configs/mpt.py +180 -0
  1045. vllm/transformers_utils/configs/nemotron.py +205 -0
  1046. vllm/transformers_utils/configs/nemotron_h.py +258 -0
  1047. vllm/transformers_utils/configs/nvlm_d.py +15 -0
  1048. vllm/transformers_utils/configs/ovis.py +184 -0
  1049. vllm/transformers_utils/configs/skyworkr1v.py +54 -0
  1050. vllm/transformers_utils/configs/solar.py +247 -0
  1051. vllm/transformers_utils/configs/telechat2.py +64 -0
  1052. vllm/transformers_utils/configs/ultravox.py +108 -0
  1053. vllm/transformers_utils/detokenizer.py +168 -0
  1054. vllm/transformers_utils/detokenizer_utils.py +189 -0
  1055. vllm/transformers_utils/processor.py +221 -0
  1056. vllm/transformers_utils/processors/__init__.py +8 -0
  1057. vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
  1058. vllm/transformers_utils/processors/ovis.py +420 -0
  1059. vllm/transformers_utils/s3_utils.py +162 -0
  1060. vllm/transformers_utils/tokenizer.py +302 -0
  1061. vllm/transformers_utils/tokenizer_base.py +149 -0
  1062. vllm/transformers_utils/tokenizer_group.py +120 -0
  1063. vllm/transformers_utils/tokenizers/__init__.py +10 -0
  1064. vllm/transformers_utils/tokenizers/mistral.py +493 -0
  1065. vllm/transformers_utils/utils.py +99 -0
  1066. vllm/triton_utils/__init__.py +14 -0
  1067. vllm/triton_utils/importing.py +50 -0
  1068. vllm/usage/__init__.py +0 -0
  1069. vllm/usage/usage_lib.py +256 -0
  1070. vllm/utils.py +2910 -0
  1071. vllm/v1/__init__.py +0 -0
  1072. vllm/v1/attention/__init__.py +0 -0
  1073. vllm/v1/attention/backends/__init__.py +0 -0
  1074. vllm/v1/attention/backends/cpu_attn.py +163 -0
  1075. vllm/v1/attention/backends/flash_attn.py +869 -0
  1076. vllm/v1/attention/backends/flashinfer.py +651 -0
  1077. vllm/v1/attention/backends/flex_attention.py +477 -0
  1078. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1079. vllm/v1/attention/backends/mla/common.py +931 -0
  1080. vllm/v1/attention/backends/mla/cutlass_mla.py +97 -0
  1081. vllm/v1/attention/backends/mla/flashmla.py +152 -0
  1082. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +220 -0
  1083. vllm/v1/attention/backends/mla/triton_mla.py +120 -0
  1084. vllm/v1/attention/backends/pallas.py +240 -0
  1085. vllm/v1/attention/backends/triton_attn.py +285 -0
  1086. vllm/v1/attention/backends/utils.py +52 -0
  1087. vllm/v1/core/__init__.py +0 -0
  1088. vllm/v1/core/block_pool.py +349 -0
  1089. vllm/v1/core/encoder_cache_manager.py +150 -0
  1090. vllm/v1/core/kv_cache_coordinator.py +363 -0
  1091. vllm/v1/core/kv_cache_manager.py +392 -0
  1092. vllm/v1/core/kv_cache_utils.py +996 -0
  1093. vllm/v1/core/sched/__init__.py +0 -0
  1094. vllm/v1/core/sched/interface.py +150 -0
  1095. vllm/v1/core/sched/output.py +154 -0
  1096. vllm/v1/core/sched/scheduler.py +1044 -0
  1097. vllm/v1/core/sched/utils.py +23 -0
  1098. vllm/v1/core/single_type_kv_cache_manager.py +403 -0
  1099. vllm/v1/engine/__init__.py +173 -0
  1100. vllm/v1/engine/async_llm.py +558 -0
  1101. vllm/v1/engine/coordinator.py +253 -0
  1102. vllm/v1/engine/core.py +961 -0
  1103. vllm/v1/engine/core_client.py +1129 -0
  1104. vllm/v1/engine/detokenizer.py +261 -0
  1105. vllm/v1/engine/exceptions.py +17 -0
  1106. vllm/v1/engine/llm_engine.py +317 -0
  1107. vllm/v1/engine/logprobs.py +199 -0
  1108. vllm/v1/engine/mm_input_cache.py +91 -0
  1109. vllm/v1/engine/output_processor.py +428 -0
  1110. vllm/v1/engine/parallel_sampling.py +133 -0
  1111. vllm/v1/engine/processor.py +407 -0
  1112. vllm/v1/executor/__init__.py +0 -0
  1113. vllm/v1/executor/abstract.py +113 -0
  1114. vllm/v1/executor/multiproc_executor.py +537 -0
  1115. vllm/v1/executor/ray_distributed_executor.py +62 -0
  1116. vllm/v1/kv_cache_interface.py +194 -0
  1117. vllm/v1/metrics/__init__.py +0 -0
  1118. vllm/v1/metrics/loggers.py +523 -0
  1119. vllm/v1/metrics/prometheus.py +82 -0
  1120. vllm/v1/metrics/ray_wrappers.py +131 -0
  1121. vllm/v1/metrics/reader.py +246 -0
  1122. vllm/v1/metrics/stats.py +239 -0
  1123. vllm/v1/outputs.py +116 -0
  1124. vllm/v1/request.py +193 -0
  1125. vllm/v1/sample/__init__.py +0 -0
  1126. vllm/v1/sample/metadata.py +44 -0
  1127. vllm/v1/sample/ops/__init__.py +0 -0
  1128. vllm/v1/sample/ops/bad_words.py +39 -0
  1129. vllm/v1/sample/ops/penalties.py +59 -0
  1130. vllm/v1/sample/ops/topk_topp_sampler.py +293 -0
  1131. vllm/v1/sample/rejection_sampler.py +631 -0
  1132. vllm/v1/sample/sampler.py +286 -0
  1133. vllm/v1/sample/tpu/__init__.py +0 -0
  1134. vllm/v1/sample/tpu/metadata.py +124 -0
  1135. vllm/v1/sample/tpu/sampler.py +145 -0
  1136. vllm/v1/serial_utils.py +315 -0
  1137. vllm/v1/spec_decode/__init__.py +0 -0
  1138. vllm/v1/spec_decode/eagle.py +432 -0
  1139. vllm/v1/spec_decode/medusa.py +62 -0
  1140. vllm/v1/spec_decode/metadata.py +62 -0
  1141. vllm/v1/spec_decode/metrics.py +178 -0
  1142. vllm/v1/spec_decode/ngram_proposer.py +132 -0
  1143. vllm/v1/spec_decode/utils.py +46 -0
  1144. vllm/v1/structured_output/__init__.py +222 -0
  1145. vllm/v1/structured_output/backend_guidance.py +245 -0
  1146. vllm/v1/structured_output/backend_types.py +134 -0
  1147. vllm/v1/structured_output/backend_xgrammar.py +318 -0
  1148. vllm/v1/structured_output/request.py +86 -0
  1149. vllm/v1/structured_output/utils.py +175 -0
  1150. vllm/v1/utils.py +743 -0
  1151. vllm/v1/worker/__init__.py +0 -0
  1152. vllm/v1/worker/block_table.py +142 -0
  1153. vllm/v1/worker/cpu_model_runner.py +86 -0
  1154. vllm/v1/worker/cpu_worker.py +152 -0
  1155. vllm/v1/worker/gpu_input_batch.py +681 -0
  1156. vllm/v1/worker/gpu_model_runner.py +2320 -0
  1157. vllm/v1/worker/gpu_worker.py +393 -0
  1158. vllm/v1/worker/lora_model_runner_mixin.py +173 -0
  1159. vllm/v1/worker/tpu_model_runner.py +1673 -0
  1160. vllm/v1/worker/tpu_worker.py +299 -0
  1161. vllm/v1/worker/utils.py +111 -0
  1162. vllm/v1/worker/worker_base.py +65 -0
  1163. vllm/version.py +41 -0
  1164. vllm/vllm_flash_attn/.gitkeep +0 -0
  1165. vllm/worker/__init__.py +0 -0
  1166. vllm/worker/cache_engine.py +145 -0
  1167. vllm/worker/cpu_enc_dec_model_runner.py +326 -0
  1168. vllm/worker/cpu_model_runner.py +671 -0
  1169. vllm/worker/cpu_pooling_model_runner.py +125 -0
  1170. vllm/worker/cpu_worker.py +450 -0
  1171. vllm/worker/enc_dec_model_runner.py +555 -0
  1172. vllm/worker/hpu_model_runner.py +2320 -0
  1173. vllm/worker/hpu_worker.py +484 -0
  1174. vllm/worker/model_runner.py +2178 -0
  1175. vllm/worker/model_runner_base.py +282 -0
  1176. vllm/worker/multi_step_hpu_worker.py +123 -0
  1177. vllm/worker/multi_step_model_runner.py +911 -0
  1178. vllm/worker/multi_step_neuron_model_runner.py +84 -0
  1179. vllm/worker/multi_step_neuronx_distributed_model_runner.py +63 -0
  1180. vllm/worker/multi_step_tpu_worker.py +108 -0
  1181. vllm/worker/multi_step_worker.py +197 -0
  1182. vllm/worker/neuron_model_runner.py +460 -0
  1183. vllm/worker/neuron_worker.py +193 -0
  1184. vllm/worker/neuronx_distributed_model_runner.py +294 -0
  1185. vllm/worker/pooling_model_runner.py +211 -0
  1186. vllm/worker/tpu_model_runner.py +909 -0
  1187. vllm/worker/tpu_worker.py +337 -0
  1188. vllm/worker/utils.py +53 -0
  1189. vllm/worker/worker.py +577 -0
  1190. vllm/worker/worker_base.py +646 -0
  1191. vllm/worker/xpu_model_runner.py +606 -0
  1192. vllm/worker/xpu_worker.py +186 -0
  1193. vllm_cpu_amxbf16-0.9.1.dist-info/METADATA +305 -0
  1194. vllm_cpu_amxbf16-0.9.1.dist-info/RECORD +1197 -0
  1195. vllm_cpu_amxbf16-0.9.1.dist-info/WHEEL +5 -0
  1196. vllm_cpu_amxbf16-0.9.1.dist-info/entry_points.txt +5 -0
  1197. vllm_cpu_amxbf16-0.9.1.dist-info/top_level.txt +1 -0
vllm/multimodal/processing.py
@@ -0,0 +1,1895 @@
+ # SPDX-License-Identifier: Apache-2.0
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+ import json
+ import sys
+ from abc import ABC, abstractmethod
+ from collections import defaultdict
+ from collections.abc import (Callable, Generator, ItemsView, Iterable, Mapping,
+                              Sequence)
+ from dataclasses import dataclass, field
+ from enum import Enum
+ from functools import lru_cache
+ from typing import (TYPE_CHECKING, Generic, NamedTuple, Optional, Protocol,
+                     TypeVar, Union, cast)
+
+ import regex as re
+ import torch
+ from typing_extensions import assert_never
+
+ from vllm.inputs import InputProcessingContext
+ from vllm.jsontree import json_map_leaves, json_reduce_leaves
+ from vllm.logger import init_logger
+ from vllm.transformers_utils.tokenizer import (AnyTokenizer, decode_tokens,
+                                                encode_tokens)
+ from vllm.utils import GiB_bytes, LRUCache, flatten_2d_lists, full_groupby
+
+ from .hasher import MultiModalHasher
+ from .inputs import (MultiModalDataDict, MultiModalEncDecInputs,
+                      MultiModalFieldConfig, MultiModalInputs, MultiModalKwargs,
+                      MultiModalKwargsItem, NestedTensors, PlaceholderRange)
+ from .parse import (DictEmbeddingItems, EmbeddingItems, MultiModalDataItems,
+                     MultiModalDataParser)
+
+ if TYPE_CHECKING:
+     from transformers.configuration_utils import PretrainedConfig
+     from transformers.feature_extraction_utils import BatchFeature
+     from transformers.processing_utils import ProcessorMixin
+
+     from .profiling import BaseDummyInputsBuilder
+
+ logger = init_logger(__name__)
+
+ _S = TypeVar("_S", str, list[int])
+
+ PromptSeq = Union[str, list[int]]
+ """A token sequence (list of token IDs) or text."""
+
+
+ @dataclass
+ class PromptIndex:
+     """Resolves to an index in the prompt."""
+     get_match_index: Callable[[AnyTokenizer, PromptSeq], Optional[int]]
+
+
+ class PromptIndexTargets:
+
+     @staticmethod
+     def start() -> PromptIndex:
+         """
+         Resolves to the start of the prompt (before the first token).
+
+         This results in a match even if the prompt is empty.
+         """
+         return PromptIndex(lambda tok, prompt: 0)
+
+     @staticmethod
+     def prefix(seq: PromptSeq) -> PromptIndex:
+         """
+         Resolves to a location in the prompt after the given prefix.
+         """
+
+         def get_match_index(
+             tokenizer: AnyTokenizer,
+             prompt: PromptSeq,
+         ) -> Optional[int]:
+             prefix = seq
+
+             if isinstance(prompt, str):
+                 if not isinstance(prefix, str):
+                     # Make both `str`
+                     prefix = decode_tokens(tokenizer, prefix)
+             else:
+                 if isinstance(prefix, str):
+                     # Make both `list[int]`
+                     prefix = encode_tokens(tokenizer,
+                                            prefix,
+                                            add_special_tokens=False)
+
+             match_idx = len(prefix)
+             return match_idx if prompt[:match_idx] == prefix else None
+
+         return PromptIndex(get_match_index)
+
+     @staticmethod
+     def end() -> PromptIndex:
+         """
+         Resolves to the end of the prompt (after the last token).
+
+         This results in a match even if the prompt is empty.
+         """
+         return PromptIndex(lambda tok, prompt: len(prompt))
+
+
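The three `PromptIndexTargets` factories above resolve to positions rather than subsequences. A minimal sketch of their behavior on a plain token prompt (assuming the `vllm` package from this wheel is importable; the token IDs are invented, and `None` stands in for the tokenizer only because these particular callbacks never touch it when the types already match):

```python
from vllm.multimodal.processing import PromptIndexTargets

prompt = [101, 7592, 2088, 102]  # hypothetical token IDs

# start() always matches index 0, even for an empty prompt.
assert PromptIndexTargets.start().get_match_index(None, prompt) == 0

# end() resolves to len(prompt), i.e. the position after the last token.
assert PromptIndexTargets.end().get_match_index(None, prompt) == 4

# prefix() returns the prefix length on a match, or None otherwise.
assert PromptIndexTargets.prefix([101, 7592]).get_match_index(None, prompt) == 2
assert PromptIndexTargets.prefix([999]).get_match_index(None, prompt) is None
```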
+ PromptTarget = Union[PromptSeq, PromptIndex]
+ """
+ The token sequence or text to update.
+ """
+
+
+ @dataclass
+ class PromptUpdateDetails(Generic[_S]):
+     """Details about the token sequence or text that are part of the update."""
+
+     full: _S
+     """The full content."""
+
+     is_embed: Optional[Callable[["_BoundPromptSequence"], torch.Tensor]] = None
+     """
+     Given [`full`][vllm.multimodal.processing.PromptUpdateDetails.full],
+     return a boolean mask of shape `(len(full),)` indicating which positions
+     of `full` to assign embeddings to.
+
+     `None` (default) means to assign embeddings to all positions of `full`.
+
+     The embeddings are obtained by calling
+     [`SupportsMultiModal.get_multimodal_embeddings`][vllm.model_executor.models.interfaces.SupportsMultiModal.get_multimodal_embeddings].
+     """
+
+     @staticmethod
+     def from_seq(seq: _S) -> "PromptUpdateDetails[_S]":
+         return PromptUpdateDetails(full=seq)
+
+     @staticmethod
+     def select_text(
+         seq: _S,
+         embed_text: str,
+     ) -> "PromptUpdateDetails[_S]":
+
+         def is_embed(full: "_BoundPromptSequence") -> torch.Tensor:
+             embed_token_ids = encode_tokens(full.tokenizer, embed_text)
+
+             return torch.isin(
+                 torch.tensor(full.token_ids),
+                 torch.tensor(embed_token_ids),
+             )
+
+         return PromptUpdateDetails(full=seq, is_embed=is_embed)
+
+     @staticmethod
+     def select_token_id(
+         seq: _S,
+         embed_token_id: int,
+     ) -> "PromptUpdateDetails[_S]":
+         return PromptUpdateDetails(
+             full=seq,
+             is_embed=lambda f: torch.tensor(f.token_ids) == embed_token_id,
+         )
+
+
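A hedged illustration of the `select_token_id` helper above: the `is_embed` callback it builds marks exactly the positions equal to `embed_token_id`. In vLLM the callback receives a `_BoundPromptSequence`; the `SimpleNamespace` below is a stand-in, since only its `token_ids` attribute is read here, and the IDs themselves are made up:

```python
from types import SimpleNamespace

from vllm.multimodal.processing import PromptUpdateDetails

# Hypothetical IDs: 32000 plays the role of an <image> feature token.
details = PromptUpdateDetails.select_token_id([10, 32000, 32000, 11],
                                              embed_token_id=32000)

mask = details.is_embed(SimpleNamespace(token_ids=[10, 32000, 32000, 11]))
assert mask.tolist() == [False, True, True, False]
```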
+ PromptUpdateInfo = Union[PromptSeq, PromptUpdateDetails]
+ """
+ The token sequence or text that are part of the update.
+
+ If only part of the content corresponds to feature placeholders, you can
+ use [`PromptUpdateDetails`][vllm.multimodal.processing.PromptUpdateDetails] to
+ specify which part.
+ """
+
+ PromptUpdateContent = Union[Callable[[int], PromptUpdateInfo],
+                             PromptUpdateInfo]
+ """
+ Given the index of the processed item within
+ [`modality`][vllm.multimodal.processing.PromptUpdate.modality],
+ output the corresponding token sequence (or text).
+
+ For convenience, you can directly pass in the token sequence (or text)
+ instead of a function if it does not depend on the input.
+ """
+
+
+ class UpdateMode(str, Enum):
+     INSERT = "insert"
+     REPLACE = "replace"
+
+
+ @dataclass
+ class PromptUpdate(ABC):
+     """
+     Defines how to update a prompt with placeholder tokens.
+     """
+
+     modality: str
+     """The modality for which the update is made."""
+
+     target: PromptTarget
+     """The token sequence (or text) to update."""
+
+     @property
+     @abstractmethod
+     def content(self) -> PromptUpdateContent:
+         """The placeholder tokens that are part of the update."""
+         raise NotImplementedError
+
+     @property
+     @abstractmethod
+     def mode(self) -> UpdateMode:
+         """Defines how to update the prompt."""
+         raise NotImplementedError
+
+     def bind(self, tokenizer: AnyTokenizer) -> "BoundPromptUpdate":
+         return BoundPromptUpdate(
+             _origin=self,
+             tokenizer=tokenizer,
+         )
+
+
+ @dataclass
+ class PromptInsertion(PromptUpdate):
+     """
+     Defines how to insert placeholder tokens into a prompt.
+
+     Example:
+
+         For each image, insert a number of ``<image>`` feature placeholders
+         equal to the feature size of the vision encoder after the ``<s>`` token:
+
+         ```python
+         PromptInsertion(
+             modality="image",
+             target="<s>",
+             insertion="<image>" * image_feature_size,
+         )
+         ```
+
+         Insert these tokens at the start of the prompt:
+
+         ```python
+         PromptInsertion(
+             modality="image",
+             target=PromptIndexTargets.start(),
+             insertion="<image>" * image_feature_size,
+         )
+         ```
+
+         Insert these tokens after a prefix ``Images:``:
+
+         ```python
+         PromptInsertion(
+             modality="image",
+             target=PromptIndexTargets.prefix("Images:"),
+             insertion="<image>" * image_feature_size,
+         )
+         ```
+
+         Insert these tokens at the end of the prompt:
+
+         ```python
+         PromptInsertion(
+             modality="image",
+             target=PromptIndexTargets.end(),
+             insertion="<image>" * image_feature_size,
+         )
+         ```
+     """
+
+     insertion: PromptUpdateContent = field(repr=False)
+     """
+     Given the index of the processed item within
+     [`modality`][vllm.multimodal.processing.PromptUpdate.modality],
+     output the token sequence (or text) to insert right after
+     [`target`][vllm.multimodal.processing.PromptUpdate.target].
+
+     For convenience, you can directly pass in the token sequence (or text)
+     instead of a function if it does not depend on the input.
+     """
+
+     @property
+     def content(self) -> PromptUpdateContent:
+         return self.insertion
+
+     @property
+     def mode(self) -> UpdateMode:
+         return UpdateMode.INSERT
+
+
+ @dataclass
+ class PromptReplacement(PromptUpdate):
+     """
+     Defines how to replace portions of an input prompt with placeholder tokens.
+
+     Example:
+
+         For each image, replace one ``<image>`` input placeholder in the prompt
+         with a number of ``<image>`` feature placeholders
+         equal to the feature size of the vision encoder:
+
+         ```python
+         PromptReplacement(
+             modality="image",
+             target="<image>",
+             replacement="<image>" * image_feature_size,
+         )
+         ```
+
+         As above, but further pad the feature placeholders with ``<image_bos>``
+         and ``<image_eos>``, which are not supposed to be passed to the vision
+         encoder:
+
+         ```python
+         PromptReplacement(
+             modality="image",
+             target="<image>",
+             replacement=PromptUpdateDetails.select_text(
+                 "".join([
+                     "<image_bos>",
+                     "<image>" * image_feature_size,
+                     "<image_eos>",
+                 ]),
+                 embed_text="<image>",
+             ),
+         )
+         ```
+
+         To avoid unnecessary tokenization during prompt replacement,
+         we recommend passing token sequences instead of text:
+
+         ```python
+         PromptReplacement(
+             modality="image",
+             target=[image_token_id],
+             replacement=PromptUpdateDetails.select_token_id(
+                 [image_bos_id] + [image_token_id] * image_feature_size +
+                 [image_eos_id],
+                 embed_token_id=image_token_id,
+             ),
+         )
+         ```
+     """
+
+     replacement: PromptUpdateContent = field(repr=False)
+     """
+     Given the index of the processed item within
+     [`modality`][vllm.multimodal.processing.PromptUpdate.modality],
+     output the token sequence (or text) to replace
+     [`target`][vllm.multimodal.processing.PromptUpdate.target].
+
+     For convenience, you can directly pass in the token sequence (or text)
+     instead of a function if it does not depend on the input.
+     """
+
+     @property
+     def content(self) -> PromptUpdateContent:
+         return self.replacement
+
+     @property
+     def mode(self) -> UpdateMode:
+         return UpdateMode.REPLACE
+
+
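To make the two concrete subclasses easy to compare, here is a small sketch (again assuming this wheel's `vllm` package is importable; the token IDs and the feature size of 4 are invented) showing that `content` and `mode` simply dispatch to the `insertion`/`replacement` field and the fixed `UpdateMode`:

```python
from vllm.multimodal.processing import (PromptInsertion, PromptReplacement,
                                        UpdateMode)

repl = PromptReplacement(
    modality="image",
    target=[32000],              # hypothetical <image> token ID
    replacement=[32000] * 4,     # hypothetical feature size of 4
)
ins = PromptInsertion(
    modality="image",
    target="<s>",
    insertion="<image>" * 4,
)

assert repl.mode is UpdateMode.REPLACE and repl.content == [32000] * 4
assert ins.mode is UpdateMode.INSERT and ins.content == "<image>" * 4
```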
+ @lru_cache(maxsize=2048)
+ def _cached_encode(
+     tokenizer: AnyTokenizer,
+     text: str,
+     *,
+     add_special_tokens: Optional[bool] = None,
+ ) -> list[int]:
+     return encode_tokens(tokenizer,
+                          text,
+                          add_special_tokens=add_special_tokens)
+
+
+ @lru_cache(maxsize=2048)
+ def _cached_decode(
+     tokenizer: AnyTokenizer,
+     token_ids: tuple[int, ...],
+     *,
+     skip_special_tokens: Optional[bool] = None,
+ ) -> str:
+     return decode_tokens(tokenizer,
+                          list(token_ids),
+                          skip_special_tokens=skip_special_tokens)
+
+
+ class _HasModalityAttr(Protocol):
+     modality: str
+
+
+ class _HasModalityProp(Protocol):
+
+     @property
+     def modality(self) -> str:
+         ...
+
+
+ _M = TypeVar("_M", bound=Union[_HasModalityAttr, _HasModalityProp])
+
+
+ def full_groupby_modality(values: Iterable[_M]) -> ItemsView[str, list[_M]]:
+     """Convenience function to apply [`full_groupby`][vllm.utils.full_groupby]
+     based on modality."""
+     return full_groupby(values, key=lambda x: x.modality)
+
+
+ @dataclass
+ class _BoundPromptSequence:
+     """
+     A [`PromptSeq`][vllm.multimodal.processing.PromptSeq] bound to a tokenizer
+     to automatically convert between token sequence and text representations.
+     """
+     tokenizer: AnyTokenizer = field(repr=False)
+
+     _text: Optional[str]
+     _token_ids: Optional[list[int]]
+
+     @staticmethod
+     def from_seq(
+         tokenizer: AnyTokenizer,
+         seq: PromptSeq,
+     ) -> "_BoundPromptSequence":
+         return _BoundPromptSequence(
+             tokenizer=tokenizer,
+             _text=seq if isinstance(seq, str) else None,
+             _token_ids=seq if isinstance(seq, list) else None,
+         )
+
+     def __post_init__(self) -> None:
+         if self._text is None and self._token_ids is None:
+             raise ValueError("At least one of 'text' and 'token_ids' must be "
+                              "specified")
+
+     @property
+     def text(self) -> str:
+         if self._text is None:
+             assert self._token_ids is not None
+             self._text = _cached_decode(self.tokenizer, tuple(self._token_ids))
+
+         return self._text
+
+     @property
+     def token_ids(self) -> list[int]:
+         if self._token_ids is None:
+             assert self._text is not None
+             self._token_ids = _cached_encode(self.tokenizer,
+                                              self._text,
+                                              add_special_tokens=False)
+
+         return self._token_ids
+
+
+ @dataclass
+ class _BoundPromptContent:
+     full: _BoundPromptSequence
+     is_embed: Optional[Callable[["_BoundPromptSequence"], torch.Tensor]]
+
+
+ @dataclass
+ class BoundPromptUpdate:
+     """
+     A [`PromptUpdate`][vllm.multimodal.processing.PromptUpdate] bound
+     to a tokenizer to automatically convert
+     [`target`][vllm.multimodal.processing.PromptUpdate.target] and the result of
+     [`get_content`][vllm.multimodal.processing.BoundPromptUpdate.get_content]
+     between token sequence and text representations.
+     """
+     _origin: PromptUpdate
+     tokenizer: AnyTokenizer = field(repr=False)
+
+     def __post_init__(self) -> None:
+         self._content_cache = dict[int, _BoundPromptContent]()
+
+     @property
+     def modality(self) -> str:
+         return self._origin.modality
+
+     @property
+     def target(self) -> Union[_BoundPromptSequence, PromptIndex]:
+         """The token sequence (or text) to update."""
+         target = self._origin.target
+
+         if isinstance(target, PromptIndex):
+             return target
+
+         return _BoundPromptSequence.from_seq(self.tokenizer, target)
+
+     @property
+     def content(self) -> PromptUpdateContent:
+         """The placeholder tokens that are part of the update."""
+         return self._origin.content
+
+     @property
+     def mode(self) -> UpdateMode:
+         """Defines how to update the prompt."""
+         return self._origin.mode
+
+     def get_content(self, item_idx: int) -> _BoundPromptContent:
+         """
+         Given the index of the processed item within
+         [`modality`][vllm.multimodal.processing.PromptUpdate.modality],
+         output the token sequence (or text) to update.
+         """
+         content = self.content
+         if callable(content):
+             cache_key = item_idx
+             if cache_key in self._content_cache:
+                 return self._content_cache[cache_key]
+
+             content = content(item_idx)
+         else:
+             cache_key = None
+
+         if not isinstance(content, PromptUpdateDetails):
+             content = PromptUpdateDetails.from_seq(content)
+
+         bound_full = _BoundPromptSequence.from_seq(self.tokenizer,
+                                                    content.full)
+         bound_content = _BoundPromptContent(full=bound_full,
+                                             is_embed=content.is_embed)
+
+         if cache_key is not None:
+             self._content_cache[cache_key] = bound_content
+
+         return bound_content
+
+
+ class _TokenMatch(NamedTuple):
+     start_idx: int
+     end_idx: int
+
+
+ def iter_token_matches(
+     token_ids: list[int],
+     match_ids: list[int],
+ ) -> Generator[_TokenMatch]:
+     """
+     Yield each occurrence of `match_ids` in `token_ids`.
+
+     Note that empty matches are ignored.
+     """
+     prompt_len = len(token_ids)
+     match_len = len(match_ids)
+
+     if match_len == 0:
+         return
+
+     start_idx = 0
+     while start_idx < prompt_len - match_len + 1:
+         end_idx = start_idx + match_len
+
+         if token_ids[start_idx:end_idx] == match_ids:
+             yield _TokenMatch(start_idx=start_idx, end_idx=end_idx)
+
+             # Exclude overlapping matches
+             start_idx = end_idx
+         else:
+             start_idx += 1
+
+
558
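+ # For example (illustrative), overlapping occurrences are skipped because the
+ # scan resumes at the end of each match:
+ #
+ #     >>> list(iter_token_matches([1, 2, 1, 2, 1, 2], [1, 2, 1]))
+ #     [_TokenMatch(start_idx=0, end_idx=3)]
+ #
+ # The candidate match starting at index 2 overlaps the first one, so only
+ # the match at index 0 is yielded.
+
+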
+ def replace_token_matches(
+     token_ids: list[int],
+     match_ids: list[int],
+     new_ids: list[int],
+ ) -> list[int]:
+     """
+     Replace each occurrence of `match_ids` in `token_ids`
+     with `new_ids`.
+
+     Note that empty matches are ignored.
+     """
+     out_seqs = list[list[int]]()
+     prev_end_idx = 0
+
+     for match in iter_token_matches(token_ids, match_ids):
+         start_idx = match.start_idx
+         end_idx = match.end_idx
+
+         out_seqs.append(token_ids[prev_end_idx:start_idx])
+         out_seqs.append(new_ids)
+         prev_end_idx = end_idx
+
+     out_seqs.append(token_ids[prev_end_idx:])
+
+     return flatten_2d_lists(out_seqs)
+
+
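+ # For example (illustrative), replacing `[1, 2]` with `[7]`:
+ #
+ #     >>> replace_token_matches([0, 1, 2, 3, 1, 2], [1, 2], [7])
+ #     [0, 7, 3, 7]
+
+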
+ @dataclass(repr=False)
+ class PromptTargetMatch(ABC):
+     _origin: BoundPromptUpdate
+
+     @property
+     def modality(self) -> str:
+         return self._origin.modality
+
+     @property
+     @abstractmethod
+     def start_idx(self) -> int:
+         raise NotImplementedError
+
+     @property
+     @abstractmethod
+     def end_idx(self) -> int:
+         raise NotImplementedError
+
+     def __repr__(self) -> str:
+         return (f"{type(self).__name__}(modality={self.modality!r}, "
+                 f"start_idx={self.start_idx!r}, end_idx={self.end_idx!r})")
+
+
+ @dataclass(repr=False)
+ class _PromptTargetIndexMatch(PromptTargetMatch):
+     match_idx: int
+
+     @property
+     def start_idx(self) -> int:
+         return self.match_idx
+
+     @property
+     def end_idx(self) -> int:
+         return self.match_idx
+
+
+ @dataclass(repr=False)
+ class _PromptTargetTokenMatch(PromptTargetMatch):
+     match: _TokenMatch
+
+     @property
+     def start_idx(self) -> int:
+         return self.match.start_idx
+
+     @property
+     def end_idx(self) -> int:
+         return self.match.end_idx
+
+
+ @dataclass(repr=False)
+ class _PromptTargetTextMatch(PromptTargetMatch):
+     match: re.Match[str]
+
+     @property
+     def start_idx(self) -> int:
+         return self.match.start()
+
+     @property
+     def end_idx(self) -> int:
+         return self.match.end()
+
+
+ @dataclass
+ class PlaceholderFeaturesInfo:
+     modality: str
+     item_idx: int
+     start_idx: int
+     tokens: list[int]
+     is_embed: Optional[torch.Tensor]
+
+     @property
+     def length(self) -> int:
+         return len(self.tokens)
+
+     def to_range(self) -> PlaceholderRange:
+         # TODO: Is it worth it to optimize this by stripping the
+         # leading and ending positions where `is_embed=False`?
+         return PlaceholderRange(
+             offset=self.start_idx,
+             length=self.length,
+             is_embed=self.is_embed,
+         )
+
+
+ def find_token_matches(
+     prompt: list[int],
+     prompt_updates: Sequence[BoundPromptUpdate],
+ ) -> Sequence[PromptTargetMatch]:
+     """Return each target of `prompt_updates` found in `prompt`."""
+
+     def get_matches(update: BoundPromptUpdate):
+         target = update.target
+
+         if isinstance(target, PromptIndex):
+             match_idx = target.get_match_index(update.tokenizer, prompt)
+             if match_idx is None:
+                 return []
+
+             return [_PromptTargetIndexMatch(update, match_idx)]
+
+         return [
+             _PromptTargetTokenMatch(update, match)
+             for match in iter_token_matches(prompt, target.token_ids)
+         ]
+
+     return [
+         match for update in prompt_updates for match in get_matches(update)
+     ]
+
+
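+ # A small sketch (illustrative): with a bound update targeting a hypothetical
+ # `<image>` token, every occurrence is collected into a flat list of matches:
+ #
+ #     >>> matches = find_token_matches(prompt_ids, [bound_image_update])
+ #     >>> [(m.start_idx, m.end_idx) for m in matches]
+ #     [(3, 4), (10, 11)]  # hypothetical positions
+
+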
+ def find_text_matches(
+     prompt: str,
+     prompt_updates: Sequence[BoundPromptUpdate],
+ ) -> Sequence[PromptTargetMatch]:
+     """Return each target of `prompt_updates` found in `prompt`."""
+
+     def get_matches(update: BoundPromptUpdate):
+         target = update.target
+
+         if isinstance(target, PromptIndex):
+             match_idx = target.get_match_index(update.tokenizer, prompt)
+             if match_idx is None:
+                 return []
+
+             return [_PromptTargetIndexMatch(update, match_idx)]
+
+         return [
+             _PromptTargetTextMatch(update, match)
+             for match in re.finditer(re.escape(target.text), prompt)
+         ]
+
+     return [
+         match for update in prompt_updates for match in get_matches(update)
+     ]
+
+
+ def _resolve_matches(
+     prompt: PromptSeq,
+     mm_matches: Mapping[str, Sequence[PromptTargetMatch]],
+ ) -> list[PromptTargetMatch]:
+     """
+     Resolve `mm_matches` to ensure that there are no overlapping matches,
+     and sort them such that earlier matches take priority over later ones.
+     """
+     matches = [m for matches in mm_matches.values() for m in matches]
+
+     seen_matches: list[Optional[PromptTargetMatch]] = [None] * len(prompt)
+
+     for match in matches:
+         for idx in range(match.start_idx, match.end_idx):
+             if seen_matches[idx] is not None:
+                 raise ValueError("Found overlapping matches "
+                                  f"({seen_matches[idx]} and {match}) "
+                                  f"at index={idx} of prompt={prompt}")
+
+             seen_matches[idx] = match
+
+     return sorted(matches, key=lambda x: x.start_idx)
+
+
+ def _apply_matches(
+     prompt: _S,
+     mm_matches: Mapping[str, Sequence[PromptTargetMatch]],
+     mm_item_counts: Mapping[str, int],
+ ) -> list[_S]:
+     """Apply the updates in `mm_matches` to `prompt`."""
+     out_seqs = list[Union[str, list[int]]]()
+     prev_end_idx = 0
+     next_idx_by_modality = defaultdict[str, int](lambda: 0)
+
+     for match in _resolve_matches(prompt, mm_matches):
+         modality = match.modality
+
+         item_start_idx = next_idx_by_modality[modality]
+         max_item_count = mm_item_counts.get(modality, 0)
+         if item_start_idx >= max_item_count:
+             continue
+
+         start_idx = match.start_idx
+         end_idx = match.end_idx
+         origin = match._origin
+         mode = origin.mode
+
+         if mode == UpdateMode.INSERT:
+             out_seqs.append(prompt[prev_end_idx:end_idx])
+             num_inserts = max_item_count
+         elif mode == UpdateMode.REPLACE:
+             out_seqs.append(prompt[prev_end_idx:start_idx])
+             num_inserts = max_item_count if start_idx == end_idx else 1
+         else:
+             assert_never(mode)
+
+         item_end_idx = min(item_start_idx + num_inserts, max_item_count)
+
+         for item_idx in range(item_start_idx, item_end_idx):
+             content = origin.get_content(item_idx)
+             insert_seq = (content.full.text if isinstance(prompt, str) else
+                           content.full.token_ids)
+
+             out_seqs.append(insert_seq)
+
+         prev_end_idx = end_idx
+         next_idx_by_modality[modality] += item_end_idx - item_start_idx
+
+     out_seqs.append(prompt[prev_end_idx:])
+
+     return cast(list[_S], out_seqs)
+
+
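+ # A worked example (illustrative) of the REPLACE path above: with one image
+ # item whose content expands "<image>" into three placeholder tokens, the
+ # prompt is emitted as alternating kept and inserted segments:
+ #
+ #     prompt: "USER: <image> describe"
+ #     output: ["USER: ", "<img><img><img>", " describe"]
+ #
+ # The `apply_*_matches` wrappers below then flatten or join these segments.
+
+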
+ def apply_token_matches(
+     prompt: list[int],
+     mm_matches: Mapping[str, Sequence[PromptTargetMatch]],
+     mm_item_counts: Mapping[str, int],
+ ) -> list[int]:
+     """Apply the updates in `mm_matches` to `prompt`."""
+     if not mm_matches:
+         return prompt
+
+     token_id_seqs = _apply_matches(prompt, mm_matches, mm_item_counts)
+
+     return flatten_2d_lists(token_id_seqs)
+
+
+ def apply_text_matches(
+     prompt: str,
+     mm_matches: Mapping[str, Sequence[PromptTargetMatch]],
+     mm_item_counts: Mapping[str, int],
+ ) -> str:
+     """Apply the updates in `mm_matches` to `prompt`."""
+     if not mm_matches:
+         return prompt
+
+     texts = _apply_matches(prompt, mm_matches, mm_item_counts)
+
+     return "".join(texts)
+
+
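+ # For example (illustrative), joining the segments from the sketch above,
+ # where `mm_matches` maps "image" to the text matches of "<image>":
+ #
+ #     >>> apply_text_matches("USER: <image> describe", mm_matches, {"image": 1})
+ #     'USER: <img><img><img> describe'
+
+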
+ def _iter_placeholders(
+     mm_prompt_updates: Mapping[str, Sequence[BoundPromptUpdate]],
+     prompt: list[int],
+     mm_item_counts: Mapping[str, int],
+ ) -> Iterable[PlaceholderFeaturesInfo]:
+     """
+     Yield each set of placeholder tokens found in `prompt`.
+
+     Matches are exclusive even when multiple modalities share
+     the same placeholder tokens. In that case, the modality that
+     appears earlier in `mm_prompt_updates` takes priority.
+
+     Note that empty matches are ignored.
+     """
+     prompt_len = len(prompt)
+     item_idx_by_modality = defaultdict[str, int](lambda: 0)
+
+     start_idx = 0
+     while start_idx < prompt_len:
+         found = False
+
+         for modality, modality_updates in mm_prompt_updates.items():
+             item_idx = item_idx_by_modality[modality]
+             if item_idx >= mm_item_counts.get(modality, 0):
+                 continue
+
+             for update_info in modality_updates:
+                 content = update_info.get_content(item_idx)
+                 content_tokens_full = content.full.token_ids
+                 content_len_full = len(content_tokens_full)
+                 end_idx_full = start_idx + content_len_full
+
+                 if content_len_full == 0 or end_idx_full > prompt_len:
+                     continue
+
+                 if prompt[start_idx:end_idx_full] == content_tokens_full:
+                     content_is_embed = content.is_embed
+                     if content_is_embed is not None:
+                         content_is_embed = content_is_embed(content.full)
+
+                     yield PlaceholderFeaturesInfo(
+                         modality=modality,
+                         item_idx=item_idx,
+                         start_idx=start_idx,
+                         tokens=content_tokens_full,
+                         is_embed=content_is_embed,
+                     )
+
+                     # Exclude overlapping matches
+                     start_idx = end_idx_full
+                     item_idx_by_modality[modality] += 1
+                     found = True
+                     break
+
+             if found:
+                 break  # Go back to the outer while loop
+
+         if not found:
+             start_idx += 1
+
+
+ def find_mm_placeholders(
+     mm_prompt_updates: Mapping[str, Sequence[BoundPromptUpdate]],
+     prompt: list[int],
+     mm_item_counts: Mapping[str, int],
+ ) -> Mapping[str, list[PlaceholderFeaturesInfo]]:
+     it = _iter_placeholders(mm_prompt_updates, prompt, mm_item_counts)
+     return dict(full_groupby_modality(it))
+
+
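+ # A minimal sketch (illustrative): after the updates have been applied, the
+ # expanded placeholder tokens can be located again, e.g. for a single image
+ # whose content is three hypothetical tokens starting at position 3:
+ #
+ #     >>> find_mm_placeholders(mm_prompt_updates, prompt_ids, {"image": 1})
+ #     {'image': [PlaceholderFeaturesInfo(modality='image', item_idx=0,
+ #                                        start_idx=3,
+ #                                        tokens=[32000, 32000, 32000],
+ #                                        is_embed=None)]}
+
+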
+ _V = TypeVar("_V", bound="Union[MultiModalKwargs, MultiModalKwargsItem]")
+
+
+ class ProcessingCacheOptionalItem(NamedTuple):
+     key: str
+     value: Optional[MultiModalKwargsItem]
+
+
+ class ProcessingCacheItem(NamedTuple):
+     key: str
+     value: MultiModalKwargsItem
+
+
+ class ProcessingCache:
+
+     @staticmethod
+     def get_lru_cache(
+         capacity_gb: float,
+         value_type: type[_V],
+         *,
+         debug: bool = False,
+     ) -> LRUCache[str, _V]:
+
+         def get_leaf_size(leaf: object) -> int:
+             # MultiModalKwargs is not a subclass of dict
+             if isinstance(leaf, MultiModalKwargs):
+                 return get_item_size(leaf.data)
+
+             # MultiModalKwargsItem is not a subclass of dict
+             if isinstance(leaf, MultiModalKwargsItem):
+                 leaf_data = {k: v.data for k, v in leaf.items()}
+                 return get_item_size(leaf_data)
+
+             # sys.getsizeof doesn't work for tensors
+             if isinstance(leaf, torch.Tensor):
+                 return leaf.nbytes
+
+             return sys.getsizeof(leaf)
+
+         def get_item_size(
+             value: Union[MultiModalKwargs, MultiModalKwargsItem,
+                          Mapping[str, NestedTensors]]
+         ) -> int:
+             size = json_reduce_leaves(
+                 lambda a, b: a + b,
+                 json_map_leaves(get_leaf_size, value),
+             )
+
+             if debug:
+                 logger.debug("Calculated size of %s to be %.2f GiB",
+                              type(value), size / GiB_bytes)
+
+             return size
+
+         return LRUCache(GiB_bytes * capacity_gb, getsizeof=get_item_size)
+
+     def __init__(
+         self,
+         capacity_gb: float,
+         *,
+         debug_cache_hit_ratio_steps: Optional[int] = None,
+     ) -> None:
+         super().__init__()
+
+         self.debug_cache_hit_ratio_steps = debug_cache_hit_ratio_steps
+         self.debug_cache_hits = 0
+         self.debug_cache_total = 0
+
+         self._cache = self.get_lru_cache(
+             capacity_gb,
+             MultiModalKwargsItem,
+             debug=bool(debug_cache_hit_ratio_steps),
+         )
+
+     def _maybe_log_cache_stats(self) -> None:
+         steps = self.debug_cache_hit_ratio_steps
+         if not steps:
+             return
+
+         total = self.debug_cache_total
+         if total > 0 and total % steps == 0:
+             logger.debug("ProcessingCache: hit_ratio = %.2f",
+                          self.debug_cache_hits / total)
+             logger.debug("ProcessingCache: size = %.2f / %.2f GiB",
+                          self._cache.currsize / GiB_bytes,
+                          self._cache.maxsize / GiB_bytes)
+
+     def get(
+         self,
+         model_id: str,
+         modality: str,
+         input_item: object,
+         input_kwargs: Mapping[str, object],
+     ) -> Optional[MultiModalKwargsItem]:
+         """
+         Get a processed multi-modal item from the cache
+         according to its dependencies, including:
+
+         - The model ID
+         - The modality of the item
+         - The original data item passed to the HF processor
+         - The configuration options of the HF processor
+         """
+         self._maybe_log_cache_stats()
+
+         cache_key = MultiModalHasher.hash_kwargs(model_id=model_id,
+                                                  **{modality: input_item},
+                                                  **input_kwargs)
+
+         if self.debug_cache_hit_ratio_steps:
+             if cache_key in self._cache:
+                 self.debug_cache_hits += 1
+
+             self.debug_cache_total += 1
+
+         return self._cache.get(cache_key)
+
+     def get_item(
+         self,
+         model_id: str,
+         modality: str,
+         input_item: object,
+         input_kwargs: Mapping[str, object],
+     ) -> ProcessingCacheOptionalItem:
+         cache_key = MultiModalHasher.hash_kwargs(model_id=model_id,
+                                                  **{modality: input_item},
+                                                  **input_kwargs)
+
+         return ProcessingCacheOptionalItem(
+             key=cache_key,
+             value=self._cache.get(cache_key),
+         )
+
+     def put(
+         self,
+         model_id: str,
+         modality: str,
+         input_item: object,
+         input_kwargs: Mapping[str, object],
+         output_kwargs: MultiModalKwargsItem,
+     ) -> None:
+         """
+         Put a processed multi-modal item into the cache
+         according to its dependencies
+         (see [`get`][vllm.multimodal.processing.ProcessingCache.get]).
+         """
+         cache_key = MultiModalHasher.hash_kwargs(model_id=model_id,
+                                                  **{modality: input_item},
+                                                  **input_kwargs)
+         self._cache[cache_key] = output_kwargs
+
+     def put_item(self, item: ProcessingCacheItem) -> None:
+         self._cache[item.key] = item.value
+
+     def reset(self) -> bool:
+         self._cache.clear()
+
+         return True
+
+
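+ # A minimal usage sketch (illustrative; the cache key is a hash of the model
+ # ID, the raw data item, and the HF processor kwargs):
+ #
+ #     >>> cache = ProcessingCache(capacity_gb=4)
+ #     >>> cache.get(model_id, "image", image, {})  # None on the first call
+ #     >>> cache.put(model_id, "image", image, {}, mm_kwargs_item)
+ #     >>> cache.get(model_id, "image", image, {})  # now a cache hit
+
+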
+ class BaseProcessingInfo:
+     """Base class to provide the information necessary for data processing."""
+
+     def __init__(self, ctx: InputProcessingContext) -> None:
+         super().__init__()
+
+         self.ctx = ctx
+
+     @property
+     def model_id(self) -> str:
+         return self.ctx.model_config.model
+
+     def get_tokenizer(self) -> AnyTokenizer:
+         return self.ctx.tokenizer
+
+     def get_hf_config(self) -> "PretrainedConfig":
+         return self.ctx.get_hf_config()
+
+     def get_hf_processor(self, **kwargs: object) -> "ProcessorMixin":
+         """
+         Subclasses can override this method to handle
+         specific kwargs from model config or user inputs.
+         """
+         return self.ctx.get_hf_processor(**kwargs)
+
+     @abstractmethod
+     def get_supported_mm_limits(self) -> Mapping[str, Optional[int]]:
+         """
+         Return the maximum supported number of items for each modality.
+
+         A value of `None` means unlimited number of items.
+
+         Omitting a modality from the returned dictionary means that
+         it is not supported at all.
+         """
+         raise NotImplementedError
+
+     def get_allowed_mm_limits(self) -> Mapping[str, int]:
+         """Return the maximum allowed number of items for each modality."""
+         supported_mm_limits = self.get_supported_mm_limits()
+         mm_config = self.ctx.get_mm_config()
+
+         allowed_limits = dict[str, int]()
+         for modality, supported_limit in supported_mm_limits.items():
+             user_limit = mm_config.get_limit_per_prompt(modality)
+
+             allowed_limits[modality] = (user_limit if supported_limit is None
+                                         else min(user_limit, supported_limit))
+
+         return allowed_limits
+
+
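+ # A worked example (illustrative): if `get_supported_mm_limits` returns
+ # `{"image": None, "video": 1}` and `--limit-mm-per-prompt` allows 5 images
+ # and 2 videos, then `get_allowed_mm_limits` returns
+ # `{"image": 5, "video": 1}`: the user limit where the model is unlimited,
+ # and the minimum of the two limits otherwise.
+
+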
+ _I = TypeVar("_I", bound=BaseProcessingInfo)
+
+ MultiModalHashes = dict[str, list[str]]
+ """
+ A collection of hashes with a similar structure as
+ [`MultiModalKwargs`][vllm.multimodal.inputs.MultiModalKwargs].
+ """
+
+
+ class BaseMultiModalProcessor(ABC, Generic[_I]):
+     """
+     Abstract base class to process multi-modal inputs to be used in vLLM.
+
+     Not to be confused with `transformers.ProcessorMixin`.
+     """
+
+     def __init__(self,
+                  info: _I,
+                  dummy_inputs: "BaseDummyInputsBuilder[_I]",
+                  *,
+                  cache: Optional[ProcessingCache] = None) -> None:
+         super().__init__()
+
+         self.info = info
+         self.dummy_inputs = dummy_inputs
+         self.cache = cache
+
+         self.data_parser = self._get_data_parser()
+
+     def __call__(
+         self,
+         prompt: str,
+         mm_data: MultiModalDataDict,
+         hf_processor_mm_kwargs: Mapping[str, object],
+     ) -> MultiModalInputs:
+         return self.apply(prompt, mm_data, hf_processor_mm_kwargs)
+
+     def _get_data_parser(self) -> MultiModalDataParser:
+         """
+         Construct a parser to preprocess multi-modal data items
+         before passing them to
+         [`_get_hf_mm_data`][vllm.multimodal.processing.BaseMultiModalProcessor._get_hf_mm_data].
+
+         You can support additional modalities by creating a subclass
+         of [`MultiModalDataParser`][vllm.multimodal.parse.MultiModalDataParser]
+         that has additional subparsers.
+         """
+         return MultiModalDataParser()
+
+     def _to_mm_items(
+         self,
+         mm_data: MultiModalDataDict,
+     ) -> MultiModalDataItems:
+         """
+         Normalize
+         [`MultiModalDataDict`][vllm.multimodal.inputs.MultiModalDataDict]
+         to [`MultiModalDataItems`][vllm.multimodal.parse.MultiModalDataItems]
+         before passing them to
+         [`_get_hf_mm_data`][vllm.multimodal.processing.BaseMultiModalProcessor._get_hf_mm_data].
+         """
+         mm_items = self.data_parser.parse_mm_data(mm_data)
+         supported_mm_limits = self.info.get_supported_mm_limits()
+         allowed_mm_limits = self.info.get_allowed_mm_limits()
+
+         for modality, items in mm_items.items():
+             supported_limit = supported_mm_limits.get(modality, 0)
+             allowed_limit = allowed_mm_limits.get(modality, 0)
+             num_items = len(items)
+
+             if supported_limit is not None and num_items > supported_limit:
+                 raise ValueError(
+                     f"The model only supports at most {supported_limit} "
+                     f"{modality} items, but you passed {num_items} "
+                     f"{modality} items in the same prompt.")
+
+             if num_items > allowed_limit:
+                 raise ValueError(
+                     "You set or defaulted to "
+                     f"'{json.dumps({modality: allowed_limit})}' in "
+                     f"`--limit-mm-per-prompt`, but passed {num_items} "
+                     f"{modality} items in the same prompt.")
+
+         return mm_items
+
+     @abstractmethod
+     def _get_mm_fields_config(
+         self,
+         hf_inputs: "BatchFeature",
+         hf_processor_mm_kwargs: Mapping[str, object],
+     ) -> Mapping[str, MultiModalFieldConfig]:
+         """Given the HF-processed data, output the metadata of each field."""
+         raise NotImplementedError
+
+     @abstractmethod
+     def _get_prompt_updates(
+         self,
+         mm_items: MultiModalDataItems,
+         hf_processor_mm_kwargs: Mapping[str, object],
+         out_mm_kwargs: MultiModalKwargs,
+     ) -> Sequence[PromptUpdate]:
+         """
+         Given the original multi-modal items for this modality
+         and HF-processed data, output the updates to perform.
+
+         The information returned by this method is used to update token inputs
+         which bypass the HF processor. It is also used to update the output of
+         the HF processor if the HF processor does not apply prompt updates to
+         text inputs.
+
+         Moreover, this information is critical for determining the token
+         positions needed to construct
+         [`PlaceholderRange`][vllm.multimodal.inputs.PlaceholderRange]
+         for each multi-modal item.
+         """
+         raise NotImplementedError
+
+     def _find_mm_placeholders(
+         self,
+         mm_prompt_updates: Mapping[str, Sequence[BoundPromptUpdate]],
+         new_token_ids: list[int],
+         mm_item_counts: Mapping[str, int],
+     ) -> Mapping[str, list[PlaceholderFeaturesInfo]]:
+         return find_mm_placeholders(mm_prompt_updates, new_token_ids,
+                                     mm_item_counts)
+
+     def _get_hf_mm_data(
+         self,
+         mm_items: MultiModalDataItems,
+     ) -> tuple[Mapping[str, object], Mapping[str, object]]:
+         processor_data = dict[str, object]()
+         passthrough_data = dict[str, object]()
+
+         for items in mm_items.values():
+             processor_data.update(items.get_processor_data())
+             passthrough_data.update(items.get_passthrough_data())
+
+         return processor_data, passthrough_data
+
+     def _call_hf_processor(
+         self,
+         prompt: str,
+         # Not to be confused with `mm_data` in `self.apply`.
+         # This refers to the data to be passed to the HF processor.
+         mm_data: Mapping[str, object],
+         mm_kwargs: Mapping[str, object],
+     ) -> "BatchFeature":
+         """
+         Call the HF processor on the prompt text and
+         associated multi-modal data.
+         """
+         return self.info.ctx.call_hf_processor(
+             self.info.get_hf_processor(**mm_kwargs),
+             dict(text=prompt, **mm_data),
+             mm_kwargs,
+         )
+
+     def _hf_processor_applies_updates(
+         self,
+         prompt_text: str,
+         mm_items: MultiModalDataItems,
+         hf_processor_mm_kwargs: Mapping[str, object],
+     ) -> bool:
+         """
+         Return whether the HF processor applies prompt updates.
+
+         For most HF processors, this should be `True` when multi-modal
+         data items are passed, but `False` when multi-modal embeddings
+         are passed.
+         """
+         return not any(
+             isinstance(items, (EmbeddingItems, DictEmbeddingItems))
+             for items in mm_items.values())
+
+     def _apply_hf_processor_text_mm(
+         self,
+         prompt_text: str,
+         mm_items: MultiModalDataItems,
+         hf_processor_mm_kwargs: Mapping[str, object],
+     ) -> tuple[list[int], MultiModalKwargs, bool]:
+         """
+         Apply the HF processor on the prompt text and multi-modal data
+         together.
+
+         In addition, return whether prompt updates have been applied.
+         """
+         processor_data, passthrough_data = self._get_hf_mm_data(mm_items)
+
+         processed_data = self._call_hf_processor(
+             prompt=prompt_text,
+             mm_data=processor_data,
+             mm_kwargs=hf_processor_mm_kwargs,
+         )
+         processed_data.update(passthrough_data)
+
+         prompt_ids, = processed_data.pop("input_ids").tolist()
+
+         mm_kwargs = MultiModalKwargs.from_hf_inputs(
+             processed_data,
+             self._get_mm_fields_config(processed_data, hf_processor_mm_kwargs),
+         )
+
+         is_update_applied = self._hf_processor_applies_updates(
+             prompt_text=prompt_text,
+             mm_items=mm_items,
+             hf_processor_mm_kwargs=hf_processor_mm_kwargs,
+         )
+
+         return prompt_ids, mm_kwargs, is_update_applied
+
+     def _apply_hf_processor_text_only(self, prompt_text: str) -> list[int]:
+         """
+         Apply the HF processor on the prompt text only.
+
+         Since the HF processor requires that text and multi-modal items
+         correspond to each other, we create dummy multi-modal items
+         to go along with the text.
+         """
+         prompt_ids, _, _ = self._apply_hf_processor_text_mm(
+             prompt_text=prompt_text,
+             mm_items=MultiModalDataItems({}),
+             hf_processor_mm_kwargs={},
+         )
+
+         return prompt_ids
+
+     def _apply_hf_processor_tokens_only(
+         self,
+         prompt_tokens: list[int],
+     ) -> list[int]:
+         """
+         Apply the HF processor on the prompt tokens only.
+
+         Most HF processors accept prompt text but not prompt tokens.
+         If the HF processor adds or removes tokens that are not related to
+         multi-modal data, you should override this method so it is consistent
+         with the output of
+         [`_apply_hf_processor_text_only`][vllm.multimodal.processing.BaseMultiModalProcessor._apply_hf_processor_text_only]
+         on the corresponding text.
+         """
+         return prompt_tokens
+
+     def _apply_hf_processor_mm_only(
+         self,
+         mm_items: MultiModalDataItems,
+         hf_processor_mm_kwargs: Mapping[str, object],
+     ) -> MultiModalKwargs:
+         """
+         Apply the HF processor on the multi-modal data only.
+
+         Since the HF processor requires that text and multi-modal items
+         correspond to each other, we generate dummy text using
+         [`DummyInputsBuilder`][vllm.multimodal.profiling.BaseDummyInputsBuilder]
+         to go along with the multi-modal data.
+         """
+         mm_counts = mm_items.get_all_counts()
+
+         _, mm_kwargs, _ = self._apply_hf_processor_text_mm(
+             prompt_text=self.dummy_inputs.get_dummy_text(mm_counts),
+             mm_items=mm_items,
+             hf_processor_mm_kwargs=hf_processor_mm_kwargs,
+         )
+
+         return mm_kwargs
+
+     def _apply_hf_processor_main(
+         self,
+         prompt: Union[str, list[int]],
+         mm_items: MultiModalDataItems,
+         hf_processor_mm_kwargs: Mapping[str, object],
+         *,
+         enable_hf_prompt_update: bool,
+     ) -> tuple[list[int], MultiModalKwargs, bool]:
+         """
+         Apply the HF processor on the prompt text and multi-modal data.
+
+         In addition, return whether prompt updates have been applied
+         (for most HF processors, this should be `True`).
+
+         Note:
+             If `enable_hf_prompt_update=True`, we use the HF processor
+             to perform prompt updates if available; the HF processor
+             requires that the prompt corresponds to the multi-modal items.
+         """
+         if isinstance(prompt, str):
+             if enable_hf_prompt_update:
+                 return self._apply_hf_processor_text_mm(
+                     prompt_text=prompt,
+                     mm_items=mm_items,
+                     hf_processor_mm_kwargs=hf_processor_mm_kwargs,
+                 )
+
+             prompt_ids = self._apply_hf_processor_text_only(prompt)
+         else:
+             prompt_ids = self._apply_hf_processor_tokens_only(prompt)
+
+         mm_kwargs = self._apply_hf_processor_mm_only(
+             mm_items=mm_items,
+             hf_processor_mm_kwargs=hf_processor_mm_kwargs,
+         )
+
+         return prompt_ids, mm_kwargs, False
+
+     def _get_cache_missing_items(
+         self,
+         cache: ProcessingCache,
+         mm_data_items: MultiModalDataItems,
+         hf_processor_mm_kwargs: Mapping[str, object],
+     ) -> tuple[dict[str, list[ProcessingCacheOptionalItem]],
+                dict[str, list[object]]]:
+         model_id = self.info.model_id
+
+         mm_cache_items = {
+             modality: [
+                 cache.get_item(model_id, modality, item,
+                                hf_processor_mm_kwargs) for item in items
+             ]
+             for modality, items in mm_data_items.items()
+         }
+
+         mm_missing_idxs = {
+             modality: [
+                 idx for idx, item in enumerate(cache_items)
+                 if item.value is None
+             ]
+             for modality, cache_items in mm_cache_items.items()
+         }
+         mm_missing_data = {
+             modality: [mm_data_items[modality][idx] for idx in idxs]
+             for modality, idxs in mm_missing_idxs.items()
+         }
+
+         return mm_cache_items, mm_missing_data
+
+     def _hash_mm_items(
+         self,
+         mm_items: MultiModalDataItems,
+         hf_processor_mm_kwargs: Mapping[str, object],
+     ) -> MultiModalHashes:
+         """Create MM hashes to be returned (only used in V1)."""
+         model_id = self.info.model_id
+
+         return {
+             modality: [
+                 MultiModalHasher.hash_kwargs(model_id=model_id,
+                                              **{modality: item},
+                                              **hf_processor_mm_kwargs)
+                 for item in items
+             ]
+             for modality, items in mm_items.items()
+         }
+
+     def _merge_mm_kwargs(
+         self,
+         cache: ProcessingCache,
+         mm_cache_items: dict[str, list[ProcessingCacheOptionalItem]],
+         mm_missing_data: dict[str, list[object]],
+         mm_missing_kwargs: MultiModalKwargs,
+     ) -> dict[str, list[ProcessingCacheItem]]:
+         mm_missing_next_idx = {modality: 0 for modality in mm_missing_data}
+
+         merged_items = defaultdict[str, list[ProcessingCacheItem]](list)
+         for modality, cache_items in mm_cache_items.items():
+             for cache_item in cache_items:
+                 if cache_item.value is None:
+                     kw_item = mm_missing_kwargs.get_item(
+                         modality,
+                         mm_missing_next_idx[modality],
+                     )
+                     cache_item_new = ProcessingCacheItem(
+                         key=cache_item.key,
+                         value=kw_item,
+                     )
+
+                     cache.put_item(cache_item_new)
+                     mm_missing_next_idx[modality] += 1
+                 else:
+                     cache_item_new = ProcessingCacheItem(
+                         key=cache_item.key,
+                         value=cache_item.value,
+                     )
+
+                 merged_items[modality].append(cache_item_new)
+
+         return dict(merged_items)
+
+     def _apply_hf_processor(
+         self,
+         prompt: Union[str, list[int]],
+         mm_data_items: MultiModalDataItems,
+         hf_processor_mm_kwargs: Mapping[str, object],
+         *,
+         return_mm_hashes: bool,
+     ) -> tuple[list[int], MultiModalKwargs, Optional[MultiModalHashes], bool]:
+         (
+             prompt_ids,
+             mm_kwargs,
+             is_update_applied,
+         ) = self._apply_hf_processor_main(
+             prompt=prompt,
+             mm_items=mm_data_items,
+             hf_processor_mm_kwargs=hf_processor_mm_kwargs,
+             enable_hf_prompt_update=True,
+         )
+
+         mm_hashes = (self._hash_mm_items(mm_data_items, hf_processor_mm_kwargs)
+                      if return_mm_hashes else None)
+
+         return prompt_ids, mm_kwargs, mm_hashes, is_update_applied
+
+     def _cached_apply_hf_processor(
+         self,
+         prompt: Union[str, list[int]],
+         mm_data_items: MultiModalDataItems,
+         hf_processor_mm_kwargs: Mapping[str, object],
+         *,
+         return_mm_hashes: bool,
+     ) -> tuple[list[int], MultiModalKwargs, Optional[MultiModalHashes], bool]:
+         """
+         Apply the HF processor on the full prompt text,
+         caching the results and reusing cached results.
+         """
+         cache = self.cache
+
+         _, passthrough_data = self._get_hf_mm_data(mm_data_items)
+         if cache is None or passthrough_data:
+             return self._apply_hf_processor(
+                 prompt=prompt,
+                 mm_data_items=mm_data_items,
+                 hf_processor_mm_kwargs=hf_processor_mm_kwargs,
+                 return_mm_hashes=return_mm_hashes,
+             )
+
+         (
+             mm_cache_items,
+             mm_missing_data,
+         ) = self._get_cache_missing_items(
+             cache=cache,
+             mm_data_items=mm_data_items,
+             hf_processor_mm_kwargs=hf_processor_mm_kwargs,
+         )
+
+         # NOTE: `prompt` does not correspond to `mm_missing_data_items`,
+         # so we can't apply prompt updates until the new multimodal
+         # items are combined with the cached multimodal items
+         (
+             prompt_ids,
+             mm_missing_kwargs,
+             is_update_applied,
+         ) = self._apply_hf_processor_main(
+             prompt=prompt,
+             mm_items=self._to_mm_items(mm_missing_data),
+             hf_processor_mm_kwargs=hf_processor_mm_kwargs,
+             enable_hf_prompt_update=False,
+         )
+
+         mm_cache_items_merged = self._merge_mm_kwargs(
+             cache,
+             mm_cache_items=mm_cache_items,
+             mm_missing_data=mm_missing_data,
+             mm_missing_kwargs=mm_missing_kwargs,
+         )
+
+         mm_kwargs = MultiModalKwargs.from_items([
+             item.value for cache_items in mm_cache_items_merged.values()
+             for item in cache_items
+         ])
+
+         mm_hashes = {
+             modality: [item.key for item in cache_items]
+             for modality, cache_items in mm_cache_items_merged.items()
+         } if return_mm_hashes else None
+
+         return prompt_ids, mm_kwargs, mm_hashes, is_update_applied
+
+     def _bind_and_group_updates(
+         self,
+         prompt_updates: Sequence[PromptUpdate],
+     ) -> dict[str, Sequence[BoundPromptUpdate]]:
+         tokenizer = self.info.get_tokenizer()
+
+         it = (update.bind(tokenizer) for update in prompt_updates)
+         return dict(full_groupby_modality(it))
+
+     def _apply_token_matches(
+         self,
+         prompt: list[int],
+         mm_matches: Mapping[str, Sequence[PromptTargetMatch]],
+         mm_item_counts: Mapping[str, int],
+     ) -> list[int]:
+         return apply_token_matches(prompt, mm_matches, mm_item_counts)
+
+     def _apply_text_matches(
+         self,
+         prompt: str,
+         mm_matches: Mapping[str, Sequence[PromptTargetMatch]],
+         mm_item_counts: Mapping[str, int],
+     ) -> str:
+         return apply_text_matches(prompt, mm_matches, mm_item_counts)
+
+     def _apply_prompt_updates(
+         self,
+         token_ids: list[int],
+         mm_prompt_updates: Mapping[str, Sequence[BoundPromptUpdate]],
+         mm_item_counts: Mapping[str, int],
+     ) -> tuple[list[int], str, Mapping[str, list[PlaceholderFeaturesInfo]]]:
+         tokenizer = self.info.get_tokenizer()
+
+         mm_token_matches = {
+             modality: find_token_matches(token_ids, updates)
+             for modality, updates in mm_prompt_updates.items()
+         }
+         mm_match_counts = {
+             modality: len(matches)
+             for modality, matches in mm_token_matches.items()
+         }
+
+         # If the search text does not represent a special token,
+         # it may have different token IDs in the prompt, because
+         # the tokens may go across the boundaries of the search text.
+         # ----
+         # e.g. when searching for "foo" in "food", if "food" itself makes
+         # up a token, then the token ID of "foo" will not appear at all
+         # ----
+         # Since it is inefficient to search for all possible tokenizations
+         # of the search text in the prompt, we instead perform string-based
+         # updates on the decoded token IDs, then encode them back.
+         if all(
+             mm_match_counts.get(modality, 0) >= item_count
+             for modality, item_count in mm_item_counts.items()
+         ):  # yapf: disable
+             token_ids = self._apply_token_matches(
+                 token_ids,
+                 mm_token_matches,
+                 mm_item_counts,
+             )
+
+             text = decode_tokens(tokenizer, token_ids)
+             matched_updates = {
+                 modality: [match._origin for match in token_matches]
+                 for modality, token_matches in mm_token_matches.items()
+             }
+         else:
+             text = decode_tokens(tokenizer, token_ids)
+
+             mm_text_matches = {
+                 modality: find_text_matches(text, updates)
+                 for modality, updates in mm_prompt_updates.items()
+             }
+             text = self._apply_text_matches(
+                 text,
+                 mm_text_matches,
+                 mm_item_counts,
+             )
+
+             token_ids = encode_tokens(tokenizer,
+                                       text,
+                                       add_special_tokens=False)
+             matched_updates = {
+                 modality: [match._origin for match in token_matches]
+                 for modality, token_matches in mm_text_matches.items()
+             }
+
+         placeholders = self._find_mm_placeholders(
+             matched_updates,
+             token_ids,
+             mm_item_counts,
+         )
+
+         return token_ids, text, placeholders
+
+     def _validate_mm_kwargs(
+         self,
+         mm_kwargs: MultiModalKwargs,
+         mm_item_counts: Mapping[str, int],
+     ) -> None:
+         for modality, item_count in mm_item_counts.items():
+             if modality in mm_kwargs.modalities:
+                 items = mm_kwargs.get_items(modality)
+             else:
+                 items = []
+
+             if len(items) != item_count:
+                 raise RuntimeError(
+                     f"Expected there to be {item_count} {modality} items in "
+                     f"keyword arguments corresponding to {item_count} "
+                     f"{modality} data items, but only found {len(items)}! "
+                     "There is likely a problem with your implementation of "
+                     "the merged multi-modal processor for this model "
+                     "(usually arising from an inconsistency between "
+                     "`_call_hf_processor` and `_get_mm_fields_config`).")
+
+     def _validate_mm_placeholders(
+         self,
+         mm_placeholders: Mapping[str, list[PlaceholderFeaturesInfo]],
+         mm_item_counts: Mapping[str, int],
+     ) -> None:
+         for modality, item_count in mm_item_counts.items():
+             placeholders = mm_placeholders.get(modality, [])
+
+             if len(placeholders) != item_count:
+                 # NOTE: If you are a model developer, this can also arise from
+                 # an inconsistency between `_call_hf_processor` and
+                 # `_get_mm_fields_config` implementations
+                 raise RuntimeError(
+                     f"Expected there to be {item_count} prompt updates "
+                     f"corresponding to {item_count} {modality} items, but "
+                     f"instead found {len(placeholders)} prompt updates! "
+                     "This is likely because you forgot to include input "
+                     "placeholder tokens (e.g., `<image>`, `<|image_pad|>`) "
+                     "in the prompt. If the model has a chat template, make "
+                     "sure you have applied it before calling `LLM.generate`.")
+
+     def _maybe_apply_prompt_updates(
+         self,
+         mm_items: MultiModalDataItems,
+         hf_processor_mm_kwargs: Mapping[str, object],
+         prompt_ids: list[int],
+         mm_kwargs: MultiModalKwargs,
+         is_update_applied: bool,
+     ) -> tuple[list[int], str, Mapping[str, list[PlaceholderFeaturesInfo]]]:
+         unbound_prompt_updates = self._get_prompt_updates(
+             mm_items,
+             hf_processor_mm_kwargs,
+             mm_kwargs,
+         )
+         mm_prompt_updates = self._bind_and_group_updates(
+             unbound_prompt_updates)
+
+         mm_item_counts = mm_items.get_all_counts()
+         self._validate_mm_kwargs(mm_kwargs, mm_item_counts)
+
+         if is_update_applied:
+             mm_placeholders = self._find_mm_placeholders(
+                 mm_prompt_updates,
+                 prompt_ids,
+                 mm_item_counts,
+             )
+             self._validate_mm_placeholders(mm_placeholders, mm_item_counts)
+
+             tokenizer = self.info.get_tokenizer()
+             prompt = decode_tokens(tokenizer, prompt_ids)
+         else:
+             (
+                 prompt_ids,
+                 prompt,
+                 mm_placeholders,
+             ) = self._apply_prompt_updates(
+                 prompt_ids,
+                 mm_prompt_updates,
+                 mm_item_counts,
+             )
+             self._validate_mm_placeholders(mm_placeholders, mm_item_counts)
+
+         return prompt_ids, prompt, mm_placeholders
+
+     def apply(
+         self,
+         prompt: Union[str, list[int]],
+         mm_data: MultiModalDataDict,
+         hf_processor_mm_kwargs: Mapping[str, object],
+         return_mm_hashes: bool = False,
+     ) -> MultiModalInputs:
+         """
+         Process multi-modal inputs to be used in vLLM.
+
+         The main steps are:
+
+         1. Apply the HF processor on the prompt text and multi-modal data
+            together, outputting token IDs and processed tensors.
+         2. Find and update sequences in the token IDs with placeholder tokens.
+            The number of placeholder tokens equals the feature size of the
+            multi-modal data outputted by the multi-modal encoder.
+         3. Extract information about the placeholder tokens from the
+            processed token IDs.
+         """
+         mm_items = self._to_mm_items(mm_data)
+
+         (
+             prompt_ids,
+             mm_kwargs,
+             mm_hashes,
+             is_update_applied,
+         ) = self._cached_apply_hf_processor(
+             prompt,
+             mm_items,
+             hf_processor_mm_kwargs,
+             return_mm_hashes=return_mm_hashes,
+         )
+
+         prompt_ids, prompt, mm_placeholders = self._maybe_apply_prompt_updates(
+             mm_items=mm_items,
+             hf_processor_mm_kwargs=hf_processor_mm_kwargs,
+             prompt_ids=prompt_ids,
+             mm_kwargs=mm_kwargs,
+             is_update_applied=is_update_applied,
+         )
+
+         mm_placeholder_ranges = {
+             modality: [item.to_range() for item in placeholders]
+             for modality, placeholders in mm_placeholders.items()
+         }
+
+         return MultiModalInputs(
+             type="multimodal",
+             prompt=prompt,
+             prompt_token_ids=prompt_ids,
+             mm_kwargs=mm_kwargs,
+             mm_hashes=mm_hashes,
+             mm_placeholders=mm_placeholder_ranges,
+         )
+
+
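+ # A minimal end-to-end sketch (illustrative; `processor` is a concrete
+ # subclass, e.g. obtained from the multi-modal registry, and `image` is a
+ # PIL image):
+ #
+ #     >>> out = processor.apply(
+ #     ...     "USER: <image>\nWhat is shown?",
+ #     ...     mm_data={"image": image},
+ #     ...     hf_processor_mm_kwargs={},
+ #     ... )
+ #     >>> out["prompt_token_ids"]  # token IDs with placeholders expanded
+ #     >>> out["mm_placeholders"]   # {"image": [PlaceholderRange(...)]}
+
+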
+ class EncDecMultiModalProcessor(BaseMultiModalProcessor[_I]):
+
+     @abstractmethod
+     def create_encoder_prompt(
+         self,
+         prompt: Union[str, list[int]],
+         mm_data: MultiModalDataDict,
+     ) -> Union[str, list[int]]:
+         """
+         Create the input prompt for the encoder. The HF processor will be
+         applied to this prompt during profiling and generation.
+         """
+         raise NotImplementedError
+
+     @property
+     def pad_dummy_encoder_prompt(self) -> bool:
+         return False
+
+     def create_decoder_prompt(
+         self,
+         prompt: Union[str, list[int]],
+         mm_data: MultiModalDataDict,
+     ) -> Union[str, list[int]]:
+         """Create the input prompt for the decoder."""
+         return prompt
+
+     def _get_enc_dec_inputs(
+         self,
+         prompt: Union[str, list[int]],
+         mm_data: MultiModalDataDict,
+         encoder_inputs: MultiModalInputs,
+     ):
+         tokenizer = self.info.get_tokenizer()
+         decoder_prompt = self.create_decoder_prompt(prompt, mm_data)
+         if isinstance(decoder_prompt, str):
+             decoder_prompt_ids = encode_tokens(tokenizer,
+                                                decoder_prompt,
+                                                add_special_tokens=False)
+         else:
+             decoder_prompt_ids = decoder_prompt
+             decoder_prompt = decode_tokens(tokenizer, decoder_prompt)
+
+         mm_inputs = MultiModalEncDecInputs(
+             encoder_prompt=encoder_inputs["prompt"],
+             encoder_prompt_token_ids=encoder_inputs["prompt_token_ids"],
+             **encoder_inputs)
+         mm_inputs.update({
+             "prompt": decoder_prompt,
+             "prompt_token_ids": decoder_prompt_ids
+         })
+         return mm_inputs
+
+     def apply(
+         self,
+         prompt: Union[str, list[int]],
+         mm_data: MultiModalDataDict,
+         hf_processor_mm_kwargs: Mapping[str, object],
+         return_mm_hashes: bool = False,
+     ) -> MultiModalEncDecInputs:
+         """
+         Process multi-modal inputs to be used in vLLM.
+         The main processing steps are modified to fit the encoder-decoder
+         model:
+         1. Create the encoder prompt from the input prompt text.
+         2. Apply the HF processor on the encoder prompt.
+         3. Copy the input prompt text as the decoder prompt inputs.
+         """
+         encoder_prompt = self.create_encoder_prompt(prompt, mm_data)
+         encoder_inputs = super().apply(
+             encoder_prompt,
+             mm_data,
+             hf_processor_mm_kwargs,
+             return_mm_hashes,
+         )
+
+         return self._get_enc_dec_inputs(
+             prompt=prompt,
+             mm_data=mm_data,
+             encoder_inputs=encoder_inputs,
+         )
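+
+
+ # A minimal subclass sketch (illustrative; the names are hypothetical). For
+ # models whose encoder consumes only the multi-modal input, the encoder
+ # prompt can be emptied out while `apply` copies the original prompt through
+ # as the decoder prompt:
+ #
+ #     class MyEncDecProcessor(EncDecMultiModalProcessor["MyProcessingInfo"]):
+ #
+ #         def create_encoder_prompt(self, prompt, mm_data):
+ #             return ""  # the encoder sees only the processed mm data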