vllm_cpu_avx512vnni-0.10.2.post2-cp312-cp312-manylinux_2_17_x86_64.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of vllm-cpu-avx512vnni might be problematic.

Files changed (1395)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +220 -0
  3. vllm/_bc_linter.py +59 -0
  4. vllm/_custom_ops.py +2022 -0
  5. vllm/_ipex_ops.py +404 -0
  6. vllm/_version.py +34 -0
  7. vllm/adapter_commons/__init__.py +0 -0
  8. vllm/adapter_commons/layers.py +16 -0
  9. vllm/adapter_commons/models.py +106 -0
  10. vllm/adapter_commons/request.py +26 -0
  11. vllm/adapter_commons/utils.py +93 -0
  12. vllm/adapter_commons/worker_manager.py +39 -0
  13. vllm/assets/__init__.py +0 -0
  14. vllm/assets/audio.py +45 -0
  15. vllm/assets/base.py +41 -0
  16. vllm/assets/image.py +50 -0
  17. vllm/assets/video.py +138 -0
  18. vllm/attention/__init__.py +19 -0
  19. vllm/attention/backends/__init__.py +0 -0
  20. vllm/attention/backends/abstract.py +348 -0
  21. vllm/attention/backends/differential_flash_attn.py +935 -0
  22. vllm/attention/backends/dual_chunk_flash_attn.py +1499 -0
  23. vllm/attention/backends/flash_attn.py +933 -0
  24. vllm/attention/backends/flashmla.py +238 -0
  25. vllm/attention/backends/mla/__init__.py +0 -0
  26. vllm/attention/backends/mla/common.py +1310 -0
  27. vllm/attention/backends/placeholder_attn.py +340 -0
  28. vllm/attention/backends/rocm_aiter_mla.py +410 -0
  29. vllm/attention/backends/rocm_flash_attn.py +953 -0
  30. vllm/attention/backends/triton_mla.py +111 -0
  31. vllm/attention/backends/utils.py +610 -0
  32. vllm/attention/backends/xformers.py +805 -0
  33. vllm/attention/layer.py +552 -0
  34. vllm/attention/layers/__init__.py +0 -0
  35. vllm/attention/layers/chunked_local_attention.py +91 -0
  36. vllm/attention/layers/cross_attention.py +159 -0
  37. vllm/attention/layers/encoder_only_attention.py +86 -0
  38. vllm/attention/ops/__init__.py +0 -0
  39. vllm/attention/ops/chunked_prefill_paged_decode.py +405 -0
  40. vllm/attention/ops/common.py +139 -0
  41. vllm/attention/ops/flashmla.py +123 -0
  42. vllm/attention/ops/merge_attn_states.py +43 -0
  43. vllm/attention/ops/paged_attn.py +261 -0
  44. vllm/attention/ops/pallas_kv_cache_update.py +124 -0
  45. vllm/attention/ops/prefix_prefill.py +928 -0
  46. vllm/attention/ops/rocm_aiter_mla.py +104 -0
  47. vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
  48. vllm/attention/ops/triton_decode_attention.py +676 -0
  49. vllm/attention/ops/triton_flash_attention.py +984 -0
  50. vllm/attention/ops/triton_merge_attn_states.py +97 -0
  51. vllm/attention/ops/triton_unified_attention.py +854 -0
  52. vllm/attention/selector.py +243 -0
  53. vllm/attention/utils/__init__.py +0 -0
  54. vllm/attention/utils/fa_utils.py +85 -0
  55. vllm/attention/utils/kv_sharing_utils.py +33 -0
  56. vllm/beam_search.py +87 -0
  57. vllm/benchmarks/__init__.py +0 -0
  58. vllm/benchmarks/datasets.py +2651 -0
  59. vllm/benchmarks/latency.py +170 -0
  60. vllm/benchmarks/lib/__init__.py +3 -0
  61. vllm/benchmarks/lib/endpoint_request_func.py +510 -0
  62. vllm/benchmarks/lib/ready_checker.py +72 -0
  63. vllm/benchmarks/lib/utils.py +80 -0
  64. vllm/benchmarks/serve.py +1247 -0
  65. vllm/benchmarks/throughput.py +696 -0
  66. vllm/collect_env.py +823 -0
  67. vllm/compilation/__init__.py +0 -0
  68. vllm/compilation/activation_quant_fusion.py +193 -0
  69. vllm/compilation/backends.py +641 -0
  70. vllm/compilation/base_static_graph.py +51 -0
  71. vllm/compilation/collective_fusion.py +1190 -0
  72. vllm/compilation/compiler_interface.py +572 -0
  73. vllm/compilation/counter.py +47 -0
  74. vllm/compilation/cuda_graph.py +193 -0
  75. vllm/compilation/cuda_piecewise_backend.py +117 -0
  76. vllm/compilation/decorators.py +316 -0
  77. vllm/compilation/fix_functionalization.py +208 -0
  78. vllm/compilation/fusion.py +600 -0
  79. vllm/compilation/fusion_attn.py +303 -0
  80. vllm/compilation/fx_utils.py +84 -0
  81. vllm/compilation/inductor_pass.py +136 -0
  82. vllm/compilation/monitor.py +57 -0
  83. vllm/compilation/multi_output_match.py +109 -0
  84. vllm/compilation/noop_elimination.py +165 -0
  85. vllm/compilation/pass_manager.py +88 -0
  86. vllm/compilation/sequence_parallelism.py +484 -0
  87. vllm/compilation/torch25_custom_graph_pass.py +42 -0
  88. vllm/compilation/vllm_inductor_pass.py +50 -0
  89. vllm/compilation/wrapper.py +138 -0
  90. vllm/config/__init__.py +3921 -0
  91. vllm/config/cache.py +214 -0
  92. vllm/config/compilation.py +580 -0
  93. vllm/config/kv_events.py +50 -0
  94. vllm/config/kv_transfer.py +111 -0
  95. vllm/config/load.py +113 -0
  96. vllm/config/lora.py +132 -0
  97. vllm/config/parallel.py +446 -0
  98. vllm/config/scheduler.py +304 -0
  99. vllm/config/utils.py +29 -0
  100. vllm/connections.py +174 -0
  101. vllm/core/__init__.py +0 -0
  102. vllm/core/block/__init__.py +0 -0
  103. vllm/core/block/block_table.py +399 -0
  104. vllm/core/block/common.py +371 -0
  105. vllm/core/block/cpu_gpu_block_allocator.py +439 -0
  106. vllm/core/block/interfaces.py +319 -0
  107. vllm/core/block/naive_block.py +466 -0
  108. vllm/core/block/prefix_caching_block.py +1135 -0
  109. vllm/core/block/utils.py +28 -0
  110. vllm/core/block_manager.py +523 -0
  111. vllm/core/evictor.py +157 -0
  112. vllm/core/interfaces.py +139 -0
  113. vllm/core/placeholder_block_space_manager.py +103 -0
  114. vllm/core/scheduler.py +2028 -0
  115. vllm/device_allocator/__init__.py +0 -0
  116. vllm/device_allocator/cumem.py +286 -0
  117. vllm/distributed/__init__.py +6 -0
  118. vllm/distributed/communication_op.py +41 -0
  119. vllm/distributed/device_communicators/__init__.py +0 -0
  120. vllm/distributed/device_communicators/all2all.py +259 -0
  121. vllm/distributed/device_communicators/all_reduce_utils.py +292 -0
  122. vllm/distributed/device_communicators/base_device_communicator.py +277 -0
  123. vllm/distributed/device_communicators/cpu_communicator.py +201 -0
  124. vllm/distributed/device_communicators/cuda_communicator.py +294 -0
  125. vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
  126. vllm/distributed/device_communicators/custom_all_reduce.py +311 -0
  127. vllm/distributed/device_communicators/pynccl.py +290 -0
  128. vllm/distributed/device_communicators/pynccl_wrapper.py +382 -0
  129. vllm/distributed/device_communicators/quick_all_reduce.py +278 -0
  130. vllm/distributed/device_communicators/ray_communicator.py +258 -0
  131. vllm/distributed/device_communicators/shm_broadcast.py +585 -0
  132. vllm/distributed/device_communicators/symm_mem.py +136 -0
  133. vllm/distributed/device_communicators/tpu_communicator.py +102 -0
  134. vllm/distributed/device_communicators/xpu_communicator.py +69 -0
  135. vllm/distributed/eplb/__init__.py +8 -0
  136. vllm/distributed/eplb/eplb_state.py +619 -0
  137. vllm/distributed/eplb/rebalance_algo.py +234 -0
  138. vllm/distributed/eplb/rebalance_execute.py +424 -0
  139. vllm/distributed/kv_events.py +362 -0
  140. vllm/distributed/kv_transfer/README.md +29 -0
  141. vllm/distributed/kv_transfer/__init__.py +13 -0
  142. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  143. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  144. vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
  145. vllm/distributed/kv_transfer/kv_connector/factory.py +108 -0
  146. vllm/distributed/kv_transfer/kv_connector/utils.py +246 -0
  147. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
  148. vllm/distributed/kv_transfer/kv_connector/v1/base.py +356 -0
  149. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +167 -0
  150. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +266 -0
  151. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1319 -0
  152. vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
  153. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +484 -0
  154. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +542 -0
  155. vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +266 -0
  156. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +414 -0
  157. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  158. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
  159. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
  160. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
  161. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  162. vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
  163. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +290 -0
  164. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
  165. vllm/distributed/kv_transfer/kv_transfer_state.py +73 -0
  166. vllm/distributed/parallel_state.py +1489 -0
  167. vllm/distributed/tpu_distributed_utils.py +178 -0
  168. vllm/distributed/utils.py +536 -0
  169. vllm/engine/__init__.py +0 -0
  170. vllm/engine/arg_utils.py +1857 -0
  171. vllm/engine/async_llm_engine.py +1044 -0
  172. vllm/engine/async_timeout.py +173 -0
  173. vllm/engine/llm_engine.py +1849 -0
  174. vllm/engine/metrics.py +577 -0
  175. vllm/engine/metrics_types.py +84 -0
  176. vllm/engine/multiprocessing/__init__.py +145 -0
  177. vllm/engine/multiprocessing/client.py +643 -0
  178. vllm/engine/multiprocessing/engine.py +470 -0
  179. vllm/engine/output_processor/__init__.py +0 -0
  180. vllm/engine/output_processor/interfaces.py +61 -0
  181. vllm/engine/output_processor/single_step.py +145 -0
  182. vllm/engine/output_processor/stop_checker.py +131 -0
  183. vllm/engine/output_processor/util.py +28 -0
  184. vllm/engine/protocol.py +343 -0
  185. vllm/entrypoints/__init__.py +0 -0
  186. vllm/entrypoints/api_server.py +178 -0
  187. vllm/entrypoints/chat_utils.py +1535 -0
  188. vllm/entrypoints/cli/__init__.py +12 -0
  189. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  190. vllm/entrypoints/cli/benchmark/base.py +25 -0
  191. vllm/entrypoints/cli/benchmark/latency.py +21 -0
  192. vllm/entrypoints/cli/benchmark/main.py +58 -0
  193. vllm/entrypoints/cli/benchmark/serve.py +21 -0
  194. vllm/entrypoints/cli/benchmark/throughput.py +21 -0
  195. vllm/entrypoints/cli/collect_env.py +36 -0
  196. vllm/entrypoints/cli/main.py +60 -0
  197. vllm/entrypoints/cli/openai.py +214 -0
  198. vllm/entrypoints/cli/run_batch.py +69 -0
  199. vllm/entrypoints/cli/serve.py +232 -0
  200. vllm/entrypoints/cli/types.py +29 -0
  201. vllm/entrypoints/constants.py +10 -0
  202. vllm/entrypoints/context.py +444 -0
  203. vllm/entrypoints/harmony_utils.py +431 -0
  204. vllm/entrypoints/launcher.py +168 -0
  205. vllm/entrypoints/llm.py +1579 -0
  206. vllm/entrypoints/logger.py +79 -0
  207. vllm/entrypoints/openai/__init__.py +0 -0
  208. vllm/entrypoints/openai/api_server.py +2011 -0
  209. vllm/entrypoints/openai/cli_args.py +281 -0
  210. vllm/entrypoints/openai/logits_processors.py +90 -0
  211. vllm/entrypoints/openai/protocol.py +2590 -0
  212. vllm/entrypoints/openai/run_batch.py +497 -0
  213. vllm/entrypoints/openai/serving_chat.py +1591 -0
  214. vllm/entrypoints/openai/serving_classification.py +176 -0
  215. vllm/entrypoints/openai/serving_completion.py +688 -0
  216. vllm/entrypoints/openai/serving_embedding.py +632 -0
  217. vllm/entrypoints/openai/serving_engine.py +996 -0
  218. vllm/entrypoints/openai/serving_models.py +288 -0
  219. vllm/entrypoints/openai/serving_pooling.py +277 -0
  220. vllm/entrypoints/openai/serving_responses.py +1690 -0
  221. vllm/entrypoints/openai/serving_score.py +479 -0
  222. vllm/entrypoints/openai/serving_tokenization.py +196 -0
  223. vllm/entrypoints/openai/serving_transcription.py +136 -0
  224. vllm/entrypoints/openai/speech_to_text.py +388 -0
  225. vllm/entrypoints/openai/tool_parsers/__init__.py +51 -0
  226. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
  227. vllm/entrypoints/openai/tool_parsers/deepseekv31_tool_parser.py +367 -0
  228. vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
  229. vllm/entrypoints/openai/tool_parsers/glm4_moe_tool_parser.py +185 -0
  230. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
  231. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
  232. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +418 -0
  233. vllm/entrypoints/openai/tool_parsers/hunyuan_a13b_tool_parser.py +372 -0
  234. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
  235. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
  236. vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py +377 -0
  237. vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
  238. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +269 -0
  239. vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +816 -0
  240. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
  241. vllm/entrypoints/openai/tool_parsers/openai_tool_parser.py +73 -0
  242. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
  243. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
  244. vllm/entrypoints/openai/tool_parsers/qwen3coder_tool_parser.py +707 -0
  245. vllm/entrypoints/openai/tool_parsers/seed_oss_tool_parser.py +679 -0
  246. vllm/entrypoints/openai/tool_parsers/step3_tool_parser.py +296 -0
  247. vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
  248. vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +524 -0
  249. vllm/entrypoints/renderer.py +395 -0
  250. vllm/entrypoints/score_utils.py +232 -0
  251. vllm/entrypoints/ssl.py +75 -0
  252. vllm/entrypoints/tool.py +139 -0
  253. vllm/entrypoints/tool_server.py +195 -0
  254. vllm/entrypoints/utils.py +328 -0
  255. vllm/env_override.py +23 -0
  256. vllm/envs.py +1354 -0
  257. vllm/executor/__init__.py +0 -0
  258. vllm/executor/executor_base.py +378 -0
  259. vllm/executor/mp_distributed_executor.py +244 -0
  260. vllm/executor/msgspec_utils.py +35 -0
  261. vllm/executor/multiproc_worker_utils.py +279 -0
  262. vllm/executor/ray_distributed_executor.py +699 -0
  263. vllm/executor/ray_utils.py +410 -0
  264. vllm/executor/uniproc_executor.py +152 -0
  265. vllm/forward_context.py +273 -0
  266. vllm/inputs/__init__.py +44 -0
  267. vllm/inputs/data.py +356 -0
  268. vllm/inputs/parse.py +151 -0
  269. vllm/inputs/preprocess.py +973 -0
  270. vllm/inputs/registry.py +251 -0
  271. vllm/logger.py +229 -0
  272. vllm/logging_utils/__init__.py +8 -0
  273. vllm/logging_utils/dump_input.py +81 -0
  274. vllm/logging_utils/formatter.py +79 -0
  275. vllm/logits_process.py +119 -0
  276. vllm/logprobs.py +28 -0
  277. vllm/lora/__init__.py +0 -0
  278. vllm/lora/layers/__init__.py +34 -0
  279. vllm/lora/layers/base.py +69 -0
  280. vllm/lora/layers/base_linear.py +184 -0
  281. vllm/lora/layers/column_parallel_linear.py +622 -0
  282. vllm/lora/layers/logits_processor.py +247 -0
  283. vllm/lora/layers/qkv_x_parallel_linear.py +8 -0
  284. vllm/lora/layers/replicated_linear.py +61 -0
  285. vllm/lora/layers/row_parallel_linear.py +201 -0
  286. vllm/lora/layers/utils.py +60 -0
  287. vllm/lora/layers/vocal_parallel_embedding.py +172 -0
  288. vllm/lora/lora.py +199 -0
  289. vllm/lora/models.py +792 -0
  290. vllm/lora/ops/__init__.py +0 -0
  291. vllm/lora/ops/ipex_ops/__init__.py +7 -0
  292. vllm/lora/ops/ipex_ops/lora_ops.py +44 -0
  293. vllm/lora/ops/torch_ops/__init__.py +16 -0
  294. vllm/lora/ops/torch_ops/lora_ops.py +119 -0
  295. vllm/lora/ops/triton_ops/__init__.py +12 -0
  296. vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
  297. vllm/lora/ops/triton_ops/lora_expand_op.py +291 -0
  298. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
  299. vllm/lora/ops/triton_ops/lora_shrink_op.py +245 -0
  300. vllm/lora/ops/triton_ops/utils.py +126 -0
  301. vllm/lora/ops/xla_ops/__init__.py +7 -0
  302. vllm/lora/ops/xla_ops/lora_ops.py +145 -0
  303. vllm/lora/peft_helper.py +127 -0
  304. vllm/lora/punica_wrapper/__init__.py +10 -0
  305. vllm/lora/punica_wrapper/punica_base.py +458 -0
  306. vllm/lora/punica_wrapper/punica_cpu.py +349 -0
  307. vllm/lora/punica_wrapper/punica_gpu.py +279 -0
  308. vllm/lora/punica_wrapper/punica_selector.py +20 -0
  309. vllm/lora/punica_wrapper/punica_tpu.py +391 -0
  310. vllm/lora/punica_wrapper/punica_xpu.py +276 -0
  311. vllm/lora/punica_wrapper/utils.py +136 -0
  312. vllm/lora/request.py +99 -0
  313. vllm/lora/resolver.py +85 -0
  314. vllm/lora/utils.py +246 -0
  315. vllm/lora/worker_manager.py +256 -0
  316. vllm/model_executor/__init__.py +16 -0
  317. vllm/model_executor/custom_op.py +194 -0
  318. vllm/model_executor/layers/__init__.py +0 -0
  319. vllm/model_executor/layers/activation.py +575 -0
  320. vllm/model_executor/layers/attention_layer_base.py +23 -0
  321. vllm/model_executor/layers/fla/__init__.py +8 -0
  322. vllm/model_executor/layers/fla/ops/__init__.py +17 -0
  323. vllm/model_executor/layers/fla/ops/chunk.py +225 -0
  324. vllm/model_executor/layers/fla/ops/chunk_delta_h.py +290 -0
  325. vllm/model_executor/layers/fla/ops/chunk_o.py +177 -0
  326. vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +140 -0
  327. vllm/model_executor/layers/fla/ops/cumsum.py +226 -0
  328. vllm/model_executor/layers/fla/ops/fused_recurrent.py +366 -0
  329. vllm/model_executor/layers/fla/ops/index.py +39 -0
  330. vllm/model_executor/layers/fla/ops/l2norm.py +143 -0
  331. vllm/model_executor/layers/fla/ops/layernorm_guard.py +337 -0
  332. vllm/model_executor/layers/fla/ops/op.py +39 -0
  333. vllm/model_executor/layers/fla/ops/solve_tril.py +365 -0
  334. vllm/model_executor/layers/fla/ops/utils.py +180 -0
  335. vllm/model_executor/layers/fla/ops/wy_fast.py +114 -0
  336. vllm/model_executor/layers/fused_moe/__init__.py +80 -0
  337. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +304 -0
  338. vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +164 -0
  339. vllm/model_executor/layers/fused_moe/config.py +497 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
  454. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
  455. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
  456. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
  457. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  458. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  459. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  460. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  461. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  462. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  463. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  464. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  465. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  466. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  467. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  468. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  469. vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  470. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  471. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  472. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  473. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  474. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
  475. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  476. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  477. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  478. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  479. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  480. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
  481. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  482. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  483. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  484. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  485. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  486. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  487. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  488. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  489. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
  490. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  491. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  492. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  493. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  494. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  495. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  496. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  497. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  498. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  499. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  500. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  501. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  502. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  503. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  504. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  505. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  506. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  507. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  508. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  509. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  510. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  511. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  512. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  513. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  514. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  515. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  516. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  517. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  518. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  519. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  520. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
  521. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  522. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  523. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  524. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  525. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  526. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  527. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  528. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  529. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  530. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  531. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  532. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  533. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  534. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  535. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  536. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  537. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  538. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  539. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  540. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  541. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  542. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  543. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  544. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  545. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  546. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  547. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  548. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  549. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  550. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  551. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  552. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  553. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  554. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  555. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  556. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  557. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  558. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  559. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  560. vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +297 -0
  561. vllm/model_executor/layers/fused_moe/cutlass_moe.py +996 -0
  562. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +370 -0
  563. vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +413 -0
  564. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +280 -0
  565. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +229 -0
  566. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +243 -0
  567. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +97 -0
  568. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1042 -0
  569. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +240 -0
  570. vllm/model_executor/layers/fused_moe/fused_moe.py +2081 -0
  571. vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +247 -0
  572. vllm/model_executor/layers/fused_moe/layer.py +1951 -0
  573. vllm/model_executor/layers/fused_moe/modular_kernel.py +892 -0
  574. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +87 -0
  575. vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
  576. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +205 -0
  577. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  578. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +321 -0
  579. vllm/model_executor/layers/fused_moe/prepare_finalize.py +72 -0
  580. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +431 -0
  581. vllm/model_executor/layers/fused_moe/routing_simulator.py +291 -0
  582. vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +146 -0
  583. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +171 -0
  584. vllm/model_executor/layers/fused_moe/trtllm_moe.py +197 -0
  585. vllm/model_executor/layers/fused_moe/utils.py +270 -0
  586. vllm/model_executor/layers/layernorm.py +381 -0
  587. vllm/model_executor/layers/lightning_attn.py +661 -0
  588. vllm/model_executor/layers/linear.py +1567 -0
  589. vllm/model_executor/layers/logits_processor.py +199 -0
  590. vllm/model_executor/layers/mamba/__init__.py +0 -0
  591. vllm/model_executor/layers/mamba/abstract.py +45 -0
  592. vllm/model_executor/layers/mamba/linear_attn.py +432 -0
  593. vllm/model_executor/layers/mamba/mamba2_metadata.py +186 -0
  594. vllm/model_executor/layers/mamba/mamba_mixer.py +517 -0
  595. vllm/model_executor/layers/mamba/mamba_mixer2.py +803 -0
  596. vllm/model_executor/layers/mamba/mamba_utils.py +202 -0
  597. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  598. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +982 -0
  599. vllm/model_executor/layers/mamba/ops/layernorm_gated.py +168 -0
  600. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
  601. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +262 -0
  602. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +574 -0
  603. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +751 -0
  604. vllm/model_executor/layers/mamba/ops/ssd_combined.py +248 -0
  605. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +248 -0
  606. vllm/model_executor/layers/mamba/short_conv.py +270 -0
  607. vllm/model_executor/layers/mla.py +158 -0
  608. vllm/model_executor/layers/pooler.py +732 -0
  609. vllm/model_executor/layers/quantization/__init__.py +157 -0
  610. vllm/model_executor/layers/quantization/auto_round.py +388 -0
  611. vllm/model_executor/layers/quantization/awq.py +228 -0
  612. vllm/model_executor/layers/quantization/awq_marlin.py +548 -0
  613. vllm/model_executor/layers/quantization/awq_triton.py +320 -0
  614. vllm/model_executor/layers/quantization/base_config.py +164 -0
  615. vllm/model_executor/layers/quantization/bitblas.py +464 -0
  616. vllm/model_executor/layers/quantization/bitsandbytes.py +621 -0
  617. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  618. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +795 -0
  619. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1651 -0
  620. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +27 -0
  621. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +366 -0
  622. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  623. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
  624. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +105 -0
  625. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +161 -0
  626. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +169 -0
  627. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +135 -0
  628. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
  629. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +156 -0
  630. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
  631. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
  632. vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +227 -0
  633. vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +135 -0
  634. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +21 -0
  635. vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
  636. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
  637. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  638. vllm/model_executor/layers/quantization/deepgemm.py +81 -0
  639. vllm/model_executor/layers/quantization/deepspeedfp.py +196 -0
  640. vllm/model_executor/layers/quantization/experts_int8.py +215 -0
  641. vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
  642. vllm/model_executor/layers/quantization/fp8.py +1179 -0
  643. vllm/model_executor/layers/quantization/gguf.py +597 -0
  644. vllm/model_executor/layers/quantization/gptq.py +300 -0
  645. vllm/model_executor/layers/quantization/gptq_bitblas.py +448 -0
  646. vllm/model_executor/layers/quantization/gptq_marlin.py +700 -0
  647. vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
  648. vllm/model_executor/layers/quantization/hqq_marlin.py +333 -0
  649. vllm/model_executor/layers/quantization/inc.py +61 -0
  650. vllm/model_executor/layers/quantization/input_quant_fp8.py +103 -0
  651. vllm/model_executor/layers/quantization/ipex_quant.py +410 -0
  652. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  653. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +91 -0
  654. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +93 -0
  655. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
  656. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +302 -0
  657. vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +92 -0
  658. vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +117 -0
  659. vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +92 -0
  660. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
  661. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +144 -0
  662. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +139 -0
  663. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
  664. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +89 -0
  665. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +163 -0
  666. vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +206 -0
  667. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
  668. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
  669. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +104 -0
  670. vllm/model_executor/layers/quantization/kv_cache.py +139 -0
  671. vllm/model_executor/layers/quantization/modelopt.py +1548 -0
  672. vllm/model_executor/layers/quantization/moe_wna16.py +473 -0
  673. vllm/model_executor/layers/quantization/mxfp4.py +951 -0
  674. vllm/model_executor/layers/quantization/petit.py +306 -0
  675. vllm/model_executor/layers/quantization/ptpc_fp8.py +129 -0
  676. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  677. vllm/model_executor/layers/quantization/quark/quark.py +431 -0
  678. vllm/model_executor/layers/quantization/quark/quark_moe.py +434 -0
  679. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  680. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  681. vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +112 -0
  682. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +163 -0
  683. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
  684. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  685. vllm/model_executor/layers/quantization/rtn.py +456 -0
  686. vllm/model_executor/layers/quantization/schema.py +86 -0
  687. vllm/model_executor/layers/quantization/torchao.py +214 -0
  688. vllm/model_executor/layers/quantization/tpu_int8.py +125 -0
  689. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  690. vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
  691. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +210 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  722. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  723. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  724. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  725. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  726. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  727. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  728. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  729. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  730. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  731. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  732. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  733. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  734. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  735. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  736. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  737. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  738. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  739. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  740. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  741. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  742. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  743. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  744. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  745. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  746. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  747. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  748. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  749. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  750. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  751. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  752. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  753. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  754. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  755. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  756. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  757. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  758. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  759. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  760. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  761. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  762. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  763. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  764. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  765. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  766. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  767. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  768. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  769. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  770. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  771. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  772. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  773. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  774. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  775. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  776. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  777. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  778. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  779. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  780. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  781. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  782. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  783. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  784. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  785. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  786. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  787. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  788. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  789. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  790. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  791. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  792. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  793. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  794. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  795. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  796. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  797. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  798. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  799. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  800. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  801. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  802. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  803. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  804. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  805. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  806. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  807. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  808. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  809. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  810. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  811. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  812. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  813. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  814. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  815. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  816. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  817. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  818. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  819. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  820. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  821. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  822. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  823. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  824. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  825. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  826. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  827. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  828. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  829. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  830. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  831. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  832. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  833. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  834. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  835. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  836. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  837. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  838. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  839. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  840. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  841. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  842. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  843. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  844. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  845. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  846. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  847. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  848. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  849. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  850. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  851. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  852. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  853. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  854. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  855. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  856. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  857. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  858. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  859. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  860. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  861. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  862. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  863. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  864. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  865. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  866. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  867. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  868. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  869. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  870. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  871. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  872. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  873. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  874. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  875. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  876. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  877. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  878. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  879. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  880. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  881. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  882. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  883. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  884. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  885. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  886. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  887. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  888. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  889. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  890. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  891. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  892. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  893. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  894. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  895. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  896. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  897. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  898. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  899. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  900. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  901. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  902. vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
  903. vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +85 -0
  904. vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +258 -0
  905. vllm/model_executor/layers/quantization/utils/fp8_utils.py +795 -0
  906. vllm/model_executor/layers/quantization/utils/gptq_utils.py +96 -0
  907. vllm/model_executor/layers/quantization/utils/int8_utils.py +492 -0
  908. vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
  909. vllm/model_executor/layers/quantization/utils/machete_utils.py +50 -0
  910. vllm/model_executor/layers/quantization/utils/marlin_utils.py +479 -0
  911. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +396 -0
  912. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +345 -0
  913. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
  914. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
  915. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +132 -0
  916. vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +20 -0
  917. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +137 -0
  918. vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +59 -0
  919. vllm/model_executor/layers/quantization/utils/petit_utils.py +122 -0
  920. vllm/model_executor/layers/quantization/utils/quant_utils.py +627 -0
  921. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +458 -0
  922. vllm/model_executor/layers/resampler.py +270 -0
  923. vllm/model_executor/layers/rotary_embedding/__init__.py +190 -0
  924. vllm/model_executor/layers/rotary_embedding/base.py +156 -0
  925. vllm/model_executor/layers/rotary_embedding/common.py +105 -0
  926. vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +140 -0
  927. vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +197 -0
  928. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +41 -0
  929. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +67 -0
  930. vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +80 -0
  931. vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
  932. vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
  933. vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +81 -0
  934. vllm/model_executor/layers/rotary_embedding/mrope.py +1140 -0
  935. vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +42 -0
  936. vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +129 -0
  937. vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +68 -0
  938. vllm/model_executor/layers/sampler.py +1198 -0
  939. vllm/model_executor/layers/shared_fused_moe/__init__.py +6 -0
  940. vllm/model_executor/layers/shared_fused_moe/shared_fused_moe.py +56 -0
  941. vllm/model_executor/layers/utils.py +196 -0
  942. vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
  943. vllm/model_executor/model_loader/__init__.py +138 -0
  944. vllm/model_executor/model_loader/base_loader.py +52 -0
  945. vllm/model_executor/model_loader/bitsandbytes_loader.py +787 -0
  946. vllm/model_executor/model_loader/default_loader.py +278 -0
  947. vllm/model_executor/model_loader/dummy_loader.py +28 -0
  948. vllm/model_executor/model_loader/gguf_loader.py +155 -0
  949. vllm/model_executor/model_loader/runai_streamer_loader.py +104 -0
  950. vllm/model_executor/model_loader/sharded_state_loader.py +199 -0
  951. vllm/model_executor/model_loader/tensorizer.py +743 -0
  952. vllm/model_executor/model_loader/tensorizer_loader.py +143 -0
  953. vllm/model_executor/model_loader/tpu.py +114 -0
  954. vllm/model_executor/model_loader/utils.py +271 -0
  955. vllm/model_executor/model_loader/weight_utils.py +946 -0
  956. vllm/model_executor/models/__init__.py +30 -0
  957. vllm/model_executor/models/adapters.py +542 -0
  958. vllm/model_executor/models/aimv2.py +246 -0
  959. vllm/model_executor/models/apertus.py +582 -0
  960. vllm/model_executor/models/arcee.py +423 -0
  961. vllm/model_executor/models/arctic.py +560 -0
  962. vllm/model_executor/models/aria.py +662 -0
  963. vllm/model_executor/models/aya_vision.py +470 -0
  964. vllm/model_executor/models/baichuan.py +475 -0
  965. vllm/model_executor/models/bailing_moe.py +529 -0
  966. vllm/model_executor/models/bamba.py +582 -0
  967. vllm/model_executor/models/bart.py +1343 -0
  968. vllm/model_executor/models/bert.py +613 -0
  969. vllm/model_executor/models/bert_with_rope.py +687 -0
  970. vllm/model_executor/models/blip.py +339 -0
  971. vllm/model_executor/models/blip2.py +716 -0
  972. vllm/model_executor/models/bloom.py +374 -0
  973. vllm/model_executor/models/chameleon.py +1141 -0
  974. vllm/model_executor/models/chatglm.py +479 -0
  975. vllm/model_executor/models/clip.py +407 -0
  976. vllm/model_executor/models/cohere2_vision.py +484 -0
  977. vllm/model_executor/models/commandr.py +467 -0
  978. vllm/model_executor/models/config.py +434 -0
  979. vllm/model_executor/models/constant_size_cache.py +137 -0
  980. vllm/model_executor/models/dbrx.py +473 -0
  981. vllm/model_executor/models/deepseek.py +491 -0
  982. vllm/model_executor/models/deepseek_eagle.py +241 -0
  983. vllm/model_executor/models/deepseek_mtp.py +282 -0
  984. vllm/model_executor/models/deepseek_v2.py +1058 -0
  985. vllm/model_executor/models/deepseek_vl2.py +661 -0
  986. vllm/model_executor/models/donut.py +387 -0
  987. vllm/model_executor/models/dots1.py +547 -0
  988. vllm/model_executor/models/ernie45.py +43 -0
  989. vllm/model_executor/models/ernie45_moe.py +608 -0
  990. vllm/model_executor/models/ernie45_vl.py +1510 -0
  991. vllm/model_executor/models/ernie45_vl_moe.py +728 -0
  992. vllm/model_executor/models/ernie_mtp.py +287 -0
  993. vllm/model_executor/models/exaone.py +552 -0
  994. vllm/model_executor/models/exaone4.py +535 -0
  995. vllm/model_executor/models/fairseq2_llama.py +154 -0
  996. vllm/model_executor/models/falcon.py +511 -0
  997. vllm/model_executor/models/falcon_h1.py +739 -0
  998. vllm/model_executor/models/florence2.py +1107 -0
  999. vllm/model_executor/models/fuyu.py +401 -0
  1000. vllm/model_executor/models/gemma.py +428 -0
  1001. vllm/model_executor/models/gemma2.py +425 -0
  1002. vllm/model_executor/models/gemma3.py +542 -0
  1003. vllm/model_executor/models/gemma3_mm.py +723 -0
  1004. vllm/model_executor/models/gemma3n.py +830 -0
  1005. vllm/model_executor/models/gemma3n_mm.py +767 -0
  1006. vllm/model_executor/models/glm.py +23 -0
  1007. vllm/model_executor/models/glm4.py +305 -0
  1008. vllm/model_executor/models/glm4_1v.py +1669 -0
  1009. vllm/model_executor/models/glm4_moe.py +703 -0
  1010. vllm/model_executor/models/glm4_moe_mtp.py +306 -0
  1011. vllm/model_executor/models/glm4v.py +654 -0
  1012. vllm/model_executor/models/gpt2.py +383 -0
  1013. vllm/model_executor/models/gpt_bigcode.py +346 -0
  1014. vllm/model_executor/models/gpt_j.py +340 -0
  1015. vllm/model_executor/models/gpt_neox.py +333 -0
  1016. vllm/model_executor/models/gpt_oss.py +687 -0
  1017. vllm/model_executor/models/granite.py +498 -0
  1018. vllm/model_executor/models/granite_speech.py +799 -0
  1019. vllm/model_executor/models/granitemoe.py +541 -0
  1020. vllm/model_executor/models/granitemoehybrid.py +684 -0
  1021. vllm/model_executor/models/granitemoeshared.py +342 -0
  1022. vllm/model_executor/models/gritlm.py +262 -0
  1023. vllm/model_executor/models/grok1.py +550 -0
  1024. vllm/model_executor/models/h2ovl.py +536 -0
  1025. vllm/model_executor/models/hunyuan_v1.py +937 -0
  1026. vllm/model_executor/models/hyperclovax_vision.py +1206 -0
  1027. vllm/model_executor/models/idefics2_vision_model.py +416 -0
  1028. vllm/model_executor/models/idefics3.py +758 -0
  1029. vllm/model_executor/models/interfaces.py +854 -0
  1030. vllm/model_executor/models/interfaces_base.py +195 -0
  1031. vllm/model_executor/models/intern_vit.py +481 -0
  1032. vllm/model_executor/models/internlm2.py +453 -0
  1033. vllm/model_executor/models/internlm2_ve.py +148 -0
  1034. vllm/model_executor/models/interns1.py +832 -0
  1035. vllm/model_executor/models/interns1_vit.py +418 -0
  1036. vllm/model_executor/models/internvl.py +1423 -0
  1037. vllm/model_executor/models/jais.py +374 -0
  1038. vllm/model_executor/models/jamba.py +630 -0
  1039. vllm/model_executor/models/jina_vl.py +144 -0
  1040. vllm/model_executor/models/keye.py +1684 -0
  1041. vllm/model_executor/models/keye_vl1_5.py +601 -0
  1042. vllm/model_executor/models/kimi_vl.py +620 -0
  1043. vllm/model_executor/models/lfm2.py +558 -0
  1044. vllm/model_executor/models/llama.py +671 -0
  1045. vllm/model_executor/models/llama4.py +732 -0
  1046. vllm/model_executor/models/llama4_eagle.py +241 -0
  1047. vllm/model_executor/models/llama_eagle.py +171 -0
  1048. vllm/model_executor/models/llama_eagle3.py +292 -0
  1049. vllm/model_executor/models/llava.py +872 -0
  1050. vllm/model_executor/models/llava_next.py +572 -0
  1051. vllm/model_executor/models/llava_next_video.py +479 -0
  1052. vllm/model_executor/models/llava_onevision.py +945 -0
  1053. vllm/model_executor/models/mamba.py +310 -0
  1054. vllm/model_executor/models/mamba2.py +346 -0
  1055. vllm/model_executor/models/mamba_cache.py +83 -0
  1056. vllm/model_executor/models/medusa.py +219 -0
  1057. vllm/model_executor/models/midashenglm.py +788 -0
  1058. vllm/model_executor/models/mimo.py +191 -0
  1059. vllm/model_executor/models/mimo_mtp.py +273 -0
  1060. vllm/model_executor/models/minicpm.py +593 -0
  1061. vllm/model_executor/models/minicpm3.py +230 -0
  1062. vllm/model_executor/models/minicpm_eagle.py +391 -0
  1063. vllm/model_executor/models/minicpmo.py +804 -0
  1064. vllm/model_executor/models/minicpmv.py +1786 -0
  1065. vllm/model_executor/models/minimax_cache.py +36 -0
  1066. vllm/model_executor/models/minimax_text_01.py +1027 -0
  1067. vllm/model_executor/models/minimax_vl_01.py +431 -0
  1068. vllm/model_executor/models/mistral3.py +628 -0
  1069. vllm/model_executor/models/mixtral.py +494 -0
  1070. vllm/model_executor/models/mllama.py +1697 -0
  1071. vllm/model_executor/models/mllama4.py +1079 -0
  1072. vllm/model_executor/models/mlp_speculator.py +206 -0
  1073. vllm/model_executor/models/modernbert.py +374 -0
  1074. vllm/model_executor/models/module_mapping.py +72 -0
  1075. vllm/model_executor/models/molmo.py +1569 -0
  1076. vllm/model_executor/models/moonvit.py +663 -0
  1077. vllm/model_executor/models/motif.py +345 -0
  1078. vllm/model_executor/models/mpt.py +332 -0
  1079. vllm/model_executor/models/nano_nemotron_vl.py +1395 -0
  1080. vllm/model_executor/models/nemotron.py +509 -0
  1081. vllm/model_executor/models/nemotron_h.py +633 -0
  1082. vllm/model_executor/models/nemotron_nas.py +484 -0
  1083. vllm/model_executor/models/nemotron_vl.py +655 -0
  1084. vllm/model_executor/models/nvlm_d.py +203 -0
  1085. vllm/model_executor/models/olmo.py +406 -0
  1086. vllm/model_executor/models/olmo2.py +428 -0
  1087. vllm/model_executor/models/olmoe.py +485 -0
  1088. vllm/model_executor/models/opt.py +413 -0
  1089. vllm/model_executor/models/orion.py +350 -0
  1090. vllm/model_executor/models/ovis.py +572 -0
  1091. vllm/model_executor/models/ovis2_5.py +644 -0
  1092. vllm/model_executor/models/paligemma.py +414 -0
  1093. vllm/model_executor/models/persimmon.py +345 -0
  1094. vllm/model_executor/models/phi.py +357 -0
  1095. vllm/model_executor/models/phi3.py +19 -0
  1096. vllm/model_executor/models/phi3v.py +701 -0
  1097. vllm/model_executor/models/phi4_multimodal.py +1478 -0
  1098. vllm/model_executor/models/phi4flash.py +737 -0
  1099. vllm/model_executor/models/phi4mm.py +1281 -0
  1100. vllm/model_executor/models/phi4mm_audio.py +1254 -0
  1101. vllm/model_executor/models/phi4mm_utils.py +1875 -0
  1102. vllm/model_executor/models/phimoe.py +681 -0
  1103. vllm/model_executor/models/pixtral.py +1348 -0
  1104. vllm/model_executor/models/plamo2.py +1126 -0
  1105. vllm/model_executor/models/qwen.py +363 -0
  1106. vllm/model_executor/models/qwen2.py +526 -0
  1107. vllm/model_executor/models/qwen2_5_omni_thinker.py +985 -0
  1108. vllm/model_executor/models/qwen2_5_vl.py +1256 -0
  1109. vllm/model_executor/models/qwen2_audio.py +492 -0
  1110. vllm/model_executor/models/qwen2_moe.py +558 -0
  1111. vllm/model_executor/models/qwen2_rm.py +122 -0
  1112. vllm/model_executor/models/qwen2_vl.py +1512 -0
  1113. vllm/model_executor/models/qwen3.py +344 -0
  1114. vllm/model_executor/models/qwen3_moe.py +704 -0
  1115. vllm/model_executor/models/qwen3_next.py +1298 -0
  1116. vllm/model_executor/models/qwen3_next_mtp.py +285 -0
  1117. vllm/model_executor/models/qwen_vl.py +795 -0
  1118. vllm/model_executor/models/registry.py +891 -0
  1119. vllm/model_executor/models/roberta.py +252 -0
  1120. vllm/model_executor/models/rvl.py +103 -0
  1121. vllm/model_executor/models/seed_oss.py +488 -0
  1122. vllm/model_executor/models/siglip.py +524 -0
  1123. vllm/model_executor/models/siglip2navit.py +688 -0
  1124. vllm/model_executor/models/skyworkr1v.py +914 -0
  1125. vllm/model_executor/models/smolvlm.py +44 -0
  1126. vllm/model_executor/models/solar.py +506 -0
  1127. vllm/model_executor/models/stablelm.py +344 -0
  1128. vllm/model_executor/models/starcoder2.py +357 -0
  1129. vllm/model_executor/models/step3_text.py +521 -0
  1130. vllm/model_executor/models/step3_vl.py +1091 -0
  1131. vllm/model_executor/models/swin.py +475 -0
  1132. vllm/model_executor/models/tarsier.py +649 -0
  1133. vllm/model_executor/models/telechat2.py +151 -0
  1134. vllm/model_executor/models/teleflm.py +79 -0
  1135. vllm/model_executor/models/terratorch.py +294 -0
  1136. vllm/model_executor/models/transformers.py +883 -0
  1137. vllm/model_executor/models/ultravox.py +667 -0
  1138. vllm/model_executor/models/utils.py +770 -0
  1139. vllm/model_executor/models/vision.py +125 -0
  1140. vllm/model_executor/models/voxtral.py +789 -0
  1141. vllm/model_executor/models/whisper.py +966 -0
  1142. vllm/model_executor/models/zamba2.py +1056 -0
  1143. vllm/model_executor/parameter.py +599 -0
  1144. vllm/model_executor/sampling_metadata.py +597 -0
  1145. vllm/model_executor/utils.py +97 -0
  1146. vllm/model_executor/warmup/__init__.py +0 -0
  1147. vllm/model_executor/warmup/deep_gemm_warmup.py +223 -0
  1148. vllm/model_executor/warmup/kernel_warmup.py +83 -0
  1149. vllm/multimodal/__init__.py +35 -0
  1150. vllm/multimodal/audio.py +116 -0
  1151. vllm/multimodal/base.py +219 -0
  1152. vllm/multimodal/cache.py +507 -0
  1153. vllm/multimodal/hasher.py +110 -0
  1154. vllm/multimodal/image.py +130 -0
  1155. vllm/multimodal/inputs.py +979 -0
  1156. vllm/multimodal/parse.py +496 -0
  1157. vllm/multimodal/processing.py +1921 -0
  1158. vllm/multimodal/profiling.py +313 -0
  1159. vllm/multimodal/registry.py +375 -0
  1160. vllm/multimodal/utils.py +754 -0
  1161. vllm/multimodal/video.py +312 -0
  1162. vllm/outputs.py +517 -0
  1163. vllm/platforms/__init__.py +263 -0
  1164. vllm/platforms/cpu.py +353 -0
  1165. vllm/platforms/cuda.py +731 -0
  1166. vllm/platforms/interface.py +599 -0
  1167. vllm/platforms/rocm.py +504 -0
  1168. vllm/platforms/tpu.py +236 -0
  1169. vllm/platforms/xpu.py +243 -0
  1170. vllm/plugins/__init__.py +72 -0
  1171. vllm/plugins/io_processors/__init__.py +68 -0
  1172. vllm/plugins/io_processors/interface.py +67 -0
  1173. vllm/plugins/lora_resolvers/README.md +16 -0
  1174. vllm/plugins/lora_resolvers/__init__.py +0 -0
  1175. vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
  1176. vllm/pooling_params.py +183 -0
  1177. vllm/profiler/__init__.py +0 -0
  1178. vllm/profiler/layerwise_profile.py +375 -0
  1179. vllm/profiler/utils.py +148 -0
  1180. vllm/py.typed +2 -0
  1181. vllm/ray/__init__.py +0 -0
  1182. vllm/ray/lazy_utils.py +22 -0
  1183. vllm/ray/ray_env.py +72 -0
  1184. vllm/reasoning/__init__.py +25 -0
  1185. vllm/reasoning/abs_reasoning_parsers.py +202 -0
  1186. vllm/reasoning/deepseek_r1_reasoning_parser.py +173 -0
  1187. vllm/reasoning/glm4_moe_reasoning_parser.py +151 -0
  1188. vllm/reasoning/gptoss_reasoning_parser.py +87 -0
  1189. vllm/reasoning/granite_reasoning_parser.py +363 -0
  1190. vllm/reasoning/hunyuan_a13b_reasoning_parser.py +245 -0
  1191. vllm/reasoning/mistral_reasoning_parser.py +47 -0
  1192. vllm/reasoning/qwen3_reasoning_parser.py +151 -0
  1193. vllm/reasoning/step3_reasoning_parser.py +109 -0
  1194. vllm/sampling_params.py +577 -0
  1195. vllm/scalar_type.py +349 -0
  1196. vllm/scripts.py +15 -0
  1197. vllm/sequence.py +1465 -0
  1198. vllm/tasks.py +11 -0
  1199. vllm/test_utils.py +130 -0
  1200. vllm/third_party/__init__.py +0 -0
  1201. vllm/third_party/pynvml.py +6140 -0
  1202. vllm/tracing.py +136 -0
  1203. vllm/transformers_utils/__init__.py +24 -0
  1204. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1205. vllm/transformers_utils/chat_templates/registry.py +71 -0
  1206. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1207. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1208. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1209. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1210. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1211. vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
  1212. vllm/transformers_utils/config.py +1043 -0
  1213. vllm/transformers_utils/config_parser_base.py +20 -0
  1214. vllm/transformers_utils/configs/__init__.py +55 -0
  1215. vllm/transformers_utils/configs/arctic.py +207 -0
  1216. vllm/transformers_utils/configs/chatglm.py +72 -0
  1217. vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
  1218. vllm/transformers_utils/configs/eagle.py +84 -0
  1219. vllm/transformers_utils/configs/falcon.py +90 -0
  1220. vllm/transformers_utils/configs/jais.py +238 -0
  1221. vllm/transformers_utils/configs/kimi_vl.py +37 -0
  1222. vllm/transformers_utils/configs/medusa.py +63 -0
  1223. vllm/transformers_utils/configs/midashenglm.py +101 -0
  1224. vllm/transformers_utils/configs/mistral.py +165 -0
  1225. vllm/transformers_utils/configs/mlp_speculator.py +68 -0
  1226. vllm/transformers_utils/configs/moonvit.py +33 -0
  1227. vllm/transformers_utils/configs/nemotron.py +205 -0
  1228. vllm/transformers_utils/configs/nemotron_h.py +259 -0
  1229. vllm/transformers_utils/configs/nemotron_vl.py +56 -0
  1230. vllm/transformers_utils/configs/ovis.py +176 -0
  1231. vllm/transformers_utils/configs/qwen3_next.py +275 -0
  1232. vllm/transformers_utils/configs/speculators/__init__.py +2 -0
  1233. vllm/transformers_utils/configs/speculators/algos.py +32 -0
  1234. vllm/transformers_utils/configs/speculators/base.py +91 -0
  1235. vllm/transformers_utils/configs/step3_vl.py +123 -0
  1236. vllm/transformers_utils/configs/ultravox.py +120 -0
  1237. vllm/transformers_utils/detokenizer.py +169 -0
  1238. vllm/transformers_utils/detokenizer_utils.py +199 -0
  1239. vllm/transformers_utils/dynamic_module.py +60 -0
  1240. vllm/transformers_utils/processor.py +245 -0
  1241. vllm/transformers_utils/processors/__init__.py +16 -0
  1242. vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
  1243. vllm/transformers_utils/processors/ovis.py +420 -0
  1244. vllm/transformers_utils/processors/ovis2_5.py +458 -0
  1245. vllm/transformers_utils/runai_utils.py +99 -0
  1246. vllm/transformers_utils/s3_utils.py +90 -0
  1247. vllm/transformers_utils/tokenizer.py +293 -0
  1248. vllm/transformers_utils/tokenizer_base.py +149 -0
  1249. vllm/transformers_utils/tokenizer_group.py +132 -0
  1250. vllm/transformers_utils/tokenizers/__init__.py +10 -0
  1251. vllm/transformers_utils/tokenizers/mistral.py +520 -0
  1252. vllm/transformers_utils/utils.py +99 -0
  1253. vllm/triton_utils/__init__.py +16 -0
  1254. vllm/triton_utils/importing.py +95 -0
  1255. vllm/usage/__init__.py +0 -0
  1256. vllm/usage/usage_lib.py +259 -0
  1257. vllm/utils/__init__.py +3438 -0
  1258. vllm/utils/deep_gemm.py +212 -0
  1259. vllm/utils/flashinfer.py +372 -0
  1260. vllm/utils/jsontree.py +90 -0
  1261. vllm/utils/tensor_schema.py +236 -0
  1262. vllm/v1/__init__.py +0 -0
  1263. vllm/v1/attention/__init__.py +0 -0
  1264. vllm/v1/attention/backends/__init__.py +0 -0
  1265. vllm/v1/attention/backends/cpu_attn.py +922 -0
  1266. vllm/v1/attention/backends/flash_attn.py +800 -0
  1267. vllm/v1/attention/backends/flashinfer.py +1128 -0
  1268. vllm/v1/attention/backends/flex_attention.py +796 -0
  1269. vllm/v1/attention/backends/gdn_attn.py +320 -0
  1270. vllm/v1/attention/backends/linear_attn.py +68 -0
  1271. vllm/v1/attention/backends/mamba1_attn.py +81 -0
  1272. vllm/v1/attention/backends/mamba2_attn.py +224 -0
  1273. vllm/v1/attention/backends/mamba_attn.py +52 -0
  1274. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1275. vllm/v1/attention/backends/mla/common.py +1608 -0
  1276. vllm/v1/attention/backends/mla/cutlass_mla.py +301 -0
  1277. vllm/v1/attention/backends/mla/flashattn_mla.py +273 -0
  1278. vllm/v1/attention/backends/mla/flashinfer_mla.py +110 -0
  1279. vllm/v1/attention/backends/mla/flashmla.py +213 -0
  1280. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +255 -0
  1281. vllm/v1/attention/backends/mla/triton_mla.py +175 -0
  1282. vllm/v1/attention/backends/pallas.py +413 -0
  1283. vllm/v1/attention/backends/rocm_aiter_fa.py +548 -0
  1284. vllm/v1/attention/backends/short_conv_attn.py +82 -0
  1285. vllm/v1/attention/backends/tree_attn.py +450 -0
  1286. vllm/v1/attention/backends/triton_attn.py +430 -0
  1287. vllm/v1/attention/backends/utils.py +834 -0
  1288. vllm/v1/attention/backends/xformers.py +437 -0
  1289. vllm/v1/core/__init__.py +0 -0
  1290. vllm/v1/core/block_pool.py +330 -0
  1291. vllm/v1/core/encoder_cache_manager.py +333 -0
  1292. vllm/v1/core/kv_cache_coordinator.py +440 -0
  1293. vllm/v1/core/kv_cache_manager.py +398 -0
  1294. vllm/v1/core/kv_cache_utils.py +1169 -0
  1295. vllm/v1/core/sched/__init__.py +0 -0
  1296. vllm/v1/core/sched/async_scheduler.py +47 -0
  1297. vllm/v1/core/sched/interface.py +158 -0
  1298. vllm/v1/core/sched/output.py +162 -0
  1299. vllm/v1/core/sched/request_queue.py +224 -0
  1300. vllm/v1/core/sched/scheduler.py +1287 -0
  1301. vllm/v1/core/sched/utils.py +69 -0
  1302. vllm/v1/core/single_type_kv_cache_manager.py +670 -0
  1303. vllm/v1/cudagraph_dispatcher.py +121 -0
  1304. vllm/v1/engine/__init__.py +202 -0
  1305. vllm/v1/engine/async_llm.py +757 -0
  1306. vllm/v1/engine/coordinator.py +357 -0
  1307. vllm/v1/engine/core.py +1245 -0
  1308. vllm/v1/engine/core_client.py +1333 -0
  1309. vllm/v1/engine/detokenizer.py +300 -0
  1310. vllm/v1/engine/exceptions.py +17 -0
  1311. vllm/v1/engine/llm_engine.py +332 -0
  1312. vllm/v1/engine/logprobs.py +201 -0
  1313. vllm/v1/engine/output_processor.py +558 -0
  1314. vllm/v1/engine/parallel_sampling.py +133 -0
  1315. vllm/v1/engine/processor.py +524 -0
  1316. vllm/v1/engine/utils.py +857 -0
  1317. vllm/v1/executor/__init__.py +0 -0
  1318. vllm/v1/executor/abstract.py +126 -0
  1319. vllm/v1/executor/multiproc_executor.py +683 -0
  1320. vllm/v1/executor/ray_distributed_executor.py +109 -0
  1321. vllm/v1/kv_cache_interface.py +275 -0
  1322. vllm/v1/metrics/__init__.py +0 -0
  1323. vllm/v1/metrics/loggers.py +717 -0
  1324. vllm/v1/metrics/prometheus.py +82 -0
  1325. vllm/v1/metrics/ray_wrappers.py +133 -0
  1326. vllm/v1/metrics/reader.py +246 -0
  1327. vllm/v1/metrics/stats.py +248 -0
  1328. vllm/v1/outputs.py +147 -0
  1329. vllm/v1/pool/__init__.py +0 -0
  1330. vllm/v1/pool/metadata.py +77 -0
  1331. vllm/v1/request.py +237 -0
  1332. vllm/v1/sample/__init__.py +0 -0
  1333. vllm/v1/sample/logits_processor/__init__.py +294 -0
  1334. vllm/v1/sample/logits_processor/builtin.py +273 -0
  1335. vllm/v1/sample/logits_processor/interface.py +97 -0
  1336. vllm/v1/sample/logits_processor/state.py +161 -0
  1337. vllm/v1/sample/metadata.py +43 -0
  1338. vllm/v1/sample/ops/__init__.py +0 -0
  1339. vllm/v1/sample/ops/bad_words.py +39 -0
  1340. vllm/v1/sample/ops/logprobs.py +26 -0
  1341. vllm/v1/sample/ops/penalties.py +43 -0
  1342. vllm/v1/sample/ops/topk_topp_sampler.py +254 -0
  1343. vllm/v1/sample/rejection_sampler.py +623 -0
  1344. vllm/v1/sample/sampler.py +281 -0
  1345. vllm/v1/sample/tpu/__init__.py +0 -0
  1346. vllm/v1/sample/tpu/metadata.py +124 -0
  1347. vllm/v1/sample/tpu/sampler.py +213 -0
  1348. vllm/v1/serial_utils.py +395 -0
  1349. vllm/v1/spec_decode/__init__.py +0 -0
  1350. vllm/v1/spec_decode/eagle.py +740 -0
  1351. vllm/v1/spec_decode/medusa.py +66 -0
  1352. vllm/v1/spec_decode/metadata.py +62 -0
  1353. vllm/v1/spec_decode/metrics.py +191 -0
  1354. vllm/v1/spec_decode/ngram_proposer.py +157 -0
  1355. vllm/v1/spec_decode/utils.py +14 -0
  1356. vllm/v1/structured_output/__init__.py +297 -0
  1357. vllm/v1/structured_output/backend_guidance.py +245 -0
  1358. vllm/v1/structured_output/backend_lm_format_enforcer.py +167 -0
  1359. vllm/v1/structured_output/backend_outlines.py +320 -0
  1360. vllm/v1/structured_output/backend_types.py +134 -0
  1361. vllm/v1/structured_output/backend_xgrammar.py +323 -0
  1362. vllm/v1/structured_output/request.py +86 -0
  1363. vllm/v1/structured_output/utils.py +373 -0
  1364. vllm/v1/utils.py +382 -0
  1365. vllm/v1/worker/__init__.py +0 -0
  1366. vllm/v1/worker/block_table.py +221 -0
  1367. vllm/v1/worker/cpu_model_runner.py +163 -0
  1368. vllm/v1/worker/cpu_worker.py +183 -0
  1369. vllm/v1/worker/gpu_input_batch.py +821 -0
  1370. vllm/v1/worker/gpu_model_runner.py +3743 -0
  1371. vllm/v1/worker/gpu_worker.py +697 -0
  1372. vllm/v1/worker/kv_connector_model_runner_mixin.py +122 -0
  1373. vllm/v1/worker/lora_model_runner_mixin.py +192 -0
  1374. vllm/v1/worker/tpu_input_batch.py +585 -0
  1375. vllm/v1/worker/tpu_model_runner.py +1947 -0
  1376. vllm/v1/worker/tpu_worker.py +340 -0
  1377. vllm/v1/worker/utils.py +290 -0
  1378. vllm/v1/worker/worker_base.py +65 -0
  1379. vllm/v1/worker/xpu_model_runner.py +53 -0
  1380. vllm/v1/worker/xpu_worker.py +179 -0
  1381. vllm/version.py +41 -0
  1382. vllm/vllm_flash_attn/.gitkeep +0 -0
  1383. vllm/worker/__init__.py +0 -0
  1384. vllm/worker/cache_engine.py +145 -0
  1385. vllm/worker/enc_dec_model_runner.py +553 -0
  1386. vllm/worker/model_runner.py +2016 -0
  1387. vllm/worker/model_runner_base.py +307 -0
  1388. vllm/worker/utils.py +49 -0
  1389. vllm/worker/worker.py +670 -0
  1390. vllm/worker/worker_base.py +651 -0
  1391. vllm_cpu_avx512vnni-0.10.2.post2.dist-info/METADATA +326 -0
  1392. vllm_cpu_avx512vnni-0.10.2.post2.dist-info/RECORD +1395 -0
  1393. vllm_cpu_avx512vnni-0.10.2.post2.dist-info/WHEEL +5 -0
  1394. vllm_cpu_avx512vnni-0.10.2.post2.dist-info/entry_points.txt +5 -0
  1395. vllm_cpu_avx512vnni-0.10.2.post2.dist-info/top_level.txt +1 -0
vllm/multimodal/processing.py
@@ -0,0 +1,1921 @@
+ # SPDX-License-Identifier: Apache-2.0
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+ from abc import ABC, abstractmethod
+ from collections import defaultdict
+ from collections.abc import (Callable, Generator, ItemsView, Iterable, Mapping,
+                              Sequence)
+ from dataclasses import dataclass, field, replace
+ from enum import Enum
+ from functools import lru_cache
+ from typing import (TYPE_CHECKING, Generic, NamedTuple, Optional, Protocol,
+                     TypeVar, Union, cast)
+
+ import regex as re
+ import torch
+ from typing_extensions import assert_never
+
+ from vllm.inputs import InputProcessingContext
+ from vllm.logger import init_logger
+ from vllm.transformers_utils.tokenizer import (AnyTokenizer, decode_tokens,
+                                                encode_tokens)
+ from vllm.utils import flatten_2d_lists, full_groupby
+
+ from .hasher import MultiModalHasher
+ from .inputs import (MultiModalDataDict, MultiModalEncDecInputs,
+                      MultiModalFieldConfig, MultiModalInputs,
+                      MultiModalKwargsItem, MultiModalKwargsItems,
+                      MultiModalKwargsOptionalItems, MultiModalUUIDDict,
+                      PlaceholderRange)
+ from .parse import (DictEmbeddingItems, EmbeddingItems, MultiModalDataItems,
+                     MultiModalDataParser)
+
+ if TYPE_CHECKING:
+     from transformers.configuration_utils import PretrainedConfig
+     from transformers.feature_extraction_utils import BatchFeature
+     from transformers.processing_utils import ProcessorMixin
+
+     from .cache import BaseMultiModalProcessorCache
+     from .profiling import BaseDummyInputsBuilder
+
+ logger = init_logger(__name__)
+
+ _S = TypeVar("_S", str, list[int])
+
+ PromptSeq = Union[str, list[int]]
+ """A token sequence (list of token IDs) or text."""
+
+
+ @lru_cache(maxsize=2048)
+ def _cached_encode(
+     tokenizer: AnyTokenizer,
+     text: str,
+     *,
+     add_special_tokens: Optional[bool] = None,
+ ) -> list[int]:
+     return encode_tokens(tokenizer,
+                          text,
+                          add_special_tokens=add_special_tokens)
+
+
+ @lru_cache(maxsize=2048)
+ def _cached_decode(
+     tokenizer: AnyTokenizer,
+     token_ids: tuple[int, ...],
+     *,
+     skip_special_tokens: Optional[bool] = None,
+ ) -> str:
+     return decode_tokens(tokenizer,
+                          list(token_ids),
+                          skip_special_tokens=skip_special_tokens)
+
+
+ def _seq2text(tokenizer: AnyTokenizer, seq: PromptSeq) -> str:
+     if isinstance(seq, str):
+         return seq
+
+     return _cached_decode(tokenizer, tuple(seq))
+
+
+ def _seq2tokens(tokenizer: AnyTokenizer, seq: PromptSeq) -> list[int]:
+     if isinstance(seq, str):
+         return _cached_encode(tokenizer, seq, add_special_tokens=False)
+
+     return seq
+
+
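Aside: the two `_cached_*` helpers work because token-ID lists are converted to hashable tuples before hitting `lru_cache` (note `_seq2text` calls `tuple(seq)`). A minimal, self-contained sketch of the same trick; the toy `cached_decode` below is hypothetical, standing in for a real tokenizer round-trip:

```python
from functools import lru_cache

@lru_cache(maxsize=2048)
def cached_decode(token_ids: tuple[int, ...]) -> str:
    # Stand-in for `decode_tokens(tokenizer, list(token_ids))`.
    return " ".join(map(str, token_ids))

seq = [1, 2, 3]
cached_decode(tuple(seq))  # lists are unhashable; tuples are cache keys
cached_decode(tuple(seq))
print(cached_decode.cache_info().hits)  # 1 -- second call served from cache
```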
+ class _GetMatchIndex(Protocol):
+
+     def __call__(
+         self,
+         tokenizer: AnyTokenizer,
+         prompt: PromptSeq,
+         start_idx: int = 0,
+     ) -> Optional[int]:
+         ...
+
+
+ @dataclass
+ class PromptIndex:
+     """Resolves to an index in the prompt."""
+     get_match_index: _GetMatchIndex
+
+
+ class PromptIndexTargets:
+
+     @staticmethod
+     def start() -> PromptIndex:
+         """
+         Resolves to the start of the prompt (before the first token).
+
+         This results in a match even if the prompt is empty.
+         """
+         return PromptIndex(lambda tokenizer, prompt, start_idx=0: 0)
+
+     @staticmethod
+     def prefix(seq: PromptSeq) -> PromptIndex:
+         """
+         Resolves to a location in the prompt after the given prefix.
+         """
+
+         def get_match_index(
+             tokenizer: AnyTokenizer,
+             prompt: PromptSeq,
+             start_idx: int = 0,
+         ) -> Optional[int]:
+             if start_idx != 0:
+                 return None
+
+             prefix = seq
+
+             if isinstance(prompt, str):
+                 if not isinstance(prefix, str):
+                     # Make both `str`
+                     prefix = decode_tokens(tokenizer, prefix)
+             else:
+                 if isinstance(prefix, str):
+                     # Make both `list[int]`
+                     prefix = encode_tokens(tokenizer,
+                                            prefix,
+                                            add_special_tokens=False)
+
+             match_idx = len(prefix)
+             return match_idx if prompt[:match_idx] == prefix else None
+
+         return PromptIndex(get_match_index)
+
+     @staticmethod
+     def end() -> PromptIndex:
+         """
+         Resolves to the end of the prompt (after the last token).
+
+         This results in a match even if the prompt is empty.
+         """
+         return PromptIndex(lambda tokenizer, prompt, start_idx=0: len(prompt))
+
+
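Aside: a `PromptIndex` resolves to a single position rather than a span, which is what lets insertions target the start, end, or a prefix of the prompt. A small sketch of the matching semantics, assuming a vLLM install; passing `tokenizer=None` is safe here only because both prompt and prefix are token IDs, so no tokenizer call is made:

```python
from vllm.multimodal.processing import PromptIndexTargets

target = PromptIndexTargets.prefix([1, 2])

print(target.get_match_index(None, [1, 2, 3]))  # 2 -- insertion point after the prefix
print(target.get_match_index(None, [9, 9, 9]))  # None -- prompt does not start with [1, 2]
print(PromptIndexTargets.start().get_match_index(None, []))  # 0 -- matches an empty prompt
```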
+ UpdateTarget = Union[PromptSeq, PromptIndex]
+ """
+ The token sequence or text to update.
+ """
+
+ PromptUpdateTarget = Union[Callable[[int], UpdateTarget], UpdateTarget]
+ """
+ Given the index of the processed item within
+ [`modality`][vllm.multimodal.processing.PromptUpdate.modality],
+ output the corresponding token sequence (or text).
+
+ For convenience, you can directly pass in the token sequence (or text)
+ instead of a function if it does not depend on the input.
+ """
+
+
+ @dataclass
+ class PromptUpdateDetails(Generic[_S]):
+     """Details about the token sequence or text that are part of the update."""
+
+     full: _S
+     """The full content."""
+
+     is_embed: Optional[Callable[[AnyTokenizer, PromptSeq],
+                                 torch.Tensor]] = None
+     """
+     Given [`full`][vllm.multimodal.processing.PromptUpdateDetails.full],
+     return a boolean mask of shape `(len(full),)` indicating which positions
+     of `full` to assign embeddings to.
+
+     `None` (default) means to assign embeddings to all positions of `full`.
+
+     The embeddings are obtained by calling
+     [`SupportsMultiModal.get_multimodal_embeddings`][vllm.model_executor.models.interfaces.SupportsMultiModal.get_multimodal_embeddings].
+     """
+
+     @staticmethod
+     def from_seq(seq: _S) -> "PromptUpdateDetails[_S]":
+         return PromptUpdateDetails(full=seq)
+
+     @staticmethod
+     def select_text(
+         seq: _S,
+         embed_text: str,
+     ) -> "PromptUpdateDetails[_S]":
+
+         def is_embed(tokenizer: AnyTokenizer, full: PromptSeq) -> torch.Tensor:
+             embed_token_ids = encode_tokens(tokenizer, embed_text)
+             token_ids = _seq2tokens(tokenizer, full)
+
+             return torch.isin(
+                 torch.tensor(token_ids),
+                 torch.tensor(embed_token_ids),
+             )
+
+         return PromptUpdateDetails(full=seq, is_embed=is_embed)
+
+     @staticmethod
+     def select_token_id(
+         seq: _S,
+         embed_token_id: int,
+     ) -> "PromptUpdateDetails[_S]":
+
+         def is_embed(tokenizer: AnyTokenizer, full: PromptSeq) -> torch.Tensor:
+             token_ids = _seq2tokens(tokenizer, full)
+
+             return torch.tensor(token_ids) == embed_token_id
+
+         return PromptUpdateDetails(full=seq, is_embed=is_embed)
+
+
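Aside: the factory methods above build the `is_embed` mask lazily. A sketch of what `select_token_id` yields for a padded placeholder sequence; the token IDs below are hypothetical, and `None` is passed for the tokenizer since token-ID inputs never touch it:

```python
from vllm.multimodal.processing import PromptUpdateDetails

image_token_id, image_bos_id, image_eos_id = 32000, 32001, 32002
seq = [image_bos_id] + [image_token_id] * 3 + [image_eos_id]
details = PromptUpdateDetails.select_token_id(seq, embed_token_id=image_token_id)

# Only the inner feature placeholders receive multimodal embeddings;
# the BOS/EOS pad tokens are masked out.
mask = details.is_embed(None, details.full)
print(mask)  # tensor([False,  True,  True,  True, False])
```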
+ PromptUpdateInfo = Union[PromptSeq, PromptUpdateDetails]
+ """
+ The token sequence or text that are part of the update.
+
+ If only part of the content corresponds to feature placeholders, you can
+ use [`PromptUpdateDetails`][vllm.multimodal.processing.PromptUpdateDetails] to
+ specify which part.
+ """
+
+ PromptUpdateContent = Union[Callable[[int], PromptUpdateInfo],
+                             PromptUpdateInfo]
+ """
+ Given the index of the processed item within
+ [`modality`][vllm.multimodal.processing.PromptUpdate.modality],
+ output the corresponding token sequence (or text).
+
+ For convenience, you can directly pass in the token sequence (or text)
+ instead of a function if it does not depend on the input.
+ """
+
+
+ class UpdateMode(str, Enum):
+     INSERT = "insert"
+     REPLACE = "replace"
+
+
+ @dataclass
+ class PromptUpdate(ABC):
+     """
+     Defines how to update a prompt with placeholder tokens.
+     """
+
+     modality: str
+     """The modality for which the update is made."""
+
+     target: PromptUpdateTarget
+     """The token sequence (or text) to update."""
+
+     @property
+     @abstractmethod
+     def content(self) -> PromptUpdateContent:
+         """The placeholder tokens that are part of the update."""
+         raise NotImplementedError
+
+     @property
+     @abstractmethod
+     def mode(self) -> UpdateMode:
+         """Defines how to update the prompt."""
+         raise NotImplementedError
+
+     def _resolve_target(self, item_idx: int) -> UpdateTarget:
+         target = self.target
+         if callable(target):
+             target = target(item_idx)
+
+         return target
+
+     def _resolve_content(self, item_idx: int) -> PromptUpdateDetails:
+         content = self.content
+         if callable(content):
+             content = content(item_idx)
+
+         if not isinstance(content, PromptUpdateDetails):
+             content = PromptUpdateDetails.from_seq(content)
+
+         return content
+
+     def resolve(self, item_idx: int) -> "ResolvedPromptUpdate":
+         """
+         Given the index of the processed item within
+         [`modality`][vllm.multimodal.processing.PromptUpdate.modality],
+         output a copy of this object with its lazy attributes resolved.
+         """
+         return ResolvedPromptUpdate(
+             modality=self.modality,
+             item_idx=item_idx,
+             mode=self.mode,
+             target=self._resolve_target(item_idx),
+             content=self._resolve_content(item_idx),
+         )
+
+
+ @dataclass
+ class PromptInsertion(PromptUpdate):
+     """
+     Defines how to insert placeholder tokens into a prompt.
+
+     Example:
+
+     For each image, insert a number of ``<image>`` feature placeholders
+     equal to the feature size of the vision encoder after the ``<s>`` token:
+
+     ```python
+     PromptInsertion(
+         modality="image",
+         target="<s>",
+         insertion="<image>" * image_feature_size,
+     )
+     ```
+
+     Insert these tokens at the start of the prompt:
+
+     ```python
+     PromptInsertion(
+         modality="image",
+         target=PromptIndexTargets.start(),
+         insertion="<image>" * image_feature_size,
+     )
+     ```
+
+     Insert these tokens after a prefix ``Images:``:
+
+     ```python
+     PromptInsertion(
+         modality="image",
+         target=PromptIndexTargets.prefix("Images:"),
+         insertion="<image>" * image_feature_size,
+     )
+     ```
+
+     Insert these tokens at the end of the prompt:
+
+     ```python
+     PromptInsertion(
+         modality="image",
+         target=PromptIndexTargets.end(),
+         insertion="<image>" * image_feature_size,
+     )
+     ```
+     """
+
+     insertion: PromptUpdateContent = field(repr=False)
+     """
+     Given the index of the processed item within
+     [`modality`][vllm.multimodal.processing.PromptUpdate.modality],
+     output the token sequence (or text) to insert right after
+     [`target`][vllm.multimodal.processing.PromptUpdate.target].
+
+     For convenience, you can directly pass in the token sequence (or text)
+     instead of a function if it does not depend on the input.
+     """
+
+     @property
+     def content(self) -> PromptUpdateContent:
+         return self.insertion
+
+     @property
+     def mode(self) -> UpdateMode:
+         return UpdateMode.INSERT
+
+
+ @dataclass
+ class PromptReplacement(PromptUpdate):
+     """
+     Defines how to replace portions of an input prompt with placeholder tokens.
+
+     Example:
+
+     For each image, replace one ``<image>`` input placeholder in the prompt
+     with a number of ``<image>`` feature placeholders
+     equal to the feature size of the vision encoder:
+
+     ```python
+     PromptReplacement(
+         modality="image",
+         target="<image>",
+         replacement="<image>" * image_feature_size,
+     )
+     ```
+
+     As above, but further pad the feature placeholders with ``<image_bos>``
+     and ``<image_eos>``, which are not supposed to be passed to the vision
+     encoder:
+
+     ```python
+     PromptReplacement(
+         modality="image",
+         target="<image>",
+         replacement=PromptUpdateDetails.select_text(
+             "".join([
+                 "<image_bos>",
+                 "<image>" * image_feature_size,
+                 "<image_eos>",
+             ]),
+             embed_text="<image>",
+         ),
+     )
+     ```
+
+     To avoid unnecessary tokenization during prompt replacement,
+     we recommend passing token sequences instead of text:
+
+     ```python
+     PromptReplacement(
+         modality="image",
+         target=[image_token_id],
+         replacement=PromptUpdateDetails.select_token_id(
+             [image_bos_id] + [image_token_id] * image_feature_size
+             + [image_eos_id],
+             embed_token_id=image_token_id,
+         ),
+     )
+     ```
+     """
+
+     replacement: PromptUpdateContent = field(repr=False)
+     """
+     Given the index of the processed item within
+     [`modality`][vllm.multimodal.processing.PromptUpdate.modality],
+     output the token sequence (or text) to replace
+     [`target`][vllm.multimodal.processing.PromptUpdate.target].
+
+     For convenience, you can directly pass in the token sequence (or text)
+     instead of a function if it does not depend on the input.
+     """
+
+     @property
+     def content(self) -> PromptUpdateContent:
+         return self.replacement
+
+     @property
+     def mode(self) -> UpdateMode:
+         return UpdateMode.REPLACE
+
+
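Aside: the two concrete updates differ only in `mode`: an insertion keeps the matched target and adds content at the resolved position, while a replacement consumes the matched span. A sketch contrasting them; the token ID is hypothetical:

```python
from vllm.multimodal.processing import (PromptIndexTargets, PromptInsertion,
                                        PromptReplacement, UpdateMode)

IMG = 32000  # hypothetical placeholder token ID

repl = PromptReplacement(
    modality="image",
    target=[IMG],            # the matched span is consumed...
    replacement=[IMG] * 4,   # ...and this content takes its place
)
ins = PromptInsertion(
    modality="image",
    target=PromptIndexTargets.start(),  # nothing is consumed...
    insertion=[IMG] * 4,                # ...content is added at the position
)

assert repl.mode is UpdateMode.REPLACE
assert ins.mode is UpdateMode.INSERT
# `resolve(item_idx)` freezes the lazy target/content for the i-th item:
print(repl.resolve(0).content.full)  # [32000, 32000, 32000, 32000]
```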
+ class _HasModalityAttr(Protocol):
+     modality: str
+
+
+ class _HasModalityProp(Protocol):
+
+     @property
+     def modality(self) -> str:
+         ...
+
+
+ _M = TypeVar("_M", bound=Union[_HasModalityAttr, _HasModalityProp])
+
+
+ def full_groupby_modality(values: Iterable[_M]) -> ItemsView[str, list[_M]]:
+     """Convenience function to apply [`full_groupby`][vllm.utils.full_groupby]
+     based on modality."""
+     return full_groupby(values, key=lambda x: x.modality)
+
+
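Aside: any object exposing a `modality` attribute or property can be grouped this way, and unlike `itertools.groupby`, non-contiguous values with the same key land in one group. For illustration; the `_Item` dataclass is hypothetical:

```python
from dataclasses import dataclass

from vllm.multimodal.processing import full_groupby_modality

@dataclass
class _Item:  # any object with a `modality` attribute qualifies
    modality: str
    name: str

items = [_Item("image", "a"), _Item("audio", "b"), _Item("image", "c")]
print({m: [x.name for x in xs] for m, xs in full_groupby_modality(items)})
# {'image': ['a', 'c'], 'audio': ['b']}
```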
+ class PromptTargetMatch(NamedTuple):
+     start_idx: int
+     end_idx: int
+
+
+ @dataclass(frozen=True)
+ class ResolvedPromptUpdate:
+     """
+     A [`PromptUpdate`][vllm.multimodal.processing.PromptUpdate] with its
+     lazy attributes resolved, apart from those related to tokenization.
+     """
+
+     modality: str
+     """The modality for which the update is made."""
+
+     item_idx: int
+     """The index within `modality` of the item this update pertains to."""
+
+     mode: UpdateMode
+     """Defines how to update the prompt."""
+
+     target: UpdateTarget
+     """The token sequence (or text) to update."""
+
+     content: PromptUpdateDetails = field(repr=False)
+     """The placeholder tokens that are part of the update."""
+
+     def iter_token_matches(
+         self,
+         prompt: list[int],
+         tokenizer: AnyTokenizer,
+         *,
+         start_idx: int = 0,
+     ) -> Generator[PromptTargetMatch]:
+         """Yield each instance of `self.target` found in `prompt`."""
+         target = self.target
+
+         if isinstance(target, PromptIndex):
+             match_idx = target.get_match_index(tokenizer, prompt, start_idx)
+             if match_idx is not None:
+                 yield PromptTargetMatch(match_idx, match_idx)
+
+             return
+
+         target_token_ids = _seq2tokens(tokenizer, target)
+
+         for match in iter_token_matches(prompt,
+                                         target_token_ids,
+                                         start_idx=start_idx):
+             yield PromptTargetMatch(match.start_idx, match.end_idx)
+
+     def iter_text_matches(
+         self,
+         prompt: str,
+         tokenizer: AnyTokenizer,
+         *,
+         start_idx: int = 0,
+     ) -> Generator[PromptTargetMatch]:
+         """Yield each instance of `self.target` found in `prompt`."""
+         target = self.target
+
+         if isinstance(target, PromptIndex):
+             match_idx = target.get_match_index(tokenizer, prompt, start_idx)
+             if match_idx is not None:
+                 yield PromptTargetMatch(match_idx, match_idx)
+
+             return
+
+         target_text = _seq2text(tokenizer, target)
+
+         for match in re.finditer(re.escape(target_text), prompt,
+                                  pos=start_idx):
+             yield PromptTargetMatch(match.start(), match.end())
+
+     def iter_matches(
+         self,
+         prompt: Union[list[int], str],
+         tokenizer: AnyTokenizer,
+         *,
+         start_idx: int = 0,
+     ) -> Generator[PromptTargetMatch]:
+         """Yield each instance of `self.target` found in `prompt`."""
+         if isinstance(prompt, str):
+             return self.iter_text_matches(prompt,
+                                           tokenizer,
+                                           start_idx=start_idx)
+
+         return self.iter_token_matches(prompt, tokenizer, start_idx=start_idx)
+
+     def with_target(self, target: UpdateTarget):
+         return replace(self, target=target)
+
+     def with_content(self, content: PromptUpdateInfo):
+         if not isinstance(content, PromptUpdateDetails):
+             content = PromptUpdateDetails.from_seq(content)
+
+         return replace(self, content=content)
+
+
+ class _TokenMatch(NamedTuple):
+     start_idx: int
+     end_idx: int
+
+
+ def iter_token_matches(
+     token_ids: list[int],
+     match_ids: list[int],
+     *,
+     start_idx: int = 0,
+ ) -> Generator[_TokenMatch]:
+     """
+     Yield each occurrence of `match_ids` in `token_ids`.
+
+     Note that empty matches are ignored.
+     """
+     prompt_len = len(token_ids)
+     match_len = len(match_ids)
+
+     if match_len == 0:
+         return
+
+     while start_idx < prompt_len - match_len + 1:
+         end_idx = start_idx + match_len
+
+         if token_ids[start_idx:end_idx] == match_ids:
+             yield _TokenMatch(start_idx=start_idx, end_idx=end_idx)
+
+             # Exclude overlapping matches
+             start_idx = end_idx
+         else:
+             start_idx += 1
+
+
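Aside: a worked example of the non-overlapping scan above; after a hit, the cursor jumps to `end_idx`, so a match that starts inside a previous match is never reported:

```python
from vllm.multimodal.processing import iter_token_matches

token_ids = [0, 7, 7, 7, 0, 7, 7]
matches = list(iter_token_matches(token_ids, [7, 7]))
print([(m.start_idx, m.end_idx) for m in matches])
# [(1, 3), (5, 7)] -- the overlapping candidate at index 2 is skipped
```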
+ def replace_token_matches(
+     token_ids: list[int],
+     match_ids: list[int],
+     new_ids: list[int],
+ ) -> list[int]:
+     """
+     Replace each occurrence of `match_ids` in `token_ids`
+     with `new_ids`.
+
+     Note that empty matches are ignored.
+     """
+     out_seqs = list[list[int]]()
+     prev_end_idx = 0
+
+     for match in iter_token_matches(token_ids, match_ids):
+         start_idx = match.start_idx
+         end_idx = match.end_idx
+
+         out_seqs.append(token_ids[prev_end_idx:start_idx])
+         out_seqs.append(new_ids)
+         prev_end_idx = end_idx
+
+     out_seqs.append(token_ids[prev_end_idx:])
+
+     return flatten_2d_lists(out_seqs)
+
+
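Aside: `replace_token_matches` builds on the same scan; for illustration:

```python
from vllm.multimodal.processing import replace_token_matches

# Replace every [7, 7] with [8, 8, 8]:
print(replace_token_matches([0, 7, 7, 1, 7, 7], [7, 7], [8, 8, 8]))
# [0, 8, 8, 8, 1, 8, 8, 8]
```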
+ @dataclass
+ class PlaceholderFeaturesInfo:
+     modality: str
+     item_idx: int
+     start_idx: int
+     tokens: list[int]
+     is_embed: Optional[torch.Tensor]
+
+     @property
+     def length(self) -> int:
+         return len(self.tokens)
+
+     def to_range(self) -> PlaceholderRange:
+         # TODO: Is it worth it to optimize this by stripping the
+         # leading and ending positions where `is_embed=False`?
+         return PlaceholderRange(
+             offset=self.start_idx,
+             length=self.length,
+             is_embed=self.is_embed,
+         )
+
+
+ _MatchToApply = tuple[tuple[str, int], tuple[PromptTargetMatch, int]]
+
+
+ def _find_matches(
+     prompt: _S,
+     mm_prompt_updates: "MultiModalPromptUpdates",
+     tokenizer: AnyTokenizer,
+     *,
+     prev_end_idx: int = 0,
+     current_result: "MultiModalPromptUpdatesApplyResult",
+ ) -> tuple[Optional[UpdateMode], list[_MatchToApply]]:
+     mode: Optional[UpdateMode] = None
+     mm_matches = dict[tuple[str, int], tuple[PromptTargetMatch, int]]()
+
+     for modality, modality_updates in mm_prompt_updates.items():
+         for item_idx, item_updates in enumerate(modality_updates):
+             if current_result[modality][item_idx] is not None:
+                 continue  # Updates have already been applied for this item
+
+             for update_idx, update in enumerate(item_updates):
+                 if (modality, item_idx) in mm_matches:
+                     break  # Already found a match for this item
+
+                 for match in update.iter_matches(
+                         prompt,
+                         tokenizer,
+                         start_idx=prev_end_idx,
+                 ):
+                     # All matches should share the same mode
+                     if mode is None:
+                         mode = update.mode
+                     elif mode != update.mode:
+                         continue
+
+                     mm_matches[(modality, item_idx)] = match, update_idx
+                     break  # Get only the first valid match per item
+
+     # Prioritize earlier matches
+     matches_to_apply = sorted(mm_matches.items(), key=lambda item: item[1][0])
+
+     # To avoid conflicts, only replace one non-empty item at a time
+     if mode == UpdateMode.REPLACE:
+         matches_to_apply_ = list[_MatchToApply]()
+         has_non_empty_matches = False
+
+         for item in matches_to_apply:
+             _, (match, _) = item
+             if match.start_idx == match.end_idx:
+                 matches_to_apply_.append(item)
+             elif not has_non_empty_matches:
+                 has_non_empty_matches = True
+                 matches_to_apply_.append(item)
+
+         matches_to_apply = matches_to_apply_
+
+     return mode, matches_to_apply
+
+
+ def _apply_matches(
+     prompt: _S,
+     mm_prompt_updates: "MultiModalPromptUpdates",
+     tokenizer: AnyTokenizer,
+ ) -> tuple[list[_S], "MultiModalPromptUpdatesApplyResult"]:
+     prompt_len = len(prompt)
+
+     out_seqs = list[Union[str, list[int]]]()
+     out_result: MultiModalPromptUpdatesApplyResult = {
+         m: [None] * len(items)
+         for m, items in mm_prompt_updates.items()
+     }
+
+     start_idx = prev_end_idx = 0
+     while start_idx < max(prompt_len, 1):  # Allow inserts into empty prompt
+         found = False
+
+         mode, matches_to_apply = _find_matches(
+             prompt,
+             mm_prompt_updates,
+             tokenizer,
+             prev_end_idx=prev_end_idx,
+             current_result=out_result,
+         )
+
+         if mode is not None:
+             for (modality, item_idx), (match, update_idx) in matches_to_apply:
+                 found = True
+
+                 matched_update = mm_prompt_updates[modality][item_idx][
+                     update_idx]
+                 matched_content = matched_update.content.full
+
+                 if mode == UpdateMode.INSERT:
+                     end_idx_to_insert = match.end_idx
+                 elif mode == UpdateMode.REPLACE:
+                     end_idx_to_insert = match.start_idx
+                 else:
+                     assert_never(mode)
+
+                 out_seqs.append(prompt[prev_end_idx:end_idx_to_insert])
+                 out_seqs.append(
+                     _seq2text(tokenizer, matched_content
+                               ) if isinstance(prompt, str) else _seq2tokens(
+                                   tokenizer, matched_content))
+                 out_result[modality][item_idx] = update_idx
+
+                 # Exclude overlapping matches
+                 start_idx = prev_end_idx = match.end_idx
+
+         if not found:
+             start_idx += 1
+
+     out_seqs.append(prompt[prev_end_idx:])
+
+     return cast(list[_S], out_seqs), out_result
+
+
+ def apply_token_matches(
+     prompt: list[int],
+     mm_prompt_updates: "MultiModalPromptUpdates",
+     tokenizer: AnyTokenizer,
+ ) -> tuple[list[int], "MultiModalPromptUpdatesApplyResult"]:
+     """
+     Apply the updates in `mm_prompt_updates` to `prompt`.
+
+     Matches are exclusive even when multiple modalities share
+     the same placeholder tokens. In that case, the modality that
+     appears earlier in `mm_prompt_updates` takes priority.
+     """
+     token_id_seqs, result = _apply_matches(prompt, mm_prompt_updates,
+                                            tokenizer)
+
+     return flatten_2d_lists(token_id_seqs), result
+
+
+ def apply_text_matches(
+     prompt: str,
+     mm_prompt_updates: "MultiModalPromptUpdates",
+     tokenizer: AnyTokenizer,
+ ) -> tuple[str, "MultiModalPromptUpdatesApplyResult"]:
+     """
+     Apply the updates in `mm_prompt_updates` to `prompt`.
+
+     Matches are exclusive even when multiple modalities share
+     the same placeholder tokens. In that case, the modality that
+     appears earlier in `mm_prompt_updates` takes priority.
+     """
+     texts, result = _apply_matches(prompt, mm_prompt_updates, tokenizer)
+
+     return "".join(texts), result
+
+
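Aside: putting the pieces together for token prompts. The sketch below replaces a single hypothetical `<image>` token (ID 32000) with three feature placeholders; `tokenizer=None` is safe only because every target and content here is already a token sequence:

```python
from vllm.multimodal.processing import PromptReplacement, apply_token_matches

IMG = 32000  # hypothetical placeholder token ID
update = PromptReplacement(
    modality="image",
    target=[IMG],
    replacement=[IMG] * 3,
).resolve(0)

new_ids, result = apply_token_matches([1, IMG, 2], {"image": [[update]]}, None)
print(new_ids)  # [1, 32000, 32000, 32000, 2]
print(result)   # {'image': [0]} -- update index 0 was applied for image item 0
```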
+ def _iter_placeholders(
+     prompt: list[int],
+     mm_prompt_updates: "MultiModalPromptUpdates",
+     tokenizer: AnyTokenizer,
+ ) -> Iterable[PlaceholderFeaturesInfo]:
+     """
+     Yield each set of placeholder tokens found in `prompt`.
+
+     Matches are exclusive even when multiple modalities share
+     the same placeholder tokens. In that case, the modality that
+     appears earlier in `mm_prompt_updates` takes priority.
+
+     Note that empty matches are ignored.
+     """
+     prompt_len = len(prompt)
+     mm_item_counts = {m: len(items) for m, items in mm_prompt_updates.items()}
+
+     item_idx_by_modality = defaultdict[str, int](lambda: 0)
+
+     start_idx = 0
+     while start_idx < prompt_len:
+         found = False
+
+         for modality, modality_updates in mm_prompt_updates.items():
+             item_idx = item_idx_by_modality[modality]
+             if item_idx >= mm_item_counts.get(modality, 0):
+                 continue
+
+             for update in modality_updates[item_idx]:
+                 content = update.content
+                 content_tokens_full = _seq2tokens(tokenizer, content.full)
+                 content_len_full = len(content_tokens_full)
+                 end_idx_full = start_idx + content_len_full
+
+                 if content_len_full == 0 or end_idx_full > prompt_len:
+                     continue
+
+                 if prompt[start_idx:end_idx_full] == content_tokens_full:
+                     content_is_embed = content.is_embed
+                     if content_is_embed is not None:
+                         content_is_embed = content_is_embed(
+                             tokenizer, content.full)
+
+                     yield PlaceholderFeaturesInfo(
+                         modality=modality,
+                         item_idx=item_idx,
+                         start_idx=start_idx,
+                         tokens=content_tokens_full,
+                         is_embed=content_is_embed,
+                     )
+
+                     # Exclude overlapping matches
+                     start_idx = end_idx_full
+                     item_idx_by_modality[modality] += 1
+                     found = True
+                     break
+
+             if found:
+                 break  # Go back to the outer while loop
+
+         if not found:
+             start_idx += 1
+
+
+ def find_mm_placeholders(
+     prompt: list[int],
+     mm_prompt_updates: "MultiModalPromptUpdates",
+     tokenizer: AnyTokenizer,
+ ) -> Mapping[str, list[PlaceholderFeaturesInfo]]:
+     it = _iter_placeholders(prompt, mm_prompt_updates, tokenizer)
+     return dict(full_groupby_modality(it))
+
+
+ class BaseProcessingInfo:
+     """Base class to provide the information necessary for data processing."""
+
+     def __init__(self, ctx: InputProcessingContext) -> None:
+         super().__init__()
+
+         self.ctx = ctx
+
+     @property
+     def model_id(self) -> str:
+         return self.ctx.model_config.model
+
+     def get_tokenizer(self) -> AnyTokenizer:
+         return self.ctx.tokenizer
+
+     def get_hf_config(self) -> "PretrainedConfig":
+         return self.ctx.get_hf_config()
+
+     def get_hf_processor(self, **kwargs: object) -> "ProcessorMixin":
+         """
+         Subclasses can override this method to handle
+         specific kwargs from model config or user inputs.
+         """
+         return self.ctx.get_hf_processor(**kwargs)
+
+     @abstractmethod
+     def get_supported_mm_limits(self) -> Mapping[str, Optional[int]]:
+         """
+         Return the maximum supported number of items for each modality.
+
+         A value of `None` means unlimited number of items.
+
+         Omitting a modality from the returned dictionary means that
+         it is not supported at all.
+         """
+         raise NotImplementedError
+
+     def get_allowed_mm_limits(self) -> Mapping[str, int]:
+         """Return the maximum allowed number of items for each modality."""
+         supported_mm_limits = self.get_supported_mm_limits()
+         mm_config = self.ctx.get_mm_config()
+
+         allowed_limits = dict[str, int]()
+         for modality, supported_limit in supported_mm_limits.items():
+             user_limit = mm_config.get_limit_per_prompt(modality)
+
+             allowed_limits[modality] = (user_limit if supported_limit is None
+                                         else min(user_limit, supported_limit))
+
+         return allowed_limits
+
+     def get_mm_max_tokens_per_item(
+         self,
+         seq_len: int,
+         mm_counts: Mapping[str, int],
+     ) -> Optional[Mapping[str, int]]:
+         """
+         Return the maximum number of tokens per item for each modality.
+
+         When `None` (the default) is returned, vLLM will generate dummy inputs
+         (images/videos) at maximum possible sizes and process them to determine
+         the maximum token count per modality.
+
+         This approach works but can be very slow for certain models (e.g.,
+         Qwen2.5-VL), leading to very long startup time. For better performance,
+         each model can override this method to return pre-computed maximum
+         token counts, avoiding the need for dummy input generation and
+         processing.
+
+         Note:
+             The maximum number of tokens per item of each modality returned
+             from this function should respect the model's maximum sequence
+             length and the maximum number of items of each modality allowed,
+             and agree with dummy inputs (images/videos) at maximum possible
+             sizes.
+         """
+         return None
+
+
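Aside: a minimal sketch of a concrete `BaseProcessingInfo`, assuming a hypothetical model that accepts at most one image and unlimited audio clips per prompt; `get_allowed_mm_limits` then clamps these against the user's `--limit-mm-per-prompt` settings:

```python
from collections.abc import Mapping
from typing import Optional

from vllm.multimodal.processing import BaseProcessingInfo

class MyProcessingInfo(BaseProcessingInfo):
    """Hypothetical info class for a single-image model."""

    def get_supported_mm_limits(self) -> Mapping[str, Optional[int]]:
        # At most 1 image; `None` means the audio count is unbounded.
        return {"image": 1, "audio": None}
```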
+ _I = TypeVar("_I", bound=BaseProcessingInfo)
+
+ MultiModalHashes = dict[str, list[str]]
+ """
+ A collection of hashes with a similar structure as
+ [`MultiModalKwargsItems`][vllm.multimodal.inputs.MultiModalKwargsItems].
+ """
+
+ MultiModalPromptUpdates = Mapping[str, list[Sequence[ResolvedPromptUpdate]]]
+ """
+ A collection of prompt updates with a similar structure as
+ [`MultiModalKwargsItems`][vllm.multimodal.inputs.MultiModalKwargsItems].
+ """
+
+ MultiModalPromptUpdatesApplyResult = Mapping[str, list[Optional[int]]]
+ """
+ For an item `MultiModalPromptUpdates[k][i]`,
+ `MultiModalPromptUpdatesApplyResult[k][i]` represents the index of the
+ `ResolvedPromptUpdate` instance that has been applied, or `None` if none of the
+ `ResolvedPromptUpdate` instances have been applied.
+ """
+
+
+ class MultiModalProcessingInfo(NamedTuple):
+     kwargs: MultiModalKwargsOptionalItems
+     hashes: MultiModalHashes
+     prompt_updates: MultiModalPromptUpdates
+
+
+ class BaseMultiModalProcessor(ABC, Generic[_I]):
+     """
+     Abstract base class to process multi-modal inputs to be used in vLLM.
+
+     Not to be confused with `transformers.ProcessorMixin`.
+     """
+
+     def __init__(
+         self,
+         info: _I,
+         dummy_inputs: "BaseDummyInputsBuilder[_I]",
+         *,
+         cache: Optional["BaseMultiModalProcessorCache"] = None,
+     ) -> None:
+         super().__init__()
+
+         self.info = info
+         self.dummy_inputs = dummy_inputs
+         self.cache = cache
+
+         self.data_parser = self._get_data_parser()
+
+         # Avoid unnecessary recomputation
+         self._supported_mm_limits = self.info.get_supported_mm_limits()
+         self._allowed_mm_limits = self.info.get_allowed_mm_limits()
+
+     @property
+     def supported_mm_limits(self):
+         return self._supported_mm_limits
+
+     @property
+     def allowed_mm_limits(self):
+         return self._allowed_mm_limits
+
+     def __call__(
+         self,
+         prompt: str,
+         mm_data: MultiModalDataDict,
+         hf_processor_mm_kwargs: Mapping[str, object],
+         *,
+         mm_uuids: Optional[MultiModalUUIDDict] = None,
+     ) -> MultiModalInputs:
+         return self.apply(prompt,
+                           mm_data,
+                           hf_processor_mm_kwargs,
+                           mm_uuids=mm_uuids)
+
+     def _get_data_parser(self) -> MultiModalDataParser:
+         """
+         Construct a parser to preprocess multi-modal data items
+         before passing them to
+         [`_get_hf_mm_data`][vllm.multimodal.processing.BaseMultiModalProcessor._get_hf_mm_data].
+
+         You can support additional modalities by creating a subclass
+         of [`MultiModalDataParser`][vllm.multimodal.parse.MultiModalDataParser]
+         that has additional subparsers.
+         """
+         return MultiModalDataParser()
+
+     def validate_num_items(
+         self,
+         modality: str,
+         num_items: int,
+     ) -> None:
+         supported_limit = self.supported_mm_limits.get(modality, 0)
+         allowed_limit = self.allowed_mm_limits.get(modality, 0)
+
+         if supported_limit is None:
+             supported_limit = allowed_limit
+
+         limit = min(supported_limit, allowed_limit)
+
+         if num_items > limit:
+             msg = (f"At most {limit} {modality}(s) may be provided in "
+                    "one prompt.")
+
+             if num_items <= supported_limit:
+                 msg += " Set `--limit-mm-per-prompt` to increase this limit."
+
+             raise ValueError(msg)
+
+     def _to_mm_items(
+         self,
+         mm_data: MultiModalDataDict,
+     ) -> MultiModalDataItems:
+         """
+         Normalize
+         [`MultiModalDataDict`][vllm.multimodal.inputs.MultiModalDataDict]
+         to [`MultiModalDataItems`][vllm.multimodal.parse.MultiModalDataItems]
+         before passing them to
+         [`_get_hf_mm_data`][vllm.multimodal.processing.BaseMultiModalProcessor._get_hf_mm_data].
+         """
+         mm_items = self.data_parser.parse_mm_data(mm_data)
+
+         for modality, items in mm_items.items():
+             self.validate_num_items(modality, len(items))
+
+         return mm_items
+
+     @abstractmethod
+     def _get_mm_fields_config(
+         self,
+         hf_inputs: "BatchFeature",
+         hf_processor_mm_kwargs: Mapping[str, object],
+     ) -> Mapping[str, MultiModalFieldConfig]:
+         """Given the HF-processed data, output the metadata of each field."""
+         raise NotImplementedError
+
+     @abstractmethod
+     def _get_prompt_updates(
+         self,
+         mm_items: MultiModalDataItems,
+         hf_processor_mm_kwargs: Mapping[str, object],
+         out_mm_kwargs: MultiModalKwargsItems,
+     ) -> Sequence[PromptUpdate]:
+         """
+         Given the original multi-modal items for this modality
+         and HF-processed data, output the updates to perform.
+
+         The information returned by this method is used to update token inputs
+         which bypass the HF processor. It is also used to update the output of
+         the HF processor if the HF processor does not apply prompt updates to
+         text inputs.
+
+         Moreover, this information is critical to determine the token positions
+         in order to construct
+         [`PlaceholderRange`][vllm.multimodal.inputs.PlaceholderRange]
+         for each multi-modal item.
+         """
+         raise NotImplementedError
+
+     def _bind_and_group_updates(
+         self,
+         prompt_updates: Sequence[PromptUpdate],
+         mm_item_counts: Mapping[str, int],
+     ) -> MultiModalPromptUpdates:
+         return {
+             modality: [[update.resolve(item_idx) for update in updates]
+                        for item_idx in range(mm_item_counts.get(modality, 0))]
+             for modality, updates in full_groupby_modality(prompt_updates)
+         }
+
+     def _get_mm_prompt_updates(
+         self,
+         mm_items: MultiModalDataItems,
+         hf_processor_mm_kwargs: Mapping[str, object],
+         out_mm_kwargs: MultiModalKwargsItems,
+     ) -> MultiModalPromptUpdates:
+         unbound_prompt_updates = self._get_prompt_updates(
+             mm_items=mm_items,
+             hf_processor_mm_kwargs=hf_processor_mm_kwargs,
+             out_mm_kwargs=out_mm_kwargs,
+         )
+
+         mm_prompt_updates = self._bind_and_group_updates(
+             unbound_prompt_updates,
+             mm_items.get_all_counts(),
+         )
+
+         for modality, prompt_updates in mm_prompt_updates.items():
+             for item_idx, item_prompt_updates in enumerate(prompt_updates):
+                 if len(item_prompt_updates) > 1:
+                     logger.warning_once(
+                         "Detected %d prompt updates for `mm_items[%r][%s]`. "
+                         "Multiple prompt updates per item is now "
+                         "deprecated and may be removed in v0.13. "
+                         "Instead, please specify dynamic update targets "
+                         "in the same prompt update definition by passing "
+                         "a function to `PromptUpdate.target`.",
+                         len(item_prompt_updates),
+                         modality,
+                         item_idx,
+                     )
+
+         return mm_prompt_updates
+
+     def _find_mm_placeholders(
+         self,
+         new_token_ids: list[int],
+         mm_prompt_updates: MultiModalPromptUpdates,
+     ) -> Mapping[str, list[PlaceholderFeaturesInfo]]:
+         tokenizer = self.info.get_tokenizer()
+
+         return find_mm_placeholders(new_token_ids, mm_prompt_updates,
+                                     tokenizer)
+
+     def _get_hf_mm_data(
+         self,
+         mm_items: MultiModalDataItems,
+     ) -> tuple[Mapping[str, object], Mapping[str, object]]:
+         processor_data = dict[str, object]()
+         passthrough_data = dict[str, object]()
+
+         for items in mm_items.values():
+             processor_data.update(items.get_processor_data())
+             passthrough_data.update(items.get_passthrough_data())
+
+         return processor_data, passthrough_data
+
+     def _call_hf_processor(
+         self,
+         prompt: str,
+         # Not to be confused with `mm_data` in `self.apply`.
+         # This refers to the data to be passed to HF processor.
+         mm_data: Mapping[str, object],
+         mm_kwargs: Mapping[str, object],
+         tok_kwargs: Mapping[str, object],
+     ) -> "BatchFeature":
+         """
+         Call the HF processor on the prompt text and
+         associated multi-modal data.
+         """
+         return self.info.ctx.call_hf_processor(
+             self.info.get_hf_processor(**mm_kwargs),
+             dict(text=prompt, **mm_data),
+             dict(**mm_kwargs, **tok_kwargs),
+         )
+
+     def _hf_processor_applies_updates(
+         self,
+         prompt_text: str,
+         mm_items: MultiModalDataItems,
+         hf_processor_mm_kwargs: Mapping[str, object],
+         tokenization_kwargs: Mapping[str, object],
+     ) -> bool:
+         """
+         Return whether the HF processor applies prompt updates.
+
+         For most HF processors, this should be `True` when multi-modal
+         data items are passed, but `False` when multi-modal embeddings
+         are passed.
+         """
+         return not any(
+             isinstance(items, (EmbeddingItems, DictEmbeddingItems))
+             for items in mm_items.values())
+
+     def _apply_hf_processor_text_mm(
+         self,
+         prompt_text: str,
+         mm_items: MultiModalDataItems,
+         hf_processor_mm_kwargs: Mapping[str, object],
+         tokenization_kwargs: Mapping[str, object],
+     ) -> tuple[list[int], "BatchFeature", bool]:
+         """
+         Apply the HF processor on the prompt text and multi-modal data
+         together.
+
+         In addition, return whether prompt updates have been applied.
+         """
+         processor_data, passthrough_data = self._get_hf_mm_data(mm_items)
+
+         processed_data = self._call_hf_processor(
+             prompt=prompt_text,
+             mm_data=processor_data,
+             mm_kwargs=hf_processor_mm_kwargs,
+             tok_kwargs=tokenization_kwargs,
+         )
+         processed_data.update(passthrough_data)
+
+         prompt_ids, = processed_data.pop("input_ids").tolist()
+
+         is_update_applied = self._hf_processor_applies_updates(
+             prompt_text=prompt_text,
+             mm_items=mm_items,
+             hf_processor_mm_kwargs=hf_processor_mm_kwargs,
+             tokenization_kwargs=tokenization_kwargs,
+         )
+
+         return prompt_ids, processed_data, is_update_applied
+
+     def _apply_hf_processor_text_only(
+         self,
+         prompt_text: str,
+         tokenization_kwargs: Mapping[str, object],
+     ) -> list[int]:
+         """
+         Apply the HF processor on the prompt text only.
+
+         Since the HF processor requires that text and multi-modal items
+         correspond to each other, we pass empty multi-modal data
+         to go along with the text.
+         """
+         prompt_ids, _, _ = self._apply_hf_processor_text_mm(
+             prompt_text=prompt_text,
+             mm_items=MultiModalDataItems({}),
+             hf_processor_mm_kwargs={},
+             tokenization_kwargs=tokenization_kwargs,
+         )
+
+         return prompt_ids
+
+     def _apply_hf_processor_tokens_only(
+         self,
+         prompt_tokens: list[int],
+     ) -> list[int]:
+         """
+         Apply the HF processor on the prompt tokens only.
+
+         Most HF processors accept prompt text but not prompt tokens.
+         If the HF processor adds or removes tokens that are not related to
+         multi-modal data, you should override this method so it is consistent
+         with the output of
+         [`_apply_hf_processor_text_only`][vllm.multimodal.processing.BaseMultiModalProcessor._apply_hf_processor_text_only]
+         on the corresponding text.
+         """
+         return prompt_tokens
+
+     def _apply_hf_processor_mm_only(
+         self,
+         mm_items: MultiModalDataItems,
+         hf_processor_mm_kwargs: Mapping[str, object],
+         tokenization_kwargs: Mapping[str, object],
+     ) -> "BatchFeature":
+         """
+         Apply the HF processor on the multi-modal data only.
+
+         Since the HF processor requires that text and multi-modal items
+         correspond to each other, we generate dummy text using
+         [`DummyInputsBuilder`][vllm.multimodal.profiling.BaseDummyInputsBuilder]
+         to go along with the multi-modal data.
+         """
+         mm_counts = mm_items.get_all_counts()
+
+         _, mm_processed_data, _ = self._apply_hf_processor_text_mm(
+             prompt_text=self.dummy_inputs.get_dummy_text(mm_counts),
+             mm_items=mm_items,
+             hf_processor_mm_kwargs=hf_processor_mm_kwargs,
+             tokenization_kwargs=tokenization_kwargs,
+         )
+
+         return mm_processed_data
+
+     def _apply_hf_processor_main(
+         self,
+         prompt: Union[str, list[int]],
+         mm_items: MultiModalDataItems,
+         hf_processor_mm_kwargs: Mapping[str, object],
+         tokenization_kwargs: Mapping[str, object],
+         *,
+         enable_hf_prompt_update: bool,
+     ) -> tuple[list[int], "BatchFeature", bool]:
+         """
+         Apply the HF processor on the prompt text and multi-modal data.
+
+         In addition, return whether prompt updates have been applied
+         (for most HF processors, this should be `True`).
+
+         Note:
+             If `enable_hf_prompt_update=True`, we use the HF processor
+             to perform prompt updates if available; the HF processor requires
+             that the prompt corresponds to the multi-modal items.
+         """
+         if isinstance(prompt, str):
+             if enable_hf_prompt_update:
+                 return self._apply_hf_processor_text_mm(
+                     prompt_text=prompt,
+                     mm_items=mm_items,
+                     hf_processor_mm_kwargs=hf_processor_mm_kwargs,
+                     tokenization_kwargs=tokenization_kwargs,
+                 )
+
+             prompt_ids = self._apply_hf_processor_text_only(
+                 prompt, tokenization_kwargs)
+         else:
+             prompt_ids = self._apply_hf_processor_tokens_only(prompt)
+
+         mm_processed_data = self._apply_hf_processor_mm_only(
+             mm_items=mm_items,
+             hf_processor_mm_kwargs=hf_processor_mm_kwargs,
+             tokenization_kwargs=tokenization_kwargs,
+         )
+
+         return prompt_ids, mm_processed_data, False
+
+     def _hash_mm_items(
+         self,
+         mm_items: MultiModalDataItems,
+         hf_processor_mm_kwargs: Mapping[str, object],
+         tokenization_kwargs: Mapping[str, object],
+         *,
+         mm_uuids: Optional[MultiModalUUIDDict] = None,
+     ) -> MultiModalHashes:
+         """Create MM hashes to be returned (only used in V1).
+
+         Note: When overrides are provided via callers of `apply`,
+         `_hash_mm_items` will be bypassed and the overrides will be used.
+         """
+         model_id = self.info.model_id
+
+         hashes: MultiModalHashes = {}
+         mm_uuids = mm_uuids or {}
+
+         for modality, items in mm_items.items():
+             if modality in mm_uuids:
+                 mm_uuids_per_modality = mm_uuids[modality]
+                 if isinstance(mm_uuids_per_modality, str):
+                     mm_uuids_per_modality = [mm_uuids_per_modality]
+
+                 # For None entries, compute a hash; otherwise, use provided ID.
+                 computed: list[str] = []
+                 for i, item in enumerate(items):
+                     item_uuid = mm_uuids_per_modality[i]
+
+                     # NOTE: Even if an item_uuid is provided, we still compute
+                     # a hash if `hf_processor_mm_kwargs` or
+                     # `tokenization_kwargs` are provided. This is because the
+                     # processed multimodal inputs can be different depending
+                     # on the processor kwargs.
+                     if item_uuid is None or \
+                         hf_processor_mm_kwargs or \
+                         tokenization_kwargs:
+
+                         # NOTE: use provided hash string to hash with kwargs
+                         # if available for better performance.
+                         item = item_uuid if item_uuid is not None else item
+                         computed.append(
+                             MultiModalHasher.hash_kwargs(
+                                 model_id=model_id,
+                                 **{modality: item},
+                                 **hf_processor_mm_kwargs,
+                                 **tokenization_kwargs))
+                     else:
+                         computed.append(item_uuid)
+                 hashes[modality] = computed
+             else:
+                 hashes[modality] = [
+                     MultiModalHasher.hash_kwargs(model_id=model_id,
+                                                  **{modality: item},
+                                                  **hf_processor_mm_kwargs,
+                                                  **tokenization_kwargs)
+                     for item in items
+                 ]
+
+         return hashes
+
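Aside: the branching above reduces to a single predicate: a caller-supplied UUID is reused verbatim only when no processor or tokenization kwargs could alter the processed output; otherwise it is folded into a fresh hash. Restated in isolation (the helper name is hypothetical):

```python
def reuses_item_uuid(item_uuid, hf_processor_mm_kwargs, tokenization_kwargs):
    """Hypothetical restatement of the branch in `_hash_mm_items`."""
    return (item_uuid is not None and not hf_processor_mm_kwargs
            and not tokenization_kwargs)

assert reuses_item_uuid("uuid-1", {}, {})
assert not reuses_item_uuid("uuid-1", {"num_crops": 4}, {})
assert not reuses_item_uuid(None, {}, {})
```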
1421
+ def _get_cache_missing_items(
1422
+ self,
1423
+ cache: "BaseMultiModalProcessorCache",
1424
+ mm_data_items: MultiModalDataItems,
1425
+ mm_hashes: MultiModalHashes,
1426
+ ) -> MultiModalDataItems:
1427
+ mm_is_cached = {
1428
+ modality: cache.is_cached(hashes)
1429
+ for modality, hashes in mm_hashes.items()
1430
+ }
1431
+
1432
+ mm_missing_idxs = {
1433
+ modality: [
1434
+ idx for idx, item_is_cached in enumerate(items_is_cached)
1435
+ if not item_is_cached
1436
+ ]
1437
+ for modality, items_is_cached in mm_is_cached.items()
1438
+ }
1439
+ mm_missing_data = {
1440
+ modality: [mm_data_items[modality][idx] for idx in idxs]
1441
+ for modality, idxs in mm_missing_idxs.items()
1442
+ }
1443
+
1444
+ return self._to_mm_items(mm_missing_data)
1445
+
1446
+ def _recompute_cached_prompt_update(
1447
+ self,
1448
+ cached_update: ResolvedPromptUpdate,
1449
+ new_item_idx: int,
1450
+ ) -> ResolvedPromptUpdate:
1451
+ """
1452
+ Override this if other attributes of `ResolvedPromptUpdate`
1453
+ also need to be recomputed after retrieving from the cache.
1454
+ """
1455
+ return replace(cached_update, item_idx=new_item_idx)
1456
+
1457
+ def _merge_mm_kwargs(
1458
+ self,
1459
+ cache: "BaseMultiModalProcessorCache",
1460
+ mm_hashes: MultiModalHashes,
1461
+ mm_missing_kwargs: MultiModalKwargsItems,
1462
+ mm_missing_prompt_updates: MultiModalPromptUpdates,
1463
+ ) -> tuple[MultiModalKwargsOptionalItems, MultiModalPromptUpdates]:
1464
+ # Need to calculate this at the beginning to avoid skipping cache logic
1465
+ # for subsequently repeated items in the same modality
1466
+ mm_is_cached = {
1467
+ modality: cache.is_cached(hashes)
1468
+ for modality, hashes in mm_hashes.items()
1469
+ }
1470
+
1471
+ mm_missing_next_idx = defaultdict[str, int](lambda: 0)
1472
+
1473
+ merged_kwargs = defaultdict[str,
1474
+ list[Optional[MultiModalKwargsItem]]](list)
1475
+ merged_prompt_updates = defaultdict[
1476
+ str, list[Sequence[ResolvedPromptUpdate]]](list)
1477
+ for modality, hashes in mm_hashes.items():
1478
+ missing_kwargs = mm_missing_kwargs.get(modality, [])
1479
+ missing_prompt_updates = mm_missing_prompt_updates.get(
1480
+ modality, [])
1481
+
1482
+ for item_idx, item_hash in enumerate(hashes):
1483
+ kwargs: Optional[MultiModalKwargsItem]
1484
+ if not mm_is_cached[modality][item_idx]:
1485
+ missing_next_idx = mm_missing_next_idx[modality]
1486
+ kwargs = missing_kwargs[missing_next_idx]
1487
+ updates = missing_prompt_updates[missing_next_idx]
1488
+
1489
+ mm_missing_next_idx[modality] += 1
1490
+
1491
+ item = kwargs, updates
1492
+ else:
1493
+ item = None
1494
+
1495
+ kwargs, updates = cache.get_and_update_item(item, item_hash)
1496
+
1497
+ merged_kwargs[modality].append(kwargs)
1498
+ merged_prompt_updates[modality].append([
1499
+ self._recompute_cached_prompt_update(update, item_idx)
1500
+ for update in updates
1501
+ ])
1502
+
1503
+ mm_kwargs = MultiModalKwargsItems(merged_kwargs)
1504
+ mm_prompt_updates = dict(merged_prompt_updates)
1505
+
1506
+ return mm_kwargs, mm_prompt_updates
1507
+
1508
+ def _apply_hf_processor(
1509
+ self,
1510
+ prompt: Union[str, list[int]],
1511
+ mm_data_items: MultiModalDataItems,
1512
+ hf_processor_mm_kwargs: Mapping[str, object],
1513
+ tokenization_kwargs: Mapping[str, object],
1514
+ *,
1515
+ mm_uuids: Optional[MultiModalUUIDDict] = None,
1516
+ ) -> tuple[list[int], MultiModalProcessingInfo, bool]:
1517
+ (
1518
+ prompt_ids,
1519
+ mm_processed_data,
1520
+ is_update_applied,
1521
+ ) = self._apply_hf_processor_main(
1522
+ prompt=prompt,
1523
+ mm_items=mm_data_items,
1524
+ hf_processor_mm_kwargs=hf_processor_mm_kwargs,
1525
+ tokenization_kwargs=tokenization_kwargs,
1526
+ enable_hf_prompt_update=True,
1527
+ )
1528
+
1529
+ mm_kwargs = MultiModalKwargsItems.from_hf_inputs(
1530
+ mm_processed_data,
1531
+ self._get_mm_fields_config(mm_processed_data,
1532
+ hf_processor_mm_kwargs),
1533
+ )
1534
+
1535
+ # Use overrides if provided; fallback to data-dependent hashing.
1536
+ mm_hashes = self._hash_mm_items(mm_data_items,
1537
+ hf_processor_mm_kwargs,
1538
+ tokenization_kwargs,
1539
+ mm_uuids=mm_uuids)
1540
+
1541
+ mm_prompt_updates = self._get_mm_prompt_updates(
1542
+ mm_data_items,
1543
+ hf_processor_mm_kwargs,
1544
+ mm_kwargs,
1545
+ )
1546
+
1547
+ mm_info = MultiModalProcessingInfo(
1548
+ kwargs=mm_kwargs,
1549
+ hashes=mm_hashes,
1550
+ prompt_updates=mm_prompt_updates,
1551
+ )
1552
+
1553
+ return prompt_ids, mm_info, is_update_applied
1554
+
1555
+ def _cached_apply_hf_processor(
1556
+ self,
1557
+ prompt: Union[str, list[int]],
1558
+ mm_data_items: MultiModalDataItems,
1559
+ hf_processor_mm_kwargs: Mapping[str, object],
1560
+ tokenization_kwargs: Mapping[str, object],
1561
+ *,
1562
+ mm_uuids: Optional[MultiModalUUIDDict] = None,
1563
+ ) -> tuple[list[int], MultiModalProcessingInfo, bool]:
1564
+ """
1565
+ Apply the HF processor on the full prompt text,
1566
+ caching the results and reusing cached results.
1567
+ """
1568
+ cache = self.cache
1569
+
1570
+ _, passthrough_data = self._get_hf_mm_data(mm_data_items)
1571
+ if cache is None or passthrough_data:
1572
+ return self._apply_hf_processor(
1573
+ prompt=prompt,
1574
+ mm_data_items=mm_data_items,
1575
+ hf_processor_mm_kwargs=hf_processor_mm_kwargs,
1576
+ tokenization_kwargs=tokenization_kwargs,
1577
+ mm_uuids=mm_uuids,
1578
+ )
1579
+
1580
+ mm_hashes = self._hash_mm_items(mm_data_items,
1581
+ hf_processor_mm_kwargs,
1582
+ tokenization_kwargs,
1583
+ mm_uuids=mm_uuids)
1584
+
1585
+ mm_missing_data_items = self._get_cache_missing_items(
1586
+ cache=cache,
1587
+ mm_data_items=mm_data_items,
1588
+ mm_hashes=mm_hashes,
1589
+ )
1590
+
1591
+ # NOTE: `prompt` does not correspond to `mm_missing_data_items`,
1592
+ # so we can't apply prompt updates until the new multimodal
1593
+ # items are combined with the cached multimodal items
1594
+ (
1595
+ prompt_ids,
1596
+ mm_missing_processed_data,
1597
+ is_update_applied,
1598
+ ) = self._apply_hf_processor_main(
1599
+ prompt=prompt,
1600
+ mm_items=mm_missing_data_items,
1601
+ hf_processor_mm_kwargs=hf_processor_mm_kwargs,
1602
+ tokenization_kwargs=tokenization_kwargs,
1603
+ enable_hf_prompt_update=False,
1604
+ )
1605
+
1606
+ mm_missing_kwargs = MultiModalKwargsItems.from_hf_inputs(
1607
+ mm_missing_processed_data,
1608
+ self._get_mm_fields_config(mm_missing_processed_data,
1609
+ hf_processor_mm_kwargs),
1610
+ )
1611
+
1612
+ mm_missing_prompt_updates = self._get_mm_prompt_updates(
1613
+ mm_missing_data_items,
1614
+ hf_processor_mm_kwargs,
1615
+ mm_missing_kwargs,
1616
+ )
1617
+
1618
+ mm_kwargs, mm_prompt_updates = self._merge_mm_kwargs(
1619
+ cache,
1620
+ mm_hashes=mm_hashes,
1621
+ mm_missing_kwargs=mm_missing_kwargs,
1622
+ mm_missing_prompt_updates=mm_missing_prompt_updates,
1623
+ )
1624
+
1625
+ mm_info = MultiModalProcessingInfo(
1626
+ kwargs=mm_kwargs,
1627
+ hashes=mm_hashes,
1628
+ prompt_updates=mm_prompt_updates,
1629
+ )
1630
+
1631
+ return prompt_ids, mm_info, is_update_applied
1632
+
1633
+ def _apply_token_matches(
1634
+ self,
1635
+ prompt: list[int],
1636
+ mm_prompt_updates: MultiModalPromptUpdates,
1637
+ ) -> tuple[list[int], MultiModalPromptUpdatesApplyResult]:
1638
+ tokenizer = self.info.get_tokenizer()
1639
+ return apply_token_matches(prompt, mm_prompt_updates, tokenizer)
1640
+
1641
+ def _apply_text_matches(
1642
+ self,
1643
+ prompt: str,
1644
+ mm_prompt_updates: MultiModalPromptUpdates,
1645
+ ) -> tuple[str, MultiModalPromptUpdatesApplyResult]:
1646
+ tokenizer = self.info.get_tokenizer()
1647
+ return apply_text_matches(prompt, mm_prompt_updates, tokenizer)
1648
+
1649
+ def _apply_prompt_updates(
1650
+ self,
1651
+ token_ids: list[int],
1652
+ mm_prompt_updates: MultiModalPromptUpdates,
1653
+ ) -> tuple[list[int], str, Mapping[str, list[PlaceholderFeaturesInfo]]]:
1654
+ tokenizer = self.info.get_tokenizer()
1655
+
1656
+ new_token_ids, match_result = self._apply_token_matches(
1657
+ token_ids,
1658
+ mm_prompt_updates,
1659
+ )
1660
+
1661
+ # If the search text does not represent a special token,
1662
+ # it may have different token IDs in the prompt, because
1663
+ # the tokens may go across the boundaries of the search text.
1664
+ # ----
1665
+ # e.g. when searching for "foo" in "food", if "food" itself makes
1666
+ # up a token, then the token ID of "foo" will not appear at all
1667
+ # ----
1668
+ # Since it is inefficient to search for all possible tokenizations
1669
+ # of the search text in the prompt, we instead perform string-based
1670
+ # updates on the decoded token IDs, then encode them back.
1671
+ if all(
1672
+ all(update_idx is not None for update_idx in update_idxs)
1673
+ for update_idxs in match_result.values()):
1674
+ new_text = decode_tokens(tokenizer, new_token_ids)
1675
+ else:
1676
+ new_text, match_result = self._apply_text_matches(
1677
+ decode_tokens(tokenizer, token_ids),
1678
+ mm_prompt_updates,
1679
+ )
1680
+
1681
+ new_token_ids = encode_tokens(
1682
+ tokenizer,
1683
+ new_text,
1684
+ add_special_tokens=False,
1685
+ )
1686
+
1687
+ matched_updates = defaultdict[
1688
+ str, list[Sequence[ResolvedPromptUpdate]]](list)
1689
+ for modality, update_idxs in match_result.items():
1690
+ for item_idx, update_idx in enumerate(update_idxs):
1691
+ assert update_idx is not None, (
1692
+ "Failed to apply prompt replacement for "
1693
+ f"mm_items[{modality!r}][{item_idx}]")
1694
+
1695
+ matched_updates[modality].append(
1696
+ [mm_prompt_updates[modality][item_idx][update_idx]])
1697
+
1698
+ placeholders = self._find_mm_placeholders(
1699
+ new_token_ids,
1700
+ dict(matched_updates),
1701
+ )
1702
+
1703
+ return new_token_ids, new_text, placeholders
1704
+
+     def _validate_mm_kwargs(
+         self,
+         mm_kwargs: MultiModalKwargsOptionalItems,
+         mm_item_counts: Mapping[str, int],
+     ) -> None:
+         for modality, item_count in mm_item_counts.items():
+             items = mm_kwargs.get(modality, [])
+
+             if len(items) != item_count:
+                 raise RuntimeError(
+                     f"Expected there to be {item_count} {modality} items in "
+                     f"keyword arguments corresponding to {item_count} "
+                     f"{modality} data items, but only found {len(items)}! "
+                     "There is likely a problem with your implementation of "
+                     "the merged multi-modal processor for this model "
+                     "(usually arising from an inconsistency between "
+                     "`_call_hf_processor` and `_get_mm_fields_config`).")
+
+     def _validate_mm_placeholders(
+         self,
+         mm_placeholders: Mapping[str, list[PlaceholderFeaturesInfo]],
+         mm_item_counts: Mapping[str, int],
+     ) -> None:
+         for modality, item_count in mm_item_counts.items():
+             placeholders = mm_placeholders.get(modality, [])
+
+             if len(placeholders) != item_count:
+                 # NOTE: If you are a model developer, this can also arise
+                 # from an inconsistency between `_call_hf_processor` and
+                 # `_get_mm_fields_config` implementations
+                 raise RuntimeError(
+                     f"Expected there to be {item_count} prompt updates "
+                     f"corresponding to {item_count} {modality} items, but "
+                     f"instead found {len(placeholders)} prompt updates! "
+                     "This is likely because you forgot to include input "
+                     "placeholder tokens (e.g., `<image>`, `<|image_pad|>`) "
+                     "in the prompt. If the model has a chat template, make "
+                     "sure you have applied it before calling `LLM.generate`.")
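The failure mode this check catches, as a hypothetical sketch (the prompts and the count helper below are illustrative, not from the diff):

```python
# Hypothetical illustration: every multi-modal item needs its own
# placeholder in the prompt, otherwise _validate_mm_placeholders raises.
prompt_ok = "<image><image> Compare the two pictures."  # 2 items, 2 placeholders
prompt_bad = "<image> Compare the two pictures."        # 2 items, 1 placeholder

def count_placeholders(prompt: str, token: str = "<image>") -> int:
    return prompt.count(token)

mm_item_counts = {"image": 2}
assert count_placeholders(prompt_ok) == mm_item_counts["image"]
assert count_placeholders(prompt_bad) != mm_item_counts["image"]  # -> RuntimeError
```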
+
+     def _maybe_apply_prompt_updates(
+         self,
+         mm_items: MultiModalDataItems,
+         prompt_ids: list[int],
+         mm_kwargs: MultiModalKwargsOptionalItems,
+         mm_prompt_updates: MultiModalPromptUpdates,
+         is_update_applied: bool,
+     ) -> tuple[list[int], str, Mapping[str, list[PlaceholderFeaturesInfo]]]:
+         mm_item_counts = mm_items.get_all_counts()
+         self._validate_mm_kwargs(mm_kwargs, mm_item_counts)
+
+         if is_update_applied:
+             mm_placeholders = self._find_mm_placeholders(
+                 prompt_ids,
+                 mm_prompt_updates,
+             )
+             self._validate_mm_placeholders(mm_placeholders, mm_item_counts)
+
+             tokenizer = self.info.get_tokenizer()
+             prompt = decode_tokens(tokenizer, prompt_ids)
+         else:
+             (
+                 prompt_ids,
+                 prompt,
+                 mm_placeholders,
+             ) = self._apply_prompt_updates(
+                 prompt_ids,
+                 mm_prompt_updates,
+             )
+             self._validate_mm_placeholders(mm_placeholders, mm_item_counts)
+
+         return prompt_ids, prompt, mm_placeholders
+
+     def apply(
+         self,
+         prompt: Union[str, list[int]],
+         mm_data: MultiModalDataDict,
+         hf_processor_mm_kwargs: Mapping[str, object],
+         tokenization_kwargs: Optional[Mapping[str, object]] = None,
+         *,
+         mm_uuids: Optional[MultiModalUUIDDict] = None,
+     ) -> MultiModalInputs:
+         """
+         Process multi-modal inputs to be used in vLLM.
+
+         The main steps are:
+
+         1. Apply the HF processor to the prompt text and multi-modal data
+            together, outputting token IDs and processed tensors.
+         2. Find and update sequences in the token IDs with placeholder
+            tokens. The number of placeholder tokens equals the feature
+            size of the multi-modal data produced by the multi-modal
+            encoder.
+         3. Extract information about the placeholder tokens from the
+            processed token IDs.
+         """
+         mm_items = self._to_mm_items(mm_data)
+
+         if tokenization_kwargs is None:
+             tokenization_kwargs = {}
+
+         (
+             prompt_ids,
+             mm_info,
+             is_update_applied,
+         ) = self._cached_apply_hf_processor(
+             prompt,
+             mm_items,
+             hf_processor_mm_kwargs,
+             tokenization_kwargs=tokenization_kwargs,
+             mm_uuids=mm_uuids,
+         )
+
+         # NOTE: tokenization_kwargs are not required to init the processor
+         prompt_ids, prompt, mm_placeholders = self._maybe_apply_prompt_updates(
+             mm_items=mm_items,
+             prompt_ids=prompt_ids,
+             mm_kwargs=mm_info.kwargs,
+             mm_prompt_updates=mm_info.prompt_updates,
+             is_update_applied=is_update_applied,
+         )
+
+         mm_placeholder_ranges = {
+             modality: [item.to_range() for item in placeholders]
+             for modality, placeholders in mm_placeholders.items()
+         }
+
+         return MultiModalInputs(
+             type="multimodal",
+             prompt=prompt,
+             prompt_token_ids=prompt_ids,
+             mm_kwargs=mm_info.kwargs,
+             mm_hashes=mm_info.hashes,
+             mm_placeholders=mm_placeholder_ranges,
+         )
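A hedged usage sketch of the three steps above; `processor` stands in for a concrete subclass instance obtained elsewhere (e.g. through vLLM's multi-modal registry), and the prompt format and image size are illustrative:

```python
# Illustrative only; `processor` is assumed to be a concrete
# BaseMultiModalProcessor for a model whose template uses "<image>".
from PIL import Image

image = Image.new("RGB", (336, 336))  # dummy image for the sketch

inputs = processor.apply(
    prompt="USER: <image>\nWhat is shown here? ASSISTANT:",
    mm_data={"image": [image]},
    hf_processor_mm_kwargs={},
)

# Step 2 expanded "<image>" into one placeholder token per image feature;
# step 3 recorded where those tokens sit in the final token IDs.
print(inputs["prompt_token_ids"][:8])
print(inputs["mm_placeholders"]["image"][0])  # range for the first image
```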
+
+
+ class EncDecMultiModalProcessor(BaseMultiModalProcessor[_I]):
+
+     @abstractmethod
+     def create_encoder_prompt(
+         self,
+         prompt: Union[str, list[int]],
+         mm_data: MultiModalDataDict,
+     ) -> Union[str, list[int]]:
+         """
+         Create the input prompt for the encoder. The HF processor will be
+         applied to this prompt during profiling and generation.
+         """
+         raise NotImplementedError
+
+     @property
+     def pad_dummy_encoder_prompt(self) -> bool:
+         return False
+
+     def create_decoder_prompt(
+         self,
+         prompt: Union[str, list[int]],
+         mm_data: MultiModalDataDict,
+     ) -> Union[str, list[int]]:
+         """Create the input prompt for the decoder."""
+         return prompt
+
+     def _get_enc_dec_inputs(
+         self,
+         prompt: Union[str, list[int]],
+         mm_data: MultiModalDataDict,
+         encoder_inputs: MultiModalInputs,
+     ):
+         tokenizer = self.info.get_tokenizer()
+         decoder_prompt = self.create_decoder_prompt(prompt, mm_data)
+         if isinstance(decoder_prompt, str):
+             decoder_prompt_ids = encode_tokens(tokenizer,
+                                                decoder_prompt,
+                                                add_special_tokens=False)
+         else:
+             decoder_prompt_ids = decoder_prompt
+             decoder_prompt = decode_tokens(tokenizer, decoder_prompt)
+
+         mm_inputs = MultiModalEncDecInputs(
+             encoder_prompt=encoder_inputs["prompt"],
+             encoder_prompt_token_ids=encoder_inputs["prompt_token_ids"],
+             **encoder_inputs)
+         mm_inputs.update({
+             "prompt": decoder_prompt,
+             "prompt_token_ids": decoder_prompt_ids,
+         })
+         return mm_inputs
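To make the division of labor concrete, a hypothetical minimal subclass (the class name and its behavior are illustrative, not taken from the diff):

```python
# Hypothetical sketch: an encoder-decoder model (e.g. a speech-to-text
# architecture) whose encoder consumes only the multi-modal features,
# so the encoder prompt can be a constant while the decoder keeps the
# user text.
class MySpeechProcessor(EncDecMultiModalProcessor):

    def create_encoder_prompt(
        self,
        prompt: Union[str, list[int]],
        mm_data: MultiModalDataDict,
    ) -> Union[str, list[int]]:
        # The encoder side carries only the multi-modal placeholders.
        return ""

    # create_decoder_prompt is inherited: the decoder receives the
    # original user prompt unchanged.
```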
+
+     def apply(
+         self,
+         prompt: Union[str, list[int]],
+         mm_data: MultiModalDataDict,
+         hf_processor_mm_kwargs: Mapping[str, object],
+         tokenization_kwargs: Optional[Mapping[str, object]] = None,
+         *,
+         mm_uuids: Optional[MultiModalUUIDDict] = None,
+     ) -> MultiModalEncDecInputs:
+         """
+         Process multi-modal inputs to be used in vLLM.
+
+         The main processing steps are modified to fit an encoder-decoder
+         model:
+
+         1. Create the encoder prompt from the input prompt text.
+         2. Apply the HF processor to the encoder prompt.
+         3. Copy the input prompt text as the decoder prompt inputs.
+         """
+         encoder_prompt = self.create_encoder_prompt(prompt, mm_data)
+         encoder_inputs = super().apply(
+             encoder_prompt,
+             mm_data,
+             hf_processor_mm_kwargs,
+             tokenization_kwargs,
+             mm_uuids=mm_uuids,
+         )
+
+         return self._get_enc_dec_inputs(
+             prompt=prompt,
+             mm_data=mm_data,
+             encoder_inputs=encoder_inputs,
+         )
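A hedged sketch of what a caller gets back from the encoder-decoder path (the `processor` instance, the `audio_clip` name, and the printed fields are illustrative):

```python
# Illustrative only: `processor` is a concrete EncDecMultiModalProcessor
# and `audio_clip` a placeholder name for real audio data.
enc_dec_inputs = processor.apply(
    prompt="Transcribe the audio.",
    mm_data={"audio": [audio_clip]},
    hf_processor_mm_kwargs={},
)

# The encoder prompt went through the full multi-modal pipeline, while
# the decoder prompt is just the tokenized original text.
print(enc_dec_inputs["encoder_prompt_token_ids"])
print(enc_dec_inputs["prompt_token_ids"])
```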