vllm-cpu-avx512vnni 0.10.2.post2-cp312-cp312-manylinux_2_17_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of vllm-cpu-avx512vnni might be problematic.

Files changed (1395)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +220 -0
  3. vllm/_bc_linter.py +59 -0
  4. vllm/_custom_ops.py +2022 -0
  5. vllm/_ipex_ops.py +404 -0
  6. vllm/_version.py +34 -0
  7. vllm/adapter_commons/__init__.py +0 -0
  8. vllm/adapter_commons/layers.py +16 -0
  9. vllm/adapter_commons/models.py +106 -0
  10. vllm/adapter_commons/request.py +26 -0
  11. vllm/adapter_commons/utils.py +93 -0
  12. vllm/adapter_commons/worker_manager.py +39 -0
  13. vllm/assets/__init__.py +0 -0
  14. vllm/assets/audio.py +45 -0
  15. vllm/assets/base.py +41 -0
  16. vllm/assets/image.py +50 -0
  17. vllm/assets/video.py +138 -0
  18. vllm/attention/__init__.py +19 -0
  19. vllm/attention/backends/__init__.py +0 -0
  20. vllm/attention/backends/abstract.py +348 -0
  21. vllm/attention/backends/differential_flash_attn.py +935 -0
  22. vllm/attention/backends/dual_chunk_flash_attn.py +1499 -0
  23. vllm/attention/backends/flash_attn.py +933 -0
  24. vllm/attention/backends/flashmla.py +238 -0
  25. vllm/attention/backends/mla/__init__.py +0 -0
  26. vllm/attention/backends/mla/common.py +1310 -0
  27. vllm/attention/backends/placeholder_attn.py +340 -0
  28. vllm/attention/backends/rocm_aiter_mla.py +410 -0
  29. vllm/attention/backends/rocm_flash_attn.py +953 -0
  30. vllm/attention/backends/triton_mla.py +111 -0
  31. vllm/attention/backends/utils.py +610 -0
  32. vllm/attention/backends/xformers.py +805 -0
  33. vllm/attention/layer.py +552 -0
  34. vllm/attention/layers/__init__.py +0 -0
  35. vllm/attention/layers/chunked_local_attention.py +91 -0
  36. vllm/attention/layers/cross_attention.py +159 -0
  37. vllm/attention/layers/encoder_only_attention.py +86 -0
  38. vllm/attention/ops/__init__.py +0 -0
  39. vllm/attention/ops/chunked_prefill_paged_decode.py +405 -0
  40. vllm/attention/ops/common.py +139 -0
  41. vllm/attention/ops/flashmla.py +123 -0
  42. vllm/attention/ops/merge_attn_states.py +43 -0
  43. vllm/attention/ops/paged_attn.py +261 -0
  44. vllm/attention/ops/pallas_kv_cache_update.py +124 -0
  45. vllm/attention/ops/prefix_prefill.py +928 -0
  46. vllm/attention/ops/rocm_aiter_mla.py +104 -0
  47. vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
  48. vllm/attention/ops/triton_decode_attention.py +676 -0
  49. vllm/attention/ops/triton_flash_attention.py +984 -0
  50. vllm/attention/ops/triton_merge_attn_states.py +97 -0
  51. vllm/attention/ops/triton_unified_attention.py +854 -0
  52. vllm/attention/selector.py +243 -0
  53. vllm/attention/utils/__init__.py +0 -0
  54. vllm/attention/utils/fa_utils.py +85 -0
  55. vllm/attention/utils/kv_sharing_utils.py +33 -0
  56. vllm/beam_search.py +87 -0
  57. vllm/benchmarks/__init__.py +0 -0
  58. vllm/benchmarks/datasets.py +2651 -0
  59. vllm/benchmarks/latency.py +170 -0
  60. vllm/benchmarks/lib/__init__.py +3 -0
  61. vllm/benchmarks/lib/endpoint_request_func.py +510 -0
  62. vllm/benchmarks/lib/ready_checker.py +72 -0
  63. vllm/benchmarks/lib/utils.py +80 -0
  64. vllm/benchmarks/serve.py +1247 -0
  65. vllm/benchmarks/throughput.py +696 -0
  66. vllm/collect_env.py +823 -0
  67. vllm/compilation/__init__.py +0 -0
  68. vllm/compilation/activation_quant_fusion.py +193 -0
  69. vllm/compilation/backends.py +641 -0
  70. vllm/compilation/base_static_graph.py +51 -0
  71. vllm/compilation/collective_fusion.py +1190 -0
  72. vllm/compilation/compiler_interface.py +572 -0
  73. vllm/compilation/counter.py +47 -0
  74. vllm/compilation/cuda_graph.py +193 -0
  75. vllm/compilation/cuda_piecewise_backend.py +117 -0
  76. vllm/compilation/decorators.py +316 -0
  77. vllm/compilation/fix_functionalization.py +208 -0
  78. vllm/compilation/fusion.py +600 -0
  79. vllm/compilation/fusion_attn.py +303 -0
  80. vllm/compilation/fx_utils.py +84 -0
  81. vllm/compilation/inductor_pass.py +136 -0
  82. vllm/compilation/monitor.py +57 -0
  83. vllm/compilation/multi_output_match.py +109 -0
  84. vllm/compilation/noop_elimination.py +165 -0
  85. vllm/compilation/pass_manager.py +88 -0
  86. vllm/compilation/sequence_parallelism.py +484 -0
  87. vllm/compilation/torch25_custom_graph_pass.py +42 -0
  88. vllm/compilation/vllm_inductor_pass.py +50 -0
  89. vllm/compilation/wrapper.py +138 -0
  90. vllm/config/__init__.py +3921 -0
  91. vllm/config/cache.py +214 -0
  92. vllm/config/compilation.py +580 -0
  93. vllm/config/kv_events.py +50 -0
  94. vllm/config/kv_transfer.py +111 -0
  95. vllm/config/load.py +113 -0
  96. vllm/config/lora.py +132 -0
  97. vllm/config/parallel.py +446 -0
  98. vllm/config/scheduler.py +304 -0
  99. vllm/config/utils.py +29 -0
  100. vllm/connections.py +174 -0
  101. vllm/core/__init__.py +0 -0
  102. vllm/core/block/__init__.py +0 -0
  103. vllm/core/block/block_table.py +399 -0
  104. vllm/core/block/common.py +371 -0
  105. vllm/core/block/cpu_gpu_block_allocator.py +439 -0
  106. vllm/core/block/interfaces.py +319 -0
  107. vllm/core/block/naive_block.py +466 -0
  108. vllm/core/block/prefix_caching_block.py +1135 -0
  109. vllm/core/block/utils.py +28 -0
  110. vllm/core/block_manager.py +523 -0
  111. vllm/core/evictor.py +157 -0
  112. vllm/core/interfaces.py +139 -0
  113. vllm/core/placeholder_block_space_manager.py +103 -0
  114. vllm/core/scheduler.py +2028 -0
  115. vllm/device_allocator/__init__.py +0 -0
  116. vllm/device_allocator/cumem.py +286 -0
  117. vllm/distributed/__init__.py +6 -0
  118. vllm/distributed/communication_op.py +41 -0
  119. vllm/distributed/device_communicators/__init__.py +0 -0
  120. vllm/distributed/device_communicators/all2all.py +259 -0
  121. vllm/distributed/device_communicators/all_reduce_utils.py +292 -0
  122. vllm/distributed/device_communicators/base_device_communicator.py +277 -0
  123. vllm/distributed/device_communicators/cpu_communicator.py +201 -0
  124. vllm/distributed/device_communicators/cuda_communicator.py +294 -0
  125. vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
  126. vllm/distributed/device_communicators/custom_all_reduce.py +311 -0
  127. vllm/distributed/device_communicators/pynccl.py +290 -0
  128. vllm/distributed/device_communicators/pynccl_wrapper.py +382 -0
  129. vllm/distributed/device_communicators/quick_all_reduce.py +278 -0
  130. vllm/distributed/device_communicators/ray_communicator.py +258 -0
  131. vllm/distributed/device_communicators/shm_broadcast.py +585 -0
  132. vllm/distributed/device_communicators/symm_mem.py +136 -0
  133. vllm/distributed/device_communicators/tpu_communicator.py +102 -0
  134. vllm/distributed/device_communicators/xpu_communicator.py +69 -0
  135. vllm/distributed/eplb/__init__.py +8 -0
  136. vllm/distributed/eplb/eplb_state.py +619 -0
  137. vllm/distributed/eplb/rebalance_algo.py +234 -0
  138. vllm/distributed/eplb/rebalance_execute.py +424 -0
  139. vllm/distributed/kv_events.py +362 -0
  140. vllm/distributed/kv_transfer/README.md +29 -0
  141. vllm/distributed/kv_transfer/__init__.py +13 -0
  142. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  143. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  144. vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
  145. vllm/distributed/kv_transfer/kv_connector/factory.py +108 -0
  146. vllm/distributed/kv_transfer/kv_connector/utils.py +246 -0
  147. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
  148. vllm/distributed/kv_transfer/kv_connector/v1/base.py +356 -0
  149. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +167 -0
  150. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +266 -0
  151. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1319 -0
  152. vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
  153. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +484 -0
  154. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +542 -0
  155. vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +266 -0
  156. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +414 -0
  157. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  158. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
  159. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
  160. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
  161. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  162. vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
  163. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +290 -0
  164. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
  165. vllm/distributed/kv_transfer/kv_transfer_state.py +73 -0
  166. vllm/distributed/parallel_state.py +1489 -0
  167. vllm/distributed/tpu_distributed_utils.py +178 -0
  168. vllm/distributed/utils.py +536 -0
  169. vllm/engine/__init__.py +0 -0
  170. vllm/engine/arg_utils.py +1857 -0
  171. vllm/engine/async_llm_engine.py +1044 -0
  172. vllm/engine/async_timeout.py +173 -0
  173. vllm/engine/llm_engine.py +1849 -0
  174. vllm/engine/metrics.py +577 -0
  175. vllm/engine/metrics_types.py +84 -0
  176. vllm/engine/multiprocessing/__init__.py +145 -0
  177. vllm/engine/multiprocessing/client.py +643 -0
  178. vllm/engine/multiprocessing/engine.py +470 -0
  179. vllm/engine/output_processor/__init__.py +0 -0
  180. vllm/engine/output_processor/interfaces.py +61 -0
  181. vllm/engine/output_processor/single_step.py +145 -0
  182. vllm/engine/output_processor/stop_checker.py +131 -0
  183. vllm/engine/output_processor/util.py +28 -0
  184. vllm/engine/protocol.py +343 -0
  185. vllm/entrypoints/__init__.py +0 -0
  186. vllm/entrypoints/api_server.py +178 -0
  187. vllm/entrypoints/chat_utils.py +1535 -0
  188. vllm/entrypoints/cli/__init__.py +12 -0
  189. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  190. vllm/entrypoints/cli/benchmark/base.py +25 -0
  191. vllm/entrypoints/cli/benchmark/latency.py +21 -0
  192. vllm/entrypoints/cli/benchmark/main.py +58 -0
  193. vllm/entrypoints/cli/benchmark/serve.py +21 -0
  194. vllm/entrypoints/cli/benchmark/throughput.py +21 -0
  195. vllm/entrypoints/cli/collect_env.py +36 -0
  196. vllm/entrypoints/cli/main.py +60 -0
  197. vllm/entrypoints/cli/openai.py +214 -0
  198. vllm/entrypoints/cli/run_batch.py +69 -0
  199. vllm/entrypoints/cli/serve.py +232 -0
  200. vllm/entrypoints/cli/types.py +29 -0
  201. vllm/entrypoints/constants.py +10 -0
  202. vllm/entrypoints/context.py +444 -0
  203. vllm/entrypoints/harmony_utils.py +431 -0
  204. vllm/entrypoints/launcher.py +168 -0
  205. vllm/entrypoints/llm.py +1579 -0
  206. vllm/entrypoints/logger.py +79 -0
  207. vllm/entrypoints/openai/__init__.py +0 -0
  208. vllm/entrypoints/openai/api_server.py +2011 -0
  209. vllm/entrypoints/openai/cli_args.py +281 -0
  210. vllm/entrypoints/openai/logits_processors.py +90 -0
  211. vllm/entrypoints/openai/protocol.py +2590 -0
  212. vllm/entrypoints/openai/run_batch.py +497 -0
  213. vllm/entrypoints/openai/serving_chat.py +1591 -0
  214. vllm/entrypoints/openai/serving_classification.py +176 -0
  215. vllm/entrypoints/openai/serving_completion.py +688 -0
  216. vllm/entrypoints/openai/serving_embedding.py +632 -0
  217. vllm/entrypoints/openai/serving_engine.py +996 -0
  218. vllm/entrypoints/openai/serving_models.py +288 -0
  219. vllm/entrypoints/openai/serving_pooling.py +277 -0
  220. vllm/entrypoints/openai/serving_responses.py +1690 -0
  221. vllm/entrypoints/openai/serving_score.py +479 -0
  222. vllm/entrypoints/openai/serving_tokenization.py +196 -0
  223. vllm/entrypoints/openai/serving_transcription.py +136 -0
  224. vllm/entrypoints/openai/speech_to_text.py +388 -0
  225. vllm/entrypoints/openai/tool_parsers/__init__.py +51 -0
  226. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
  227. vllm/entrypoints/openai/tool_parsers/deepseekv31_tool_parser.py +367 -0
  228. vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
  229. vllm/entrypoints/openai/tool_parsers/glm4_moe_tool_parser.py +185 -0
  230. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
  231. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
  232. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +418 -0
  233. vllm/entrypoints/openai/tool_parsers/hunyuan_a13b_tool_parser.py +372 -0
  234. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
  235. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
  236. vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py +377 -0
  237. vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
  238. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +269 -0
  239. vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +816 -0
  240. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
  241. vllm/entrypoints/openai/tool_parsers/openai_tool_parser.py +73 -0
  242. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
  243. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
  244. vllm/entrypoints/openai/tool_parsers/qwen3coder_tool_parser.py +707 -0
  245. vllm/entrypoints/openai/tool_parsers/seed_oss_tool_parser.py +679 -0
  246. vllm/entrypoints/openai/tool_parsers/step3_tool_parser.py +296 -0
  247. vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
  248. vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +524 -0
  249. vllm/entrypoints/renderer.py +395 -0
  250. vllm/entrypoints/score_utils.py +232 -0
  251. vllm/entrypoints/ssl.py +75 -0
  252. vllm/entrypoints/tool.py +139 -0
  253. vllm/entrypoints/tool_server.py +195 -0
  254. vllm/entrypoints/utils.py +328 -0
  255. vllm/env_override.py +23 -0
  256. vllm/envs.py +1354 -0
  257. vllm/executor/__init__.py +0 -0
  258. vllm/executor/executor_base.py +378 -0
  259. vllm/executor/mp_distributed_executor.py +244 -0
  260. vllm/executor/msgspec_utils.py +35 -0
  261. vllm/executor/multiproc_worker_utils.py +279 -0
  262. vllm/executor/ray_distributed_executor.py +699 -0
  263. vllm/executor/ray_utils.py +410 -0
  264. vllm/executor/uniproc_executor.py +152 -0
  265. vllm/forward_context.py +273 -0
  266. vllm/inputs/__init__.py +44 -0
  267. vllm/inputs/data.py +356 -0
  268. vllm/inputs/parse.py +151 -0
  269. vllm/inputs/preprocess.py +973 -0
  270. vllm/inputs/registry.py +251 -0
  271. vllm/logger.py +229 -0
  272. vllm/logging_utils/__init__.py +8 -0
  273. vllm/logging_utils/dump_input.py +81 -0
  274. vllm/logging_utils/formatter.py +79 -0
  275. vllm/logits_process.py +119 -0
  276. vllm/logprobs.py +28 -0
  277. vllm/lora/__init__.py +0 -0
  278. vllm/lora/layers/__init__.py +34 -0
  279. vllm/lora/layers/base.py +69 -0
  280. vllm/lora/layers/base_linear.py +184 -0
  281. vllm/lora/layers/column_parallel_linear.py +622 -0
  282. vllm/lora/layers/logits_processor.py +247 -0
  283. vllm/lora/layers/qkv_x_parallel_linear.py +8 -0
  284. vllm/lora/layers/replicated_linear.py +61 -0
  285. vllm/lora/layers/row_parallel_linear.py +201 -0
  286. vllm/lora/layers/utils.py +60 -0
  287. vllm/lora/layers/vocal_parallel_embedding.py +172 -0
  288. vllm/lora/lora.py +199 -0
  289. vllm/lora/models.py +792 -0
  290. vllm/lora/ops/__init__.py +0 -0
  291. vllm/lora/ops/ipex_ops/__init__.py +7 -0
  292. vllm/lora/ops/ipex_ops/lora_ops.py +44 -0
  293. vllm/lora/ops/torch_ops/__init__.py +16 -0
  294. vllm/lora/ops/torch_ops/lora_ops.py +119 -0
  295. vllm/lora/ops/triton_ops/__init__.py +12 -0
  296. vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
  297. vllm/lora/ops/triton_ops/lora_expand_op.py +291 -0
  298. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
  299. vllm/lora/ops/triton_ops/lora_shrink_op.py +245 -0
  300. vllm/lora/ops/triton_ops/utils.py +126 -0
  301. vllm/lora/ops/xla_ops/__init__.py +7 -0
  302. vllm/lora/ops/xla_ops/lora_ops.py +145 -0
  303. vllm/lora/peft_helper.py +127 -0
  304. vllm/lora/punica_wrapper/__init__.py +10 -0
  305. vllm/lora/punica_wrapper/punica_base.py +458 -0
  306. vllm/lora/punica_wrapper/punica_cpu.py +349 -0
  307. vllm/lora/punica_wrapper/punica_gpu.py +279 -0
  308. vllm/lora/punica_wrapper/punica_selector.py +20 -0
  309. vllm/lora/punica_wrapper/punica_tpu.py +391 -0
  310. vllm/lora/punica_wrapper/punica_xpu.py +276 -0
  311. vllm/lora/punica_wrapper/utils.py +136 -0
  312. vllm/lora/request.py +99 -0
  313. vllm/lora/resolver.py +85 -0
  314. vllm/lora/utils.py +246 -0
  315. vllm/lora/worker_manager.py +256 -0
  316. vllm/model_executor/__init__.py +16 -0
  317. vllm/model_executor/custom_op.py +194 -0
  318. vllm/model_executor/layers/__init__.py +0 -0
  319. vllm/model_executor/layers/activation.py +575 -0
  320. vllm/model_executor/layers/attention_layer_base.py +23 -0
  321. vllm/model_executor/layers/fla/__init__.py +8 -0
  322. vllm/model_executor/layers/fla/ops/__init__.py +17 -0
  323. vllm/model_executor/layers/fla/ops/chunk.py +225 -0
  324. vllm/model_executor/layers/fla/ops/chunk_delta_h.py +290 -0
  325. vllm/model_executor/layers/fla/ops/chunk_o.py +177 -0
  326. vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +140 -0
  327. vllm/model_executor/layers/fla/ops/cumsum.py +226 -0
  328. vllm/model_executor/layers/fla/ops/fused_recurrent.py +366 -0
  329. vllm/model_executor/layers/fla/ops/index.py +39 -0
  330. vllm/model_executor/layers/fla/ops/l2norm.py +143 -0
  331. vllm/model_executor/layers/fla/ops/layernorm_guard.py +337 -0
  332. vllm/model_executor/layers/fla/ops/op.py +39 -0
  333. vllm/model_executor/layers/fla/ops/solve_tril.py +365 -0
  334. vllm/model_executor/layers/fla/ops/utils.py +180 -0
  335. vllm/model_executor/layers/fla/ops/wy_fast.py +114 -0
  336. vllm/model_executor/layers/fused_moe/__init__.py +80 -0
  337. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +304 -0
  338. vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +164 -0
  339. vllm/model_executor/layers/fused_moe/config.py +497 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
  454. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
  455. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
  456. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
  457. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  458. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  459. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  460. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  461. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  462. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  463. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  464. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  465. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  466. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  467. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  468. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  469. vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  470. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  471. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  472. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  473. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  474. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
  475. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  476. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  477. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  478. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  479. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  480. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
  481. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  482. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  483. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  484. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  485. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  486. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  487. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  488. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  489. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
  490. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  491. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  492. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  493. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  494. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  495. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  496. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  497. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  498. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  499. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  500. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  501. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  502. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  503. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  504. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  505. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  506. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  507. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  508. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  509. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  510. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  511. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  512. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  513. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  514. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  515. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  516. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  517. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  518. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  519. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  520. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
  521. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  522. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  523. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  524. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  525. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  526. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  527. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  528. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  529. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  530. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  531. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  532. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  533. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  534. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  535. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  536. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  537. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  538. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  539. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  540. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  541. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  542. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  543. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  544. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  545. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  546. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  547. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  548. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  549. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  550. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  551. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  552. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  553. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  554. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  555. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  556. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  557. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  558. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  559. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  560. vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +297 -0
  561. vllm/model_executor/layers/fused_moe/cutlass_moe.py +996 -0
  562. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +370 -0
  563. vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +413 -0
  564. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +280 -0
  565. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +229 -0
  566. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +243 -0
  567. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +97 -0
  568. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1042 -0
  569. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +240 -0
  570. vllm/model_executor/layers/fused_moe/fused_moe.py +2081 -0
  571. vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +247 -0
  572. vllm/model_executor/layers/fused_moe/layer.py +1951 -0
  573. vllm/model_executor/layers/fused_moe/modular_kernel.py +892 -0
  574. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +87 -0
  575. vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
  576. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +205 -0
  577. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  578. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +321 -0
  579. vllm/model_executor/layers/fused_moe/prepare_finalize.py +72 -0
  580. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +431 -0
  581. vllm/model_executor/layers/fused_moe/routing_simulator.py +291 -0
  582. vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +146 -0
  583. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +171 -0
  584. vllm/model_executor/layers/fused_moe/trtllm_moe.py +197 -0
  585. vllm/model_executor/layers/fused_moe/utils.py +270 -0
  586. vllm/model_executor/layers/layernorm.py +381 -0
  587. vllm/model_executor/layers/lightning_attn.py +661 -0
  588. vllm/model_executor/layers/linear.py +1567 -0
  589. vllm/model_executor/layers/logits_processor.py +199 -0
  590. vllm/model_executor/layers/mamba/__init__.py +0 -0
  591. vllm/model_executor/layers/mamba/abstract.py +45 -0
  592. vllm/model_executor/layers/mamba/linear_attn.py +432 -0
  593. vllm/model_executor/layers/mamba/mamba2_metadata.py +186 -0
  594. vllm/model_executor/layers/mamba/mamba_mixer.py +517 -0
  595. vllm/model_executor/layers/mamba/mamba_mixer2.py +803 -0
  596. vllm/model_executor/layers/mamba/mamba_utils.py +202 -0
  597. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  598. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +982 -0
  599. vllm/model_executor/layers/mamba/ops/layernorm_gated.py +168 -0
  600. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
  601. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +262 -0
  602. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +574 -0
  603. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +751 -0
  604. vllm/model_executor/layers/mamba/ops/ssd_combined.py +248 -0
  605. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +248 -0
  606. vllm/model_executor/layers/mamba/short_conv.py +270 -0
  607. vllm/model_executor/layers/mla.py +158 -0
  608. vllm/model_executor/layers/pooler.py +732 -0
  609. vllm/model_executor/layers/quantization/__init__.py +157 -0
  610. vllm/model_executor/layers/quantization/auto_round.py +388 -0
  611. vllm/model_executor/layers/quantization/awq.py +228 -0
  612. vllm/model_executor/layers/quantization/awq_marlin.py +548 -0
  613. vllm/model_executor/layers/quantization/awq_triton.py +320 -0
  614. vllm/model_executor/layers/quantization/base_config.py +164 -0
  615. vllm/model_executor/layers/quantization/bitblas.py +464 -0
  616. vllm/model_executor/layers/quantization/bitsandbytes.py +621 -0
  617. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  618. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +795 -0
  619. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1651 -0
  620. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +27 -0
  621. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +366 -0
  622. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  623. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
  624. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +105 -0
  625. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +161 -0
  626. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +169 -0
  627. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +135 -0
  628. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
  629. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +156 -0
  630. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
  631. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
  632. vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +227 -0
  633. vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +135 -0
  634. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +21 -0
  635. vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
  636. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
  637. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  638. vllm/model_executor/layers/quantization/deepgemm.py +81 -0
  639. vllm/model_executor/layers/quantization/deepspeedfp.py +196 -0
  640. vllm/model_executor/layers/quantization/experts_int8.py +215 -0
  641. vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
  642. vllm/model_executor/layers/quantization/fp8.py +1179 -0
  643. vllm/model_executor/layers/quantization/gguf.py +597 -0
  644. vllm/model_executor/layers/quantization/gptq.py +300 -0
  645. vllm/model_executor/layers/quantization/gptq_bitblas.py +448 -0
  646. vllm/model_executor/layers/quantization/gptq_marlin.py +700 -0
  647. vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
  648. vllm/model_executor/layers/quantization/hqq_marlin.py +333 -0
  649. vllm/model_executor/layers/quantization/inc.py +61 -0
  650. vllm/model_executor/layers/quantization/input_quant_fp8.py +103 -0
  651. vllm/model_executor/layers/quantization/ipex_quant.py +410 -0
  652. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  653. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +91 -0
  654. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +93 -0
  655. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
  656. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +302 -0
  657. vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +92 -0
  658. vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +117 -0
  659. vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +92 -0
  660. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
  661. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +144 -0
  662. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +139 -0
  663. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
  664. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +89 -0
  665. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +163 -0
  666. vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +206 -0
  667. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
  668. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
  669. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +104 -0
  670. vllm/model_executor/layers/quantization/kv_cache.py +139 -0
  671. vllm/model_executor/layers/quantization/modelopt.py +1548 -0
  672. vllm/model_executor/layers/quantization/moe_wna16.py +473 -0
  673. vllm/model_executor/layers/quantization/mxfp4.py +951 -0
  674. vllm/model_executor/layers/quantization/petit.py +306 -0
  675. vllm/model_executor/layers/quantization/ptpc_fp8.py +129 -0
  676. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  677. vllm/model_executor/layers/quantization/quark/quark.py +431 -0
  678. vllm/model_executor/layers/quantization/quark/quark_moe.py +434 -0
  679. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  680. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  681. vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +112 -0
  682. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +163 -0
  683. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
  684. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  685. vllm/model_executor/layers/quantization/rtn.py +456 -0
  686. vllm/model_executor/layers/quantization/schema.py +86 -0
  687. vllm/model_executor/layers/quantization/torchao.py +214 -0
  688. vllm/model_executor/layers/quantization/tpu_int8.py +125 -0
  689. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  690. vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
  691. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +210 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  722. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  723. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  724. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  725. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  726. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  727. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  728. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  729. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  730. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  731. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  732. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  733. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  734. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  735. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  736. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  737. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  738. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  739. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  740. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  741. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  742. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  743. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  744. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  745. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  746. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  747. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  748. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  749. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  750. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  751. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  752. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  753. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  754. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  755. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  756. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  757. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  758. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  759. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  760. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  761. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  762. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  763. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  764. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  765. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  766. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  767. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  768. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  769. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  770. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  771. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  772. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  773. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  774. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  775. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  776. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  777. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  778. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  779. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  780. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  781. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  782. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  783. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  784. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  785. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  786. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  787. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  788. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  789. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  790. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  791. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  792. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  793. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  794. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  795. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  796. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  797. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  798. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  799. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  800. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  801. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  802. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  803. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  804. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  805. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  806. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  807. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  808. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  809. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  810. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  811. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  812. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  813. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  814. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  815. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  816. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  817. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  818. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  819. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  820. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  821. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  822. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  823. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  824. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  825. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  826. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  827. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  828. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  829. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  830. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  831. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  832. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  833. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  834. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  835. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  836. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  837. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  838. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  839. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  840. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  841. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  842. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  843. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  844. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  845. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  846. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  847. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  848. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  849. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  850. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  851. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  852. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  853. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  854. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  855. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  856. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  857. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  858. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  859. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  860. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  861. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  862. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  863. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  864. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  865. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  866. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  867. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  868. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  869. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  870. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  871. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  872. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  873. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  874. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  875. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  876. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  877. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  878. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  879. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  880. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  881. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  882. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  883. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  884. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  885. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  886. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  887. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  888. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  889. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  890. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  891. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  892. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  893. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  894. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  895. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  896. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  897. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  898. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  899. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  900. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  901. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  902. vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
  903. vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +85 -0
  904. vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +258 -0
  905. vllm/model_executor/layers/quantization/utils/fp8_utils.py +795 -0
  906. vllm/model_executor/layers/quantization/utils/gptq_utils.py +96 -0
  907. vllm/model_executor/layers/quantization/utils/int8_utils.py +492 -0
  908. vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
  909. vllm/model_executor/layers/quantization/utils/machete_utils.py +50 -0
  910. vllm/model_executor/layers/quantization/utils/marlin_utils.py +479 -0
  911. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +396 -0
  912. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +345 -0
  913. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
  914. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
  915. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +132 -0
  916. vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +20 -0
  917. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +137 -0
  918. vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +59 -0
  919. vllm/model_executor/layers/quantization/utils/petit_utils.py +122 -0
  920. vllm/model_executor/layers/quantization/utils/quant_utils.py +627 -0
  921. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +458 -0
  922. vllm/model_executor/layers/resampler.py +270 -0
  923. vllm/model_executor/layers/rotary_embedding/__init__.py +190 -0
  924. vllm/model_executor/layers/rotary_embedding/base.py +156 -0
  925. vllm/model_executor/layers/rotary_embedding/common.py +105 -0
  926. vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +140 -0
  927. vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +197 -0
  928. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +41 -0
  929. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +67 -0
  930. vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +80 -0
  931. vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
  932. vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
  933. vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +81 -0
  934. vllm/model_executor/layers/rotary_embedding/mrope.py +1140 -0
  935. vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +42 -0
  936. vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +129 -0
  937. vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +68 -0
  938. vllm/model_executor/layers/sampler.py +1198 -0
  939. vllm/model_executor/layers/shared_fused_moe/__init__.py +6 -0
  940. vllm/model_executor/layers/shared_fused_moe/shared_fused_moe.py +56 -0
  941. vllm/model_executor/layers/utils.py +196 -0
  942. vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
  943. vllm/model_executor/model_loader/__init__.py +138 -0
  944. vllm/model_executor/model_loader/base_loader.py +52 -0
  945. vllm/model_executor/model_loader/bitsandbytes_loader.py +787 -0
  946. vllm/model_executor/model_loader/default_loader.py +278 -0
  947. vllm/model_executor/model_loader/dummy_loader.py +28 -0
  948. vllm/model_executor/model_loader/gguf_loader.py +155 -0
  949. vllm/model_executor/model_loader/runai_streamer_loader.py +104 -0
  950. vllm/model_executor/model_loader/sharded_state_loader.py +199 -0
  951. vllm/model_executor/model_loader/tensorizer.py +743 -0
  952. vllm/model_executor/model_loader/tensorizer_loader.py +143 -0
  953. vllm/model_executor/model_loader/tpu.py +114 -0
  954. vllm/model_executor/model_loader/utils.py +271 -0
  955. vllm/model_executor/model_loader/weight_utils.py +946 -0
  956. vllm/model_executor/models/__init__.py +30 -0
  957. vllm/model_executor/models/adapters.py +542 -0
  958. vllm/model_executor/models/aimv2.py +246 -0
  959. vllm/model_executor/models/apertus.py +582 -0
  960. vllm/model_executor/models/arcee.py +423 -0
  961. vllm/model_executor/models/arctic.py +560 -0
  962. vllm/model_executor/models/aria.py +662 -0
  963. vllm/model_executor/models/aya_vision.py +470 -0
  964. vllm/model_executor/models/baichuan.py +475 -0
  965. vllm/model_executor/models/bailing_moe.py +529 -0
  966. vllm/model_executor/models/bamba.py +582 -0
  967. vllm/model_executor/models/bart.py +1343 -0
  968. vllm/model_executor/models/bert.py +613 -0
  969. vllm/model_executor/models/bert_with_rope.py +687 -0
  970. vllm/model_executor/models/blip.py +339 -0
  971. vllm/model_executor/models/blip2.py +716 -0
  972. vllm/model_executor/models/bloom.py +374 -0
  973. vllm/model_executor/models/chameleon.py +1141 -0
  974. vllm/model_executor/models/chatglm.py +479 -0
  975. vllm/model_executor/models/clip.py +407 -0
  976. vllm/model_executor/models/cohere2_vision.py +484 -0
  977. vllm/model_executor/models/commandr.py +467 -0
  978. vllm/model_executor/models/config.py +434 -0
  979. vllm/model_executor/models/constant_size_cache.py +137 -0
  980. vllm/model_executor/models/dbrx.py +473 -0
  981. vllm/model_executor/models/deepseek.py +491 -0
  982. vllm/model_executor/models/deepseek_eagle.py +241 -0
  983. vllm/model_executor/models/deepseek_mtp.py +282 -0
  984. vllm/model_executor/models/deepseek_v2.py +1058 -0
  985. vllm/model_executor/models/deepseek_vl2.py +661 -0
  986. vllm/model_executor/models/donut.py +387 -0
  987. vllm/model_executor/models/dots1.py +547 -0
  988. vllm/model_executor/models/ernie45.py +43 -0
  989. vllm/model_executor/models/ernie45_moe.py +608 -0
  990. vllm/model_executor/models/ernie45_vl.py +1510 -0
  991. vllm/model_executor/models/ernie45_vl_moe.py +728 -0
  992. vllm/model_executor/models/ernie_mtp.py +287 -0
  993. vllm/model_executor/models/exaone.py +552 -0
  994. vllm/model_executor/models/exaone4.py +535 -0
  995. vllm/model_executor/models/fairseq2_llama.py +154 -0
  996. vllm/model_executor/models/falcon.py +511 -0
  997. vllm/model_executor/models/falcon_h1.py +739 -0
  998. vllm/model_executor/models/florence2.py +1107 -0
  999. vllm/model_executor/models/fuyu.py +401 -0
  1000. vllm/model_executor/models/gemma.py +428 -0
  1001. vllm/model_executor/models/gemma2.py +425 -0
  1002. vllm/model_executor/models/gemma3.py +542 -0
  1003. vllm/model_executor/models/gemma3_mm.py +723 -0
  1004. vllm/model_executor/models/gemma3n.py +830 -0
  1005. vllm/model_executor/models/gemma3n_mm.py +767 -0
  1006. vllm/model_executor/models/glm.py +23 -0
  1007. vllm/model_executor/models/glm4.py +305 -0
  1008. vllm/model_executor/models/glm4_1v.py +1669 -0
  1009. vllm/model_executor/models/glm4_moe.py +703 -0
  1010. vllm/model_executor/models/glm4_moe_mtp.py +306 -0
  1011. vllm/model_executor/models/glm4v.py +654 -0
  1012. vllm/model_executor/models/gpt2.py +383 -0
  1013. vllm/model_executor/models/gpt_bigcode.py +346 -0
  1014. vllm/model_executor/models/gpt_j.py +340 -0
  1015. vllm/model_executor/models/gpt_neox.py +333 -0
  1016. vllm/model_executor/models/gpt_oss.py +687 -0
  1017. vllm/model_executor/models/granite.py +498 -0
  1018. vllm/model_executor/models/granite_speech.py +799 -0
  1019. vllm/model_executor/models/granitemoe.py +541 -0
  1020. vllm/model_executor/models/granitemoehybrid.py +684 -0
  1021. vllm/model_executor/models/granitemoeshared.py +342 -0
  1022. vllm/model_executor/models/gritlm.py +262 -0
  1023. vllm/model_executor/models/grok1.py +550 -0
  1024. vllm/model_executor/models/h2ovl.py +536 -0
  1025. vllm/model_executor/models/hunyuan_v1.py +937 -0
  1026. vllm/model_executor/models/hyperclovax_vision.py +1206 -0
  1027. vllm/model_executor/models/idefics2_vision_model.py +416 -0
  1028. vllm/model_executor/models/idefics3.py +758 -0
  1029. vllm/model_executor/models/interfaces.py +854 -0
  1030. vllm/model_executor/models/interfaces_base.py +195 -0
  1031. vllm/model_executor/models/intern_vit.py +481 -0
  1032. vllm/model_executor/models/internlm2.py +453 -0
  1033. vllm/model_executor/models/internlm2_ve.py +148 -0
  1034. vllm/model_executor/models/interns1.py +832 -0
  1035. vllm/model_executor/models/interns1_vit.py +418 -0
  1036. vllm/model_executor/models/internvl.py +1423 -0
  1037. vllm/model_executor/models/jais.py +374 -0
  1038. vllm/model_executor/models/jamba.py +630 -0
  1039. vllm/model_executor/models/jina_vl.py +144 -0
  1040. vllm/model_executor/models/keye.py +1684 -0
  1041. vllm/model_executor/models/keye_vl1_5.py +601 -0
  1042. vllm/model_executor/models/kimi_vl.py +620 -0
  1043. vllm/model_executor/models/lfm2.py +558 -0
  1044. vllm/model_executor/models/llama.py +671 -0
  1045. vllm/model_executor/models/llama4.py +732 -0
  1046. vllm/model_executor/models/llama4_eagle.py +241 -0
  1047. vllm/model_executor/models/llama_eagle.py +171 -0
  1048. vllm/model_executor/models/llama_eagle3.py +292 -0
  1049. vllm/model_executor/models/llava.py +872 -0
  1050. vllm/model_executor/models/llava_next.py +572 -0
  1051. vllm/model_executor/models/llava_next_video.py +479 -0
  1052. vllm/model_executor/models/llava_onevision.py +945 -0
  1053. vllm/model_executor/models/mamba.py +310 -0
  1054. vllm/model_executor/models/mamba2.py +346 -0
  1055. vllm/model_executor/models/mamba_cache.py +83 -0
  1056. vllm/model_executor/models/medusa.py +219 -0
  1057. vllm/model_executor/models/midashenglm.py +788 -0
  1058. vllm/model_executor/models/mimo.py +191 -0
  1059. vllm/model_executor/models/mimo_mtp.py +273 -0
  1060. vllm/model_executor/models/minicpm.py +593 -0
  1061. vllm/model_executor/models/minicpm3.py +230 -0
  1062. vllm/model_executor/models/minicpm_eagle.py +391 -0
  1063. vllm/model_executor/models/minicpmo.py +804 -0
  1064. vllm/model_executor/models/minicpmv.py +1786 -0
  1065. vllm/model_executor/models/minimax_cache.py +36 -0
  1066. vllm/model_executor/models/minimax_text_01.py +1027 -0
  1067. vllm/model_executor/models/minimax_vl_01.py +431 -0
  1068. vllm/model_executor/models/mistral3.py +628 -0
  1069. vllm/model_executor/models/mixtral.py +494 -0
  1070. vllm/model_executor/models/mllama.py +1697 -0
  1071. vllm/model_executor/models/mllama4.py +1079 -0
  1072. vllm/model_executor/models/mlp_speculator.py +206 -0
  1073. vllm/model_executor/models/modernbert.py +374 -0
  1074. vllm/model_executor/models/module_mapping.py +72 -0
  1075. vllm/model_executor/models/molmo.py +1569 -0
  1076. vllm/model_executor/models/moonvit.py +663 -0
  1077. vllm/model_executor/models/motif.py +345 -0
  1078. vllm/model_executor/models/mpt.py +332 -0
  1079. vllm/model_executor/models/nano_nemotron_vl.py +1395 -0
  1080. vllm/model_executor/models/nemotron.py +509 -0
  1081. vllm/model_executor/models/nemotron_h.py +633 -0
  1082. vllm/model_executor/models/nemotron_nas.py +484 -0
  1083. vllm/model_executor/models/nemotron_vl.py +655 -0
  1084. vllm/model_executor/models/nvlm_d.py +203 -0
  1085. vllm/model_executor/models/olmo.py +406 -0
  1086. vllm/model_executor/models/olmo2.py +428 -0
  1087. vllm/model_executor/models/olmoe.py +485 -0
  1088. vllm/model_executor/models/opt.py +413 -0
  1089. vllm/model_executor/models/orion.py +350 -0
  1090. vllm/model_executor/models/ovis.py +572 -0
  1091. vllm/model_executor/models/ovis2_5.py +644 -0
  1092. vllm/model_executor/models/paligemma.py +414 -0
  1093. vllm/model_executor/models/persimmon.py +345 -0
  1094. vllm/model_executor/models/phi.py +357 -0
  1095. vllm/model_executor/models/phi3.py +19 -0
  1096. vllm/model_executor/models/phi3v.py +701 -0
  1097. vllm/model_executor/models/phi4_multimodal.py +1478 -0
  1098. vllm/model_executor/models/phi4flash.py +737 -0
  1099. vllm/model_executor/models/phi4mm.py +1281 -0
  1100. vllm/model_executor/models/phi4mm_audio.py +1254 -0
  1101. vllm/model_executor/models/phi4mm_utils.py +1875 -0
  1102. vllm/model_executor/models/phimoe.py +681 -0
  1103. vllm/model_executor/models/pixtral.py +1348 -0
  1104. vllm/model_executor/models/plamo2.py +1126 -0
  1105. vllm/model_executor/models/qwen.py +363 -0
  1106. vllm/model_executor/models/qwen2.py +526 -0
  1107. vllm/model_executor/models/qwen2_5_omni_thinker.py +985 -0
  1108. vllm/model_executor/models/qwen2_5_vl.py +1256 -0
  1109. vllm/model_executor/models/qwen2_audio.py +492 -0
  1110. vllm/model_executor/models/qwen2_moe.py +558 -0
  1111. vllm/model_executor/models/qwen2_rm.py +122 -0
  1112. vllm/model_executor/models/qwen2_vl.py +1512 -0
  1113. vllm/model_executor/models/qwen3.py +344 -0
  1114. vllm/model_executor/models/qwen3_moe.py +704 -0
  1115. vllm/model_executor/models/qwen3_next.py +1298 -0
  1116. vllm/model_executor/models/qwen3_next_mtp.py +285 -0
  1117. vllm/model_executor/models/qwen_vl.py +795 -0
  1118. vllm/model_executor/models/registry.py +891 -0
  1119. vllm/model_executor/models/roberta.py +252 -0
  1120. vllm/model_executor/models/rvl.py +103 -0
  1121. vllm/model_executor/models/seed_oss.py +488 -0
  1122. vllm/model_executor/models/siglip.py +524 -0
  1123. vllm/model_executor/models/siglip2navit.py +688 -0
  1124. vllm/model_executor/models/skyworkr1v.py +914 -0
  1125. vllm/model_executor/models/smolvlm.py +44 -0
  1126. vllm/model_executor/models/solar.py +506 -0
  1127. vllm/model_executor/models/stablelm.py +344 -0
  1128. vllm/model_executor/models/starcoder2.py +357 -0
  1129. vllm/model_executor/models/step3_text.py +521 -0
  1130. vllm/model_executor/models/step3_vl.py +1091 -0
  1131. vllm/model_executor/models/swin.py +475 -0
  1132. vllm/model_executor/models/tarsier.py +649 -0
  1133. vllm/model_executor/models/telechat2.py +151 -0
  1134. vllm/model_executor/models/teleflm.py +79 -0
  1135. vllm/model_executor/models/terratorch.py +294 -0
  1136. vllm/model_executor/models/transformers.py +883 -0
  1137. vllm/model_executor/models/ultravox.py +667 -0
  1138. vllm/model_executor/models/utils.py +770 -0
  1139. vllm/model_executor/models/vision.py +125 -0
  1140. vllm/model_executor/models/voxtral.py +789 -0
  1141. vllm/model_executor/models/whisper.py +966 -0
  1142. vllm/model_executor/models/zamba2.py +1056 -0
  1143. vllm/model_executor/parameter.py +599 -0
  1144. vllm/model_executor/sampling_metadata.py +597 -0
  1145. vllm/model_executor/utils.py +97 -0
  1146. vllm/model_executor/warmup/__init__.py +0 -0
  1147. vllm/model_executor/warmup/deep_gemm_warmup.py +223 -0
  1148. vllm/model_executor/warmup/kernel_warmup.py +83 -0
  1149. vllm/multimodal/__init__.py +35 -0
  1150. vllm/multimodal/audio.py +116 -0
  1151. vllm/multimodal/base.py +219 -0
  1152. vllm/multimodal/cache.py +507 -0
  1153. vllm/multimodal/hasher.py +110 -0
  1154. vllm/multimodal/image.py +130 -0
  1155. vllm/multimodal/inputs.py +979 -0
  1156. vllm/multimodal/parse.py +496 -0
  1157. vllm/multimodal/processing.py +1921 -0
  1158. vllm/multimodal/profiling.py +313 -0
  1159. vllm/multimodal/registry.py +375 -0
  1160. vllm/multimodal/utils.py +754 -0
  1161. vllm/multimodal/video.py +312 -0
  1162. vllm/outputs.py +517 -0
  1163. vllm/platforms/__init__.py +263 -0
  1164. vllm/platforms/cpu.py +353 -0
  1165. vllm/platforms/cuda.py +731 -0
  1166. vllm/platforms/interface.py +599 -0
  1167. vllm/platforms/rocm.py +504 -0
  1168. vllm/platforms/tpu.py +236 -0
  1169. vllm/platforms/xpu.py +243 -0
  1170. vllm/plugins/__init__.py +72 -0
  1171. vllm/plugins/io_processors/__init__.py +68 -0
  1172. vllm/plugins/io_processors/interface.py +67 -0
  1173. vllm/plugins/lora_resolvers/README.md +16 -0
  1174. vllm/plugins/lora_resolvers/__init__.py +0 -0
  1175. vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
  1176. vllm/pooling_params.py +183 -0
  1177. vllm/profiler/__init__.py +0 -0
  1178. vllm/profiler/layerwise_profile.py +375 -0
  1179. vllm/profiler/utils.py +148 -0
  1180. vllm/py.typed +2 -0
  1181. vllm/ray/__init__.py +0 -0
  1182. vllm/ray/lazy_utils.py +22 -0
  1183. vllm/ray/ray_env.py +72 -0
  1184. vllm/reasoning/__init__.py +25 -0
  1185. vllm/reasoning/abs_reasoning_parsers.py +202 -0
  1186. vllm/reasoning/deepseek_r1_reasoning_parser.py +173 -0
  1187. vllm/reasoning/glm4_moe_reasoning_parser.py +151 -0
  1188. vllm/reasoning/gptoss_reasoning_parser.py +87 -0
  1189. vllm/reasoning/granite_reasoning_parser.py +363 -0
  1190. vllm/reasoning/hunyuan_a13b_reasoning_parser.py +245 -0
  1191. vllm/reasoning/mistral_reasoning_parser.py +47 -0
  1192. vllm/reasoning/qwen3_reasoning_parser.py +151 -0
  1193. vllm/reasoning/step3_reasoning_parser.py +109 -0
  1194. vllm/sampling_params.py +577 -0
  1195. vllm/scalar_type.py +349 -0
  1196. vllm/scripts.py +15 -0
  1197. vllm/sequence.py +1465 -0
  1198. vllm/tasks.py +11 -0
  1199. vllm/test_utils.py +130 -0
  1200. vllm/third_party/__init__.py +0 -0
  1201. vllm/third_party/pynvml.py +6140 -0
  1202. vllm/tracing.py +136 -0
  1203. vllm/transformers_utils/__init__.py +24 -0
  1204. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1205. vllm/transformers_utils/chat_templates/registry.py +71 -0
  1206. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1207. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1208. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1209. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1210. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1211. vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
  1212. vllm/transformers_utils/config.py +1043 -0
  1213. vllm/transformers_utils/config_parser_base.py +20 -0
  1214. vllm/transformers_utils/configs/__init__.py +55 -0
  1215. vllm/transformers_utils/configs/arctic.py +207 -0
  1216. vllm/transformers_utils/configs/chatglm.py +72 -0
  1217. vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
  1218. vllm/transformers_utils/configs/eagle.py +84 -0
  1219. vllm/transformers_utils/configs/falcon.py +90 -0
  1220. vllm/transformers_utils/configs/jais.py +238 -0
  1221. vllm/transformers_utils/configs/kimi_vl.py +37 -0
  1222. vllm/transformers_utils/configs/medusa.py +63 -0
  1223. vllm/transformers_utils/configs/midashenglm.py +101 -0
  1224. vllm/transformers_utils/configs/mistral.py +165 -0
  1225. vllm/transformers_utils/configs/mlp_speculator.py +68 -0
  1226. vllm/transformers_utils/configs/moonvit.py +33 -0
  1227. vllm/transformers_utils/configs/nemotron.py +205 -0
  1228. vllm/transformers_utils/configs/nemotron_h.py +259 -0
  1229. vllm/transformers_utils/configs/nemotron_vl.py +56 -0
  1230. vllm/transformers_utils/configs/ovis.py +176 -0
  1231. vllm/transformers_utils/configs/qwen3_next.py +275 -0
  1232. vllm/transformers_utils/configs/speculators/__init__.py +2 -0
  1233. vllm/transformers_utils/configs/speculators/algos.py +32 -0
  1234. vllm/transformers_utils/configs/speculators/base.py +91 -0
  1235. vllm/transformers_utils/configs/step3_vl.py +123 -0
  1236. vllm/transformers_utils/configs/ultravox.py +120 -0
  1237. vllm/transformers_utils/detokenizer.py +169 -0
  1238. vllm/transformers_utils/detokenizer_utils.py +199 -0
  1239. vllm/transformers_utils/dynamic_module.py +60 -0
  1240. vllm/transformers_utils/processor.py +245 -0
  1241. vllm/transformers_utils/processors/__init__.py +16 -0
  1242. vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
  1243. vllm/transformers_utils/processors/ovis.py +420 -0
  1244. vllm/transformers_utils/processors/ovis2_5.py +458 -0
  1245. vllm/transformers_utils/runai_utils.py +99 -0
  1246. vllm/transformers_utils/s3_utils.py +90 -0
  1247. vllm/transformers_utils/tokenizer.py +293 -0
  1248. vllm/transformers_utils/tokenizer_base.py +149 -0
  1249. vllm/transformers_utils/tokenizer_group.py +132 -0
  1250. vllm/transformers_utils/tokenizers/__init__.py +10 -0
  1251. vllm/transformers_utils/tokenizers/mistral.py +520 -0
  1252. vllm/transformers_utils/utils.py +99 -0
  1253. vllm/triton_utils/__init__.py +16 -0
  1254. vllm/triton_utils/importing.py +95 -0
  1255. vllm/usage/__init__.py +0 -0
  1256. vllm/usage/usage_lib.py +259 -0
  1257. vllm/utils/__init__.py +3438 -0
  1258. vllm/utils/deep_gemm.py +212 -0
  1259. vllm/utils/flashinfer.py +372 -0
  1260. vllm/utils/jsontree.py +90 -0
  1261. vllm/utils/tensor_schema.py +236 -0
  1262. vllm/v1/__init__.py +0 -0
  1263. vllm/v1/attention/__init__.py +0 -0
  1264. vllm/v1/attention/backends/__init__.py +0 -0
  1265. vllm/v1/attention/backends/cpu_attn.py +922 -0
  1266. vllm/v1/attention/backends/flash_attn.py +800 -0
  1267. vllm/v1/attention/backends/flashinfer.py +1128 -0
  1268. vllm/v1/attention/backends/flex_attention.py +796 -0
  1269. vllm/v1/attention/backends/gdn_attn.py +320 -0
  1270. vllm/v1/attention/backends/linear_attn.py +68 -0
  1271. vllm/v1/attention/backends/mamba1_attn.py +81 -0
  1272. vllm/v1/attention/backends/mamba2_attn.py +224 -0
  1273. vllm/v1/attention/backends/mamba_attn.py +52 -0
  1274. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1275. vllm/v1/attention/backends/mla/common.py +1608 -0
  1276. vllm/v1/attention/backends/mla/cutlass_mla.py +301 -0
  1277. vllm/v1/attention/backends/mla/flashattn_mla.py +273 -0
  1278. vllm/v1/attention/backends/mla/flashinfer_mla.py +110 -0
  1279. vllm/v1/attention/backends/mla/flashmla.py +213 -0
  1280. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +255 -0
  1281. vllm/v1/attention/backends/mla/triton_mla.py +175 -0
  1282. vllm/v1/attention/backends/pallas.py +413 -0
  1283. vllm/v1/attention/backends/rocm_aiter_fa.py +548 -0
  1284. vllm/v1/attention/backends/short_conv_attn.py +82 -0
  1285. vllm/v1/attention/backends/tree_attn.py +450 -0
  1286. vllm/v1/attention/backends/triton_attn.py +430 -0
  1287. vllm/v1/attention/backends/utils.py +834 -0
  1288. vllm/v1/attention/backends/xformers.py +437 -0
  1289. vllm/v1/core/__init__.py +0 -0
  1290. vllm/v1/core/block_pool.py +330 -0
  1291. vllm/v1/core/encoder_cache_manager.py +333 -0
  1292. vllm/v1/core/kv_cache_coordinator.py +440 -0
  1293. vllm/v1/core/kv_cache_manager.py +398 -0
  1294. vllm/v1/core/kv_cache_utils.py +1169 -0
  1295. vllm/v1/core/sched/__init__.py +0 -0
  1296. vllm/v1/core/sched/async_scheduler.py +47 -0
  1297. vllm/v1/core/sched/interface.py +158 -0
  1298. vllm/v1/core/sched/output.py +162 -0
  1299. vllm/v1/core/sched/request_queue.py +224 -0
  1300. vllm/v1/core/sched/scheduler.py +1287 -0
  1301. vllm/v1/core/sched/utils.py +69 -0
  1302. vllm/v1/core/single_type_kv_cache_manager.py +670 -0
  1303. vllm/v1/cudagraph_dispatcher.py +121 -0
  1304. vllm/v1/engine/__init__.py +202 -0
  1305. vllm/v1/engine/async_llm.py +757 -0
  1306. vllm/v1/engine/coordinator.py +357 -0
  1307. vllm/v1/engine/core.py +1245 -0
  1308. vllm/v1/engine/core_client.py +1333 -0
  1309. vllm/v1/engine/detokenizer.py +300 -0
  1310. vllm/v1/engine/exceptions.py +17 -0
  1311. vllm/v1/engine/llm_engine.py +332 -0
  1312. vllm/v1/engine/logprobs.py +201 -0
  1313. vllm/v1/engine/output_processor.py +558 -0
  1314. vllm/v1/engine/parallel_sampling.py +133 -0
  1315. vllm/v1/engine/processor.py +524 -0
  1316. vllm/v1/engine/utils.py +857 -0
  1317. vllm/v1/executor/__init__.py +0 -0
  1318. vllm/v1/executor/abstract.py +126 -0
  1319. vllm/v1/executor/multiproc_executor.py +683 -0
  1320. vllm/v1/executor/ray_distributed_executor.py +109 -0
  1321. vllm/v1/kv_cache_interface.py +275 -0
  1322. vllm/v1/metrics/__init__.py +0 -0
  1323. vllm/v1/metrics/loggers.py +717 -0
  1324. vllm/v1/metrics/prometheus.py +82 -0
  1325. vllm/v1/metrics/ray_wrappers.py +133 -0
  1326. vllm/v1/metrics/reader.py +246 -0
  1327. vllm/v1/metrics/stats.py +248 -0
  1328. vllm/v1/outputs.py +147 -0
  1329. vllm/v1/pool/__init__.py +0 -0
  1330. vllm/v1/pool/metadata.py +77 -0
  1331. vllm/v1/request.py +237 -0
  1332. vllm/v1/sample/__init__.py +0 -0
  1333. vllm/v1/sample/logits_processor/__init__.py +294 -0
  1334. vllm/v1/sample/logits_processor/builtin.py +273 -0
  1335. vllm/v1/sample/logits_processor/interface.py +97 -0
  1336. vllm/v1/sample/logits_processor/state.py +161 -0
  1337. vllm/v1/sample/metadata.py +43 -0
  1338. vllm/v1/sample/ops/__init__.py +0 -0
  1339. vllm/v1/sample/ops/bad_words.py +39 -0
  1340. vllm/v1/sample/ops/logprobs.py +26 -0
  1341. vllm/v1/sample/ops/penalties.py +43 -0
  1342. vllm/v1/sample/ops/topk_topp_sampler.py +254 -0
  1343. vllm/v1/sample/rejection_sampler.py +623 -0
  1344. vllm/v1/sample/sampler.py +281 -0
  1345. vllm/v1/sample/tpu/__init__.py +0 -0
  1346. vllm/v1/sample/tpu/metadata.py +124 -0
  1347. vllm/v1/sample/tpu/sampler.py +213 -0
  1348. vllm/v1/serial_utils.py +395 -0
  1349. vllm/v1/spec_decode/__init__.py +0 -0
  1350. vllm/v1/spec_decode/eagle.py +740 -0
  1351. vllm/v1/spec_decode/medusa.py +66 -0
  1352. vllm/v1/spec_decode/metadata.py +62 -0
  1353. vllm/v1/spec_decode/metrics.py +191 -0
  1354. vllm/v1/spec_decode/ngram_proposer.py +157 -0
  1355. vllm/v1/spec_decode/utils.py +14 -0
  1356. vllm/v1/structured_output/__init__.py +297 -0
  1357. vllm/v1/structured_output/backend_guidance.py +245 -0
  1358. vllm/v1/structured_output/backend_lm_format_enforcer.py +167 -0
  1359. vllm/v1/structured_output/backend_outlines.py +320 -0
  1360. vllm/v1/structured_output/backend_types.py +134 -0
  1361. vllm/v1/structured_output/backend_xgrammar.py +323 -0
  1362. vllm/v1/structured_output/request.py +86 -0
  1363. vllm/v1/structured_output/utils.py +373 -0
  1364. vllm/v1/utils.py +382 -0
  1365. vllm/v1/worker/__init__.py +0 -0
  1366. vllm/v1/worker/block_table.py +221 -0
  1367. vllm/v1/worker/cpu_model_runner.py +163 -0
  1368. vllm/v1/worker/cpu_worker.py +183 -0
  1369. vllm/v1/worker/gpu_input_batch.py +821 -0
  1370. vllm/v1/worker/gpu_model_runner.py +3743 -0
  1371. vllm/v1/worker/gpu_worker.py +697 -0
  1372. vllm/v1/worker/kv_connector_model_runner_mixin.py +122 -0
  1373. vllm/v1/worker/lora_model_runner_mixin.py +192 -0
  1374. vllm/v1/worker/tpu_input_batch.py +585 -0
  1375. vllm/v1/worker/tpu_model_runner.py +1947 -0
  1376. vllm/v1/worker/tpu_worker.py +340 -0
  1377. vllm/v1/worker/utils.py +290 -0
  1378. vllm/v1/worker/worker_base.py +65 -0
  1379. vllm/v1/worker/xpu_model_runner.py +53 -0
  1380. vllm/v1/worker/xpu_worker.py +179 -0
  1381. vllm/version.py +41 -0
  1382. vllm/vllm_flash_attn/.gitkeep +0 -0
  1383. vllm/worker/__init__.py +0 -0
  1384. vllm/worker/cache_engine.py +145 -0
  1385. vllm/worker/enc_dec_model_runner.py +553 -0
  1386. vllm/worker/model_runner.py +2016 -0
  1387. vllm/worker/model_runner_base.py +307 -0
  1388. vllm/worker/utils.py +49 -0
  1389. vllm/worker/worker.py +670 -0
  1390. vllm/worker/worker_base.py +651 -0
  1391. vllm_cpu_avx512vnni-0.10.2.post2.dist-info/METADATA +326 -0
  1392. vllm_cpu_avx512vnni-0.10.2.post2.dist-info/RECORD +1395 -0
  1393. vllm_cpu_avx512vnni-0.10.2.post2.dist-info/WHEEL +5 -0
  1394. vllm_cpu_avx512vnni-0.10.2.post2.dist-info/entry_points.txt +5 -0
  1395. vllm_cpu_avx512vnni-0.10.2.post2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1591 @@
+ # SPDX-License-Identifier: Apache-2.0
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+
+ import asyncio
+ import json
+ import time
+ from collections.abc import AsyncGenerator, AsyncIterator
+ from collections.abc import Sequence as GenericSequence
+ from typing import Callable, Final, Optional, Union
+
+ import jinja2
+ import partial_json_parser
+ import regex as re
+ from fastapi import Request
+ from openai_harmony import Message as OpenAIMessage
+ from pydantic import TypeAdapter
+
+ from vllm.config import ModelConfig
+ from vllm.engine.protocol import EngineClient
+ from vllm.entrypoints.chat_utils import (ChatTemplateContentFormatOption,
+                                          ConversationMessage,
+                                          get_history_tool_calls_cnt,
+                                          make_tool_call_id)
+ from vllm.entrypoints.harmony_utils import (
+     get_developer_message, get_stop_tokens_for_assistant_actions,
+     get_streamable_parser_for_assistant, get_system_message, parse_chat_input,
+     parse_chat_output, render_for_completion)
+ from vllm.entrypoints.logger import RequestLogger
+ from vllm.entrypoints.openai.protocol import (
+     ChatCompletionLogProb, ChatCompletionLogProbs,
+     ChatCompletionLogProbsContent, ChatCompletionNamedToolChoiceParam,
+     ChatCompletionRequest, ChatCompletionResponse,
+     ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice,
+     ChatCompletionStreamResponse, ChatMessage, DeltaFunctionCall, DeltaMessage,
+     DeltaToolCall, ErrorResponse, FunctionCall, FunctionDefinition,
+     PromptTokenUsageInfo, RequestResponseMetadata, ToolCall, UsageInfo)
+ from vllm.entrypoints.openai.serving_engine import (OpenAIServing,
+                                                     clamp_prompt_logprobs)
+ from vllm.entrypoints.openai.serving_models import OpenAIServingModels
+ from vllm.entrypoints.openai.tool_parsers import ToolParser, ToolParserManager
+ from vllm.entrypoints.openai.tool_parsers.mistral_tool_parser import (
+     MistralToolCall)
+ from vllm.entrypoints.utils import get_max_tokens
+ from vllm.inputs.data import TokensPrompt as EngineTokensPrompt
+ from vllm.logger import init_logger
+ from vllm.logprobs import Logprob
+ from vllm.outputs import CompletionOutput, RequestOutput
+ from vllm.reasoning import ReasoningParser, ReasoningParserManager
+ from vllm.sampling_params import BeamSearchParams, SamplingParams
+ from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer
+ from vllm.transformers_utils.tokenizers import (maybe_serialize_tool_calls,
+                                                 truncate_tool_call_ids,
+                                                 validate_request_params)
+ from vllm.utils import as_list
+
+ logger = init_logger(__name__)
+
+
+ class OpenAIServingChat(OpenAIServing):
+
+     def __init__(
+         self,
+         engine_client: EngineClient,
+         model_config: ModelConfig,
+         models: OpenAIServingModels,
+         response_role: str,
+         *,
+         request_logger: Optional[RequestLogger],
+         chat_template: Optional[str],
+         chat_template_content_format: ChatTemplateContentFormatOption,
+         return_tokens_as_token_ids: bool = False,
+         reasoning_parser: str = "",
+         enable_auto_tools: bool = False,
+         exclude_tools_when_tool_choice_none: bool = False,
+         tool_parser: Optional[str] = None,
+         enable_prompt_tokens_details: bool = False,
+         enable_force_include_usage: bool = False,
+         enable_log_outputs: bool = False,
+         log_error_stack: bool = False,
+     ) -> None:
+         super().__init__(engine_client=engine_client,
+                          model_config=model_config,
+                          models=models,
+                          request_logger=request_logger,
+                          return_tokens_as_token_ids=return_tokens_as_token_ids,
+                          enable_force_include_usage=enable_force_include_usage,
+                          log_error_stack=log_error_stack)
+
+         self.response_role = response_role
+         self.chat_template = chat_template
+         self.chat_template_content_format: Final = chat_template_content_format
+         self.enable_log_outputs = enable_log_outputs
+
+         # set up tool use
+         self.enable_auto_tools: bool = enable_auto_tools
+         if self.enable_auto_tools:
+             logger.info(
+                 "\"auto\" tool choice has been enabled; note that while"
+                 " the parallel_tool_calls client option is accepted for "
+                 "compatibility reasons, it will be ignored.")
+
+         self.reasoning_parser: Optional[Callable[[AnyTokenizer],
+                                                  ReasoningParser]] = None
+         if reasoning_parser:
+             try:
+                 self.reasoning_parser = (
+                     ReasoningParserManager.get_reasoning_parser(
+                         reasoning_parser))
+                 assert self.reasoning_parser is not None
+             except Exception as e:
+                 raise TypeError(
+                     f"{reasoning_parser=} has not been registered") from e
+         self.tool_parser: Optional[Callable[[AnyTokenizer], ToolParser]] = None
+         if self.enable_auto_tools:
+             try:
+                 if (tool_parser == "pythonic" and
+                         model_config.model.startswith("meta-llama/Llama-3.2")):
+                     logger.warning(
+                         "Llama3.2 models may struggle to emit valid pythonic"
+                         " tool calls")
+                 self.tool_parser = ToolParserManager.get_tool_parser(
+                     tool_parser)
+             except Exception as e:
+                 raise TypeError("Error: --enable-auto-tool-choice requires "
+                                 f"tool_parser:'{tool_parser}' which has not "
+                                 "been registered") from e
+         self.exclude_tools_when_tool_choice_none = (
+             exclude_tools_when_tool_choice_none)
+
+         self.enable_prompt_tokens_details = enable_prompt_tokens_details
+         self.enable_force_include_usage = enable_force_include_usage
+         self.default_sampling_params = (
+             self.model_config.get_diff_sampling_param())
+         if self.default_sampling_params:
+             source = self.model_config.generation_config
+             source = "model" if source == "auto" else source
+             logger.info("Using default chat sampling params from %s: %s",
+                         source, self.default_sampling_params)
+         if self.model_config.hf_config.model_type == 'kimi_k2':
+             self.tool_call_id_type = 'kimi_k2'
+         else:
+             self.tool_call_id_type = 'random'
+
+         self.use_harmony = model_config.hf_config.model_type == "gpt_oss"
+         if self.use_harmony:
+             if "stop_token_ids" not in self.default_sampling_params:
+                 self.default_sampling_params["stop_token_ids"] = []
+             self.default_sampling_params["stop_token_ids"].extend(
+                 get_stop_tokens_for_assistant_actions())
+
+         # NOTE(woosuk): While OpenAI's chat completion API supports browsing
+         # for some models, currently vLLM doesn't support it. Please use the
+         # Responses API instead.
+         self.supports_browsing = False
+         self.browser_tool = None
+         # NOTE(woosuk): Chat completion API does not support code interpreter.
+         # Please use the Responses API instead.
+         self.supports_code_interpreter = False
+         self.python_tool = None
+
+     async def create_chat_completion(
+         self,
+         request: ChatCompletionRequest,
+         raw_request: Optional[Request] = None,
+     ) -> Union[AsyncGenerator[str, None], ChatCompletionResponse,
+                ErrorResponse]:
+         """
+         Chat Completion API similar to OpenAI's API.
+
+         See https://platform.openai.com/docs/api-reference/chat/create
+         for the API specification. This API mimics the OpenAI
+         Chat Completion API.
+         """
+         error_check_ret = await self._check_model(request)
+         if error_check_ret is not None:
+             logger.error("Error with model %s", error_check_ret)
+             return error_check_ret
+
+         # If the engine is dead, raise the engine's DEAD_ERROR.
+         # This is required for the streaming case, where we return a
+         # success status before we actually start generating text :).
+         if self.engine_client.errored:
+             raise self.engine_client.dead_error
+
+         try:
+             lora_request = self._maybe_get_adapters(
+                 request, supports_default_mm_loras=True)
+
+             model_name = self.models.model_name(lora_request)
+
+             tokenizer = await self.engine_client.get_tokenizer(lora_request)
+
+             tool_parser = self.tool_parser
+
+             if isinstance(tokenizer, MistralTokenizer):
+                 # because of issues with pydantic we need to potentially
+                 # re-serialize the tool_calls field of the request
+                 # for more info: see comment in `maybe_serialize_tool_calls`
+                 maybe_serialize_tool_calls(request)
+                 truncate_tool_call_ids(request)
+                 validate_request_params(request)
+
+             if (request.tool_choice == "auto" and
+                     not (self.enable_auto_tools and tool_parser is not None)
+                     and not isinstance(tokenizer, MistralTokenizer)
+                     and not self.use_harmony):
+                 # for hf tokenizers, "auto" tool choice requires
+                 # --enable-auto-tool-choice and --tool-call-parser
+                 return self.create_error_response(
+                     "\"auto\" tool choice requires "
+                     "--enable-auto-tool-choice and --tool-call-parser to be set"
+                 )
+
+             if (request.tools is None
+                     or (request.tool_choice == "none"
+                         and self.exclude_tools_when_tool_choice_none)):
+                 tool_dicts = None
+             else:
+                 tool_dicts = [tool.model_dump() for tool in request.tools]
+
+             if not self.use_harmony:
+                 # Common case.
+                 (
+                     conversation,
+                     request_prompts,
+                     engine_prompts,
+                 ) = await self._preprocess_chat(
+                     request,
+                     tokenizer,
+                     request.messages,
+                     chat_template=request.chat_template or self.chat_template,
+                     chat_template_content_format=self.
+                     chat_template_content_format,
+                     add_generation_prompt=request.add_generation_prompt,
+                     continue_final_message=request.continue_final_message,
+                     tool_dicts=tool_dicts,
+                     documents=request.documents,
+                     chat_template_kwargs=request.chat_template_kwargs,
+                     tool_parser=tool_parser,
+                     add_special_tokens=request.add_special_tokens,
+                 )
+             else:
+                 # For GPT-OSS.
+                 (
+                     conversation,
+                     request_prompts,
+                     engine_prompts,
+                 ) = self._make_request_with_harmony(request)
+         except (ValueError, TypeError, RuntimeError,
+                 jinja2.TemplateError) as e:
+             logger.exception("Error in preprocessing prompt inputs")
+             return self.create_error_response(f"{e} {e.__cause__}")
+
+         request_id = "chatcmpl-" \
+                      f"{self._base_request_id(raw_request, request.request_id)}"
+
+         request_metadata = RequestResponseMetadata(request_id=request_id)
+         if raw_request:
+             raw_request.state.request_metadata = request_metadata
+
+         # Schedule the request and get the result generator.
+         generators: list[AsyncGenerator[RequestOutput, None]] = []
+         try:
+             for i, engine_prompt in enumerate(engine_prompts):
+                 sampling_params: Union[SamplingParams, BeamSearchParams]
+
+                 if self.default_sampling_params is None:
+                     self.default_sampling_params = {}
+
+                 max_tokens = get_max_tokens(
+                     max_model_len=self.max_model_len,
+                     request=request,
+                     input_length=len(engine_prompt["prompt_token_ids"]),
+                     default_sampling_params=self.default_sampling_params)
+
+                 if request.use_beam_search:
+                     sampling_params = request.to_beam_search_params(
+                         max_tokens, self.default_sampling_params)
+                 else:
+                     sampling_params = request.to_sampling_params(
+                         max_tokens, self.model_config.logits_processor_pattern,
+                         self.default_sampling_params)
+
+                 self._log_inputs(request_id,
+                                  request_prompts[i],
+                                  params=sampling_params,
+                                  lora_request=lora_request)
+
+                 trace_headers = (None if raw_request is None else await
+                                  self._get_trace_headers(raw_request.headers))
+
+                 if isinstance(sampling_params, BeamSearchParams):
+                     generator = self.engine_client.beam_search(
+                         prompt=engine_prompt,
+                         request_id=request_id,
+                         params=sampling_params,
+                         lora_request=lora_request,
+                     )
+                 else:
+                     generator = self.engine_client.generate(
+                         engine_prompt,
+                         sampling_params,
+                         request_id,
+                         lora_request=lora_request,
+                         trace_headers=trace_headers,
+                         priority=request.priority,
+                     )
+
+                 generators.append(generator)
+         except ValueError as e:
+             # TODO: Use a vllm-specific Validation Error
+             return self.create_error_response(str(e))
+
+         assert len(generators) == 1
+         result_generator, = generators
+
+         # Streaming response
+         if request.stream:
+             return self.chat_completion_stream_generator(
+                 request,
+                 result_generator,
+                 request_id,
+                 model_name,
+                 conversation,
+                 tokenizer,
+                 request_metadata,
+                 enable_force_include_usage=self.enable_force_include_usage)
+
+         try:
+             return await self.chat_completion_full_generator(
+                 request, result_generator, request_id, model_name,
+                 conversation, tokenizer, request_metadata)
+         except ValueError as e:
+             # TODO: Use a vllm-specific Validation Error
+             return self.create_error_response(str(e))
+
+     def get_chat_request_role(self, request: ChatCompletionRequest) -> str:
+         if request.add_generation_prompt:
+             return self.response_role
+         return request.messages[-1]["role"]
+
+     @staticmethod
+     def _bracket_level(s: str, opening='{', closing='}') -> int:
+         """
+         Calculate the current level of nested brackets in a given string.
+         """
+         level = 0
+         for char in s:
+             if char == opening:
+                 level += 1
+             elif char == closing:
+                 level -= 1
+         return level
+
+     @staticmethod
+     def _filter_delta_text(delta_text: str,
+                            previous_text: str) -> tuple[str, bool]:
+         # remove the last '},' of the tool definition stemming from the
+         # "name"/"parameters" outer object, or the closing ']' of the tool
+         # list: count occurrences of opening and closing curly braces and,
+         # once level 0 is reached, stop outputting text;
+         # if 0 is reached while parsing the delta_text we know the current
+         # tool will finish in this iteration
+         bracket_level = OpenAIServingChat._bracket_level(previous_text)
+         updated_delta, passed_zero = "", False
+         for c in delta_text:
+             if c == '{':
+                 bracket_level += 1
+                 passed_zero = bracket_level == 0
+             elif c == '}':
+                 bracket_level -= 1
+                 passed_zero = bracket_level == 0
+
+             if bracket_level != 0:
+                 updated_delta += c
+             else:
+                 # if a comma is reached at level 0 we can stop
+                 if c == ',':
+                     break
+         return updated_delta, passed_zero
+
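To make the brace-counting trick above concrete, here is an editorial trace (a sketch, not part of the wheel); the inputs are hypothetical fragments of a tool_choice="required" generation in the [{"name": ..., "parameters": ...}] shape:

    # Sketch only: exercising the two static helpers above by hand.
    previous = '[{"name": "get_weather", "parameters": {"city": '
    delta = '"Paris"}}, {"na'

    # Two '{' from the streamed prefix are still unclosed.
    assert OpenAIServingChat._bracket_level(previous) == 2

    # The tool object's final '}' and the following ',' are filtered out,
    # and the flag reports that this tool call completed in this delta.
    kept, finished = OpenAIServingChat._filter_delta_text(delta, previous)
    assert kept == '"Paris"}' and finished is True
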
+     def extract_tool_call_required_streaming(
+         self,
+         previous_text: str,
+         current_text: Optional[str],
+         delta_text: str,
+         function_name_returned: bool,
+         tool_call_idx: Optional[int] = None
+     ) -> tuple[Optional[DeltaMessage], bool]:
+         if current_text is None or current_text == "":
+             # if the current text is empty, we cannot parse it
+             return None, function_name_returned
+         try:
+             obj = partial_json_parser.loads(current_text)
+         except partial_json_parser.core.exceptions.MalformedJSON:
+             logger.debug('not enough tokens to parse into JSON yet')
+             obj = None
+
+         # check if the current text is a valid array
+         # containing a partial tool calling object;
+         # if not, wait for the next delta and try again
+         if obj is None or not isinstance(obj, list) or not len(obj) > 0:
+             function_name_returned = False
+             delta_message = None
+         else:
+             _, finishes_previous_tool = OpenAIServingChat._filter_delta_text(
+                 delta_text, previous_text)
+             # take the last tool call from the generated list
+             current_tool_call = obj[-1]
+
+             # once parameters have been generated the name is complete as well
+             if not finishes_previous_tool and ("name" not in current_tool_call
+                                                or "parameters"
+                                                not in current_tool_call):
+                 function_name_returned = False
+                 delta_message = None
+             else:
+                 if not function_name_returned:
+                     # get partly generated arguments from the latest tool call
+                     param_match = re.search(r'.*"parameters":\s*(.*)',
+                                             current_text)
+                     arguments = param_match.group(1) if param_match else ""
+                     arguments, _ = OpenAIServingChat._filter_delta_text(
+                         arguments, previous_text)
+
+                     # if this iteration finishes a previous tool call but a
+                     # new incomplete tool is already generated, take the
+                     # previous from the list
+                     if (finishes_previous_tool
+                             and "parameters" not in current_tool_call):
+                         current_tool_call = obj[-2]
+
+                     function_name_returned = True
+                     tool_call_id = make_tool_call_id(
+                         id_type=self.tool_call_id_type,
+                         func_name=current_tool_call["name"],
+                         idx=tool_call_idx)
+                     delta_message = DeltaMessage(tool_calls=[
+                         DeltaToolCall(id=tool_call_id,
+                                       function=DeltaFunctionCall(
+                                           name=current_tool_call["name"],
+                                           arguments=arguments),
+                                       index=len(obj) - 1,
+                                       type="function")
+                     ])
+
+                 else:
+                     delta_text, _ = OpenAIServingChat._filter_delta_text(
+                         delta_text, previous_text)
+
+                     if delta_text != "":
+                         delta_message = DeltaMessage(tool_calls=[
+                             DeltaToolCall(
+                                 function=DeltaFunctionCall(
+                                     # OpenAI API returns None
+                                     # instead of name every time
+                                     name=None,
+                                     arguments=delta_text),
+                                 index=len(obj) - 1)
+                         ])
+                     else:
+                         delta_message = None
+
+         return delta_message, function_name_returned
+
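The method above leans on partial_json_parser being tolerant of truncated input. A minimal editorial sketch of that behaviour, mirroring the try/except used in the method (the input string is hypothetical):

    # Sketch only: loads() returns a best-effort Python object for
    # incomplete JSON and raises MalformedJSON while the prefix cannot
    # be interpreted yet.
    import partial_json_parser

    try:
        obj = partial_json_parser.loads('[{"name": "get_weather", "param')
    except partial_json_parser.core.exceptions.MalformedJSON:
        obj = None  # not parseable yet; wait for the next delta
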
+     async def chat_completion_stream_generator(
+         self,
+         request: ChatCompletionRequest,
+         result_generator: AsyncIterator[RequestOutput],
+         request_id: str,
+         model_name: str,
+         conversation: list[ConversationMessage],
+         tokenizer: AnyTokenizer,
+         request_metadata: RequestResponseMetadata,
+         enable_force_include_usage: bool,
+     ) -> AsyncGenerator[str, None]:
+         created_time = int(time.time())
+         chunk_object_type: Final = "chat.completion.chunk"
+         first_iteration = True
+
+         # Send response for each token for each request.n (index)
+         num_choices = 1 if request.n is None else request.n
+         previous_num_tokens = [0] * num_choices
+         finish_reason_sent = [False] * num_choices
+         num_prompt_tokens = 0
+         num_cached_tokens = None
+         if self.use_harmony:
+             harmony_parsers = [
+                 get_streamable_parser_for_assistant()
+                 for _ in range(num_choices)
+             ]
+             harmony_tools_streamed = [False] * num_choices
+         tools_streamed = [False] * num_choices
+
+         if isinstance(request.tool_choice, ChatCompletionNamedToolChoiceParam):
+             tool_choice_function_name = request.tool_choice.function.name
+         else:
+             tool_choice_function_name = None
+
+         # Determine whether tools are in use with "auto" tool choice
+         tool_choice_auto = (
+             not tool_choice_function_name
+             and self._should_stream_with_auto_tool_parsing(request))
+
+         all_previous_token_ids: Optional[list[list[int]]]
+         function_name_returned = [False] * num_choices
+         if self.tool_call_id_type == 'kimi_k2':
+             history_tool_call_cnt = get_history_tool_calls_cnt(conversation)
+         else:
+             history_tool_call_cnt = 0
+
+         # Always track previous_texts for comprehensive output logging
+         previous_texts = [""] * num_choices
+
+         # Only one of these will be used, thus previous_texts and
+         # all_previous_token_ids will not be used twice in the same iteration.
+         if tool_choice_auto or self.reasoning_parser:
+             # These are only required in the "auto" tool choice case
+             all_previous_token_ids = [[]] * num_choices
+             # Used when the reasoning parser and tool calls are both enabled
+             added_content_delta_arr = [False] * num_choices
+             reasoning_end_arr = [False] * num_choices
+         elif request.tool_choice == "required":
+             all_previous_token_ids = None
+         else:
+             all_previous_token_ids = None
+
+         try:
+             if self.reasoning_parser:
+                 reasoning_parser = self.reasoning_parser(tokenizer)
+         except RuntimeError as e:
+             logger.exception("Error in reasoning parser creation.")
+             data = self.create_streaming_error_response(str(e))
+             yield f"data: {data}\n\n"
+             yield "data: [DONE]\n\n"
+             return
+         # Prepare the tool parser if it's needed
+         try:
+             if tool_choice_auto and self.tool_parser:
+                 tool_parsers: list[Optional[ToolParser]] = [
+                     self.tool_parser(tokenizer)
+                 ] * num_choices
+             else:
+                 tool_parsers = [None] * num_choices
+         except Exception as e:
+             logger.exception("Error in tool parser creation.")
+             data = self.create_streaming_error_response(str(e))
+             yield f"data: {data}\n\n"
+             yield "data: [DONE]\n\n"
+             return
+
+         stream_options = request.stream_options
+         if stream_options:
+             include_usage = stream_options.include_usage \
+                 or enable_force_include_usage
+             include_continuous_usage = include_usage and \
+                 stream_options.continuous_usage_stats
+         else:
+             include_usage, include_continuous_usage = False, False
+
+         try:
+             async for res in result_generator:
+                 if res.prompt_token_ids is not None:
+                     num_prompt_tokens = len(res.prompt_token_ids)
+                     if res.encoder_prompt_token_ids is not None:
+                         num_prompt_tokens += len(res.encoder_prompt_token_ids)
+
+                 # We need to do it here, because if there are exceptions in
+                 # the result_generator, it needs to be sent as the FIRST
+                 # response (by the try...catch).
+                 if first_iteration:
+                     num_cached_tokens = res.num_cached_tokens
+                     # Send first response for each request.n (index) with
+                     # the role
+                     role = self.get_chat_request_role(request)
+
+                     # NOTE num_choices defaults to 1 so this usually executes
+                     # once per request
+                     for i in range(num_choices):
+                         choice_data = ChatCompletionResponseStreamChoice(
+                             index=i,
+                             delta=DeltaMessage(
+                                 role=role,
+                                 content="",
+                             ),
+                             logprobs=None,
+                             finish_reason=None)
+
+                         # return prompt_token_ids at the first chunk ever
+                         chunk = ChatCompletionStreamResponse(
+                             id=request_id,
+                             object=chunk_object_type,
+                             created=created_time,
+                             choices=[choice_data],
+                             model=model_name,
+                             prompt_token_ids=(res.prompt_token_ids
+                                               if request.return_token_ids else
+                                               None))
+
+                         # if continuous usage stats are requested, add them
+                         if include_continuous_usage:
+                             chunk.usage = UsageInfo(
+                                 prompt_tokens=num_prompt_tokens,
+                                 completion_tokens=0,
+                                 total_tokens=num_prompt_tokens)
+
+                         data = chunk.model_dump_json(exclude_unset=True)
+                         yield f"data: {data}\n\n"
+
+                     # Send response to echo the input portion of the
+                     # last message
+                     if request.echo:
+                         last_msg_content: Union[str, list[dict[str, str]]] = ""
+                         if conversation and "content" in conversation[
+                                 -1] and conversation[-1].get("role") == role:
+                             last_msg_content = conversation[-1]["content"] or ""
+
+                         if last_msg_content:
+                             for i in range(num_choices):
+                                 choice_data = (
+                                     ChatCompletionResponseStreamChoice(
+                                         index=i,
+                                         delta=DeltaMessage(
+                                             content=last_msg_content),
+                                         logprobs=None,
+                                         finish_reason=None))
+                                 chunk = ChatCompletionStreamResponse(
+                                     id=request_id,
+                                     object=chunk_object_type,
+                                     created=created_time,
+                                     choices=[choice_data],
+                                     model=model_name)
+                                 if include_continuous_usage:
+                                     chunk.usage = UsageInfo(
+                                         prompt_tokens=num_prompt_tokens,
+                                         completion_tokens=0,
+                                         total_tokens=num_prompt_tokens)
+
+                                 data = chunk.model_dump_json(
+                                     exclude_unset=True)
+                                 yield f"data: {data}\n\n"
+                     first_iteration = False
+
+                 for output in res.outputs:
+                     i = output.index
+                     tool_parser = tool_parsers[i]
+
+                     if finish_reason_sent[i]:
+                         continue
+
+                     if request.logprobs and request.top_logprobs is not None:
+                         assert output.logprobs is not None, (
+                             "Did not output logprobs")
+                         logprobs = self._create_chat_logprobs(
+                             token_ids=output.token_ids,
+                             top_logprobs=output.logprobs,
+                             tokenizer=tokenizer,
+                             num_output_top_logprobs=request.top_logprobs,
+                             return_as_token_id=request.
+                             return_tokens_as_token_ids,
+                         )
+                     else:
+                         logprobs = None
+
+                     if self.use_harmony:
+                         harmony_parser = harmony_parsers[i]
+                         prev_recipient = harmony_parser.current_recipient
+                         for token_id in output.token_ids:
+                             harmony_parser.process(token_id)
+                         cur_channel = harmony_parser.current_channel
+                         cur_recipient = harmony_parser.current_recipient
+                         delta_text = harmony_parser.last_content_delta or ""
+                     else:
+                         delta_text = output.text
+
+                     if not delta_text and not output.token_ids and \
+                             not previous_num_tokens[i]:
+                         # Chunked prefill case, don't return empty chunks
+                         continue
+
+                     delta_message: Optional[DeltaMessage]
+
+                     # just update previous_texts and previous_token_ids
+                     if tool_choice_auto or self.reasoning_parser:
+                         assert previous_texts is not None
+                         assert all_previous_token_ids is not None
+                         previous_text = previous_texts[i]
+                         previous_token_ids = all_previous_token_ids[i]
+                         current_text = previous_text + delta_text
+                         # avoid the None + list error.
+                         if previous_token_ids:
+                             current_token_ids = previous_token_ids + as_list(
+                                 output.token_ids)
+                         else:
+                             current_token_ids = as_list(output.token_ids)
+
+                     if self.use_harmony:
+                         if cur_channel == "final":
+                             delta_message = DeltaMessage(content=delta_text)
+                         elif cur_channel == "analysis":
+                             if request.include_reasoning:
+                                 delta_message = DeltaMessage(
+                                     reasoning_content=delta_text)
+                             else:
+                                 delta_message = None
+                         elif (cur_channel == "commentary" and cur_recipient
+                               and cur_recipient.startswith("functions.")):
+                             # Count completed tool calls to determine index
+                             base_index = 0
+                             for msg in harmony_parser.messages:
+                                 if (msg.channel == "commentary"
+                                         and msg.recipient
+                                         and msg.recipient.startswith(
+                                             "functions.")):
+                                     base_index += 1
+
+                             if prev_recipient != cur_recipient:
+                                 tool_name = cur_recipient.split(
+                                     "functions.", 1)[1]
+                                 delta_message = DeltaMessage(tool_calls=[
+                                     DeltaToolCall(
+                                         id=make_tool_call_id(),
+                                         type="function",
+                                         function=DeltaFunctionCall(
+                                             name=tool_name,
+                                             arguments="",
+                                         ),
+                                         index=base_index,
+                                     )
+                                 ])
+                             elif delta_text:
+                                 delta_message = DeltaMessage(tool_calls=[
+                                     DeltaToolCall(
+                                         index=base_index,
+                                         function=DeltaFunctionCall(
+                                             arguments=delta_text),
+                                     )
+                                 ])
+                             else:
+                                 delta_message = None
+
+                             if delta_message is not None:
+                                 harmony_tools_streamed[i] = True
+                         else:
+                             delta_message = None
+                     # handle streaming deltas for tools with named tool_choice
+                     elif tool_choice_function_name:
+                         if (self.reasoning_parser and not reasoning_end_arr[i]
+                                 and not reasoning_parser.is_reasoning_end(
+                                     previous_token_ids)):
+                             assert reasoning_parser is not None
+                             delta_message = (
+                                 reasoning_parser.
+                                 extract_reasoning_content_streaming(
+                                     previous_text,
+                                     current_text,
+                                     delta_text,
+                                     previous_token_ids,
+                                     current_token_ids,
+                                     output.token_ids,
+                                 ))
+                             # When encountering the think end id in
+                             # delta_token_ids or in prompt_token_ids,
+                             # i.e. {"enable_thinking": False},
+                             # set reasoning status to end.
+                             # Only keep 'content', remove 'reasoning_content'.
+                             if reasoning_parser.is_reasoning_end(
+                                     as_list(output.token_ids)) or (
+                                         res.prompt_token_ids
+                                         and reasoning_parser.is_reasoning_end(
+                                             res.prompt_token_ids)):
+                                 reasoning_end_arr[i] = True
+                                 if delta_message and delta_message.content:
+                                     # This needs to be added to the next
+                                     # `delta_text`
+                                     current_text = delta_message.content
+                                     delta_message.content = None
+                                 else:
+                                     current_text = ""
+                         else:
+                             # Just to add the remaining `content`
+                             if self.reasoning_parser:
+                                 delta_text = previous_text + delta_text
+                                 current_text = ""
+
+                             if function_name_returned[i]:
+                                 delta_tool_call = DeltaToolCall(
+                                     function=DeltaFunctionCall(
+                                         arguments=delta_text),
+                                     index=i)
+                             else:
+                                 delta_tool_call = DeltaToolCall(
+                                     id=make_tool_call_id(),
+                                     type="function",
+                                     function=DeltaFunctionCall(
+                                         name=tool_choice_function_name,
+                                         arguments=delta_text),
+                                     index=i)
+                                 function_name_returned[i] = True
+
+                             delta_message = DeltaMessage(tool_calls=[
+                                 delta_tool_call,
+                             ])
+                             tools_streamed[i] = True
+
+                     elif request.tool_choice == "required":
+                         assert previous_texts is not None
+                         previous_text = previous_texts[i]
+                         current_text = previous_text + delta_text
+                         fn_name_returned = function_name_returned[i]
+
+                         if self.reasoning_parser:
+                             _, content = \
+                                 reasoning_parser.extract_reasoning_content(
+                                     current_text,
+                                     request
+                                 )
+                         else:
+                             content = current_text
+                         delta_message, function_name_returned[i] = (
+                             self.extract_tool_call_required_streaming(
+                                 previous_text=previous_text,
+                                 current_text=content,
+                                 delta_text=delta_text,
+                                 function_name_returned=fn_name_returned,
+                                 tool_call_idx=history_tool_call_cnt))
+                         if (delta_message and delta_message.tool_calls and
+                                 delta_message.tool_calls[0].id is not None):
+                             history_tool_call_cnt += 1
+                             tools_streamed[i] = True
+
+                         # update the previous values for the next iteration
+                         previous_texts[i] = current_text
+
+                     # handle streaming deltas for tools with "auto" tool choice
+                     # and reasoning parser
+                     elif tool_choice_auto and self.reasoning_parser:
+                         assert tool_parser is not None
+                         assert reasoning_parser is not None
+                         assert added_content_delta_arr is not None
+                         assert reasoning_end_arr is not None
+                         output_token_ids = as_list(output.token_ids)
+                         if not reasoning_end_arr[i]:
+                             delta_message = (
+                                 reasoning_parser.
+                                 extract_reasoning_content_streaming(
+                                     previous_text,
+                                     current_text,
+                                     delta_text,
+                                     previous_token_ids,
+                                     current_token_ids,
+                                     output_token_ids,
+                                 ))
+                             # When encountering the think end id in
+                             # prompt_token_ids,
+                             # i.e. {"enable_thinking": False},
+                             # set reasoning status to end.
+                             # Remove the text and token ids related
+                             # to 'reasoning_content'.
+                             if res.prompt_token_ids and \
+                                     reasoning_parser.is_reasoning_end(
+                                         res.prompt_token_ids):
+                                 reasoning_end_arr[i] = True
+                                 current_token_ids = output_token_ids
+                                 if delta_message and delta_message.content:
+                                     current_text = delta_message.content
+                                     delta_message.content = None
+                                 else:
+                                     current_text = ""
+                             # When encountering the think end id in
+                             # delta_token_ids,
+                             # set reasoning status to end.
+                             # Remove the text and token ids related
+                             # to 'reasoning_content'.
+                             if reasoning_parser.is_reasoning_end(
+                                     output_token_ids):
+                                 reasoning_end_arr[i] = True
+                                 current_token_ids = \
+                                     reasoning_parser.extract_content_ids(
+                                         output_token_ids)
+                                 if delta_message and delta_message.content:
+                                     current_text = delta_message.content
+                                     delta_message.content = None
+                                 else:
+                                     current_text = ""
+
+                         # handle tool calls only after reasoning is done.
+                         else:
+                             delta_token_ids = output_token_ids
+                             # First time to tool call,
+                             # add the remaining text and token ids
+                             # to delta from previous
+                             if not added_content_delta_arr[i]:
+                                 added_content_delta_arr[i] = True
+                                 previous_text = ""
+                                 previous_token_ids = []
+                                 delta_text = current_text
+                                 delta_token_ids = current_token_ids
+
+                             delta_message = (
+                                 tool_parser.extract_tool_calls_streaming(
+                                     previous_text=previous_text,
+                                     current_text=current_text,
+                                     delta_text=delta_text,
+                                     previous_token_ids=previous_token_ids,
+                                     current_token_ids=current_token_ids,
+                                     delta_token_ids=delta_token_ids,
+                                     request=request))
+                             if delta_message and delta_message.tool_calls:
+                                 tools_streamed[i] = True
+                     # when only tool calls
+                     elif tool_choice_auto:
+                         assert tool_parser is not None
+                         delta_message = (
+                             tool_parser.extract_tool_calls_streaming(
+                                 previous_text=previous_text,
+                                 current_text=current_text,
+                                 delta_text=delta_text,
+                                 previous_token_ids=previous_token_ids,
+                                 current_token_ids=current_token_ids,
+                                 delta_token_ids=output.token_ids,
+                                 request=request))
+                         if delta_message and delta_message.tool_calls:
+                             tools_streamed[i] = True
+
+                     # when only reasoning
+                     elif self.reasoning_parser:
+                         delta_message = (reasoning_parser.
+                                          extract_reasoning_content_streaming(
+                                              previous_text,
+                                              current_text,
+                                              delta_text,
+                                              previous_token_ids,
+                                              current_token_ids,
+                                              output.token_ids,
+                                          ))
+                     # handle streaming just a content delta
+                     else:
+                         delta_message = DeltaMessage(content=delta_text)
+
+                     # update the previous values for the next iteration
+                     if ((tool_choice_auto or self.reasoning_parser)
+                             and not self.use_harmony):
+                         assert previous_texts is not None
+                         assert all_previous_token_ids is not None
+                         previous_texts[i] = current_text
+                         all_previous_token_ids[i] = current_token_ids
+                     else:
+                         # Update for comprehensive logging even in simple case
+                         assert previous_texts is not None
+                         previous_texts[i] += delta_text
+
+                     # set the previous values for the next iteration
+                     previous_num_tokens[i] += len(output.token_ids)
+
+                     # if the message delta is None (e.g. because it was a
+                     # "control token" for tool calls, or the parser otherwise
+                     # wasn't ready to send a token), then
+                     # get the next token without streaming a chunk
+                     if delta_message is None:
+                         if output.finish_reason is None:
+                             continue
+                         else:
+                             delta_message = DeltaMessage()
+
+                     # Log streaming delta if output logging is enabled
+                     if self.enable_log_outputs and self.request_logger:
+                         delta_content = ""
+                         if delta_message.content:
+                             delta_content = delta_message.content
+                         elif delta_message.tool_calls:
+                             delta_content = "".join(
+                                 tc.function.arguments
+                                 for tc in delta_message.tool_calls
+                                 if tc.function and tc.function.arguments)
+
+                         if delta_content:
+                             self.request_logger.log_outputs(
+                                 request_id=request_id,
+                                 outputs=delta_content,
+                                 output_token_ids=as_list(output.token_ids),
+                                 finish_reason=output.finish_reason,
+                                 is_streaming=True,
+                                 delta=True,
+                             )
+
+                     if output.finish_reason is None:
+                         # Send token-by-token response for each request.n
+                         choice_data = ChatCompletionResponseStreamChoice(
+                             index=i,
+                             delta=delta_message,
+                             logprobs=logprobs,
+                             finish_reason=None,
+                             token_ids=(as_list(output.token_ids)
+                                        if request.return_token_ids else None))
+
+                     # if the model is finished generating
+                     else:
+                         # check to make sure we haven't "forgotten" to stream
+                         # any tokens that were generated but previously
+                         # matched by partial json parsing;
+                         # only happens if we are NOT using guided decoding
+                         auto_tools_called = False
+                         if tool_parser:
+                             auto_tools_called = len(
+                                 tool_parser.prev_tool_call_arr) > 0
+                             index = len(tool_parser.prev_tool_call_arr
+                                         ) - 1 if auto_tools_called else 0
+                         else:
+                             index = 0
+
+                         if self._should_check_for_unstreamed_tool_arg_tokens(
+                                 delta_message, output) and tool_parser:
+                             latest_delta_len = 0
+                             if ((isinstance(
+                                     delta_message.tool_calls[0].function,
+                                     DeltaFunctionCall)) and isinstance(
+                                         delta_message.tool_calls[0].function.
+                                         arguments, str)):
+                                 latest_delta_len = len(
+                                     delta_message.tool_calls[0].function.
+                                     arguments)
+
+                             # get the expected call based on partial JSON
+                             # parsing which "autocompletes" the JSON
+                             expected_call = json.dumps(
+                                 tool_parser.prev_tool_call_arr[index].get(
+                                     "arguments", {}),
+                                 ensure_ascii=False)
+
+                             # get what we've streamed so far for arguments
+                             # for the current tool
+                             actual_call = tool_parser.streamed_args_for_tool[
+                                 index]
+                             if (latest_delta_len > 0):
+                                 actual_call = actual_call[:-latest_delta_len]
+
+                             # check to see if there's anything left to stream
+                             remaining_call = expected_call.replace(
+                                 actual_call, "", 1)
+                             # set that as a delta message
+                             delta_message = DeltaMessage(tool_calls=[
+                                 DeltaToolCall(index=index,
+                                               function=DeltaFunctionCall(
+                                                   arguments=remaining_call).
+                                               model_dump(exclude_none=True))
+                             ])
+
+                         # Send the finish response for each request.n only once
+                         if auto_tools_called or tools_streamed[i] or (
+                                 self.use_harmony
+                                 and harmony_tools_streamed[i]):
+                             finish_reason_ = "tool_calls"
+                         else:
+                             finish_reason_ = output.finish_reason \
+                                 if output.finish_reason else "stop"
+                         choice_data = ChatCompletionResponseStreamChoice(
+                             index=i,
+                             delta=delta_message,
+                             logprobs=logprobs,
+                             finish_reason=finish_reason_,
+                             stop_reason=output.stop_reason,
+                             token_ids=(as_list(output.token_ids)
+                                        if request.return_token_ids else None))
+
+                         finish_reason_sent[i] = True
+
+                     chunk = ChatCompletionStreamResponse(
+                         id=request_id,
+                         object=chunk_object_type,
+                         created=created_time,
+                         choices=[choice_data],
+                         model=model_name)
+
+                     # handle usage stats if requested & if continuous
+                     if include_continuous_usage:
+                         completion_tokens = previous_num_tokens[i]
+                         chunk.usage = UsageInfo(
+                             prompt_tokens=num_prompt_tokens,
+                             completion_tokens=completion_tokens,
+                             total_tokens=num_prompt_tokens + completion_tokens,
+                         )
+
+                     data = chunk.model_dump_json(exclude_unset=True)
+                     yield f"data: {data}\n\n"
+
+             # once the final token is handled, if stream_options.include_usage
+             # is sent, send the usage
+             if include_usage:
+                 completion_tokens = sum(previous_num_tokens)
+                 final_usage = UsageInfo(prompt_tokens=num_prompt_tokens,
+                                         completion_tokens=completion_tokens,
+                                         total_tokens=num_prompt_tokens +
+                                         completion_tokens)
+                 if self.enable_prompt_tokens_details and num_cached_tokens:
+                     final_usage.prompt_tokens_details = PromptTokenUsageInfo(
+                         cached_tokens=num_cached_tokens)
+
+                 final_usage_chunk = ChatCompletionStreamResponse(
+                     id=request_id,
+                     object=chunk_object_type,
+                     created=created_time,
+                     choices=[],
+                     model=model_name,
+                     usage=final_usage)
+                 final_usage_data = (final_usage_chunk.model_dump_json(
+                     exclude_unset=True, exclude_none=True))
+                 yield f"data: {final_usage_data}\n\n"
+
+             # report to FastAPI middleware aggregate usage across all choices
+             num_completion_tokens = sum(previous_num_tokens)
+             request_metadata.final_usage_info = UsageInfo(
+                 prompt_tokens=num_prompt_tokens,
+                 completion_tokens=num_completion_tokens,
+                 total_tokens=num_prompt_tokens + num_completion_tokens,
+             )
+
+             # Log complete streaming response if output logging is enabled
+             if self.enable_log_outputs and self.request_logger:
+                 # Log the complete response for each choice
+                 for i in range(num_choices):
+                     full_text = (
+                         previous_texts[i]
+                         if previous_texts and i < len(previous_texts) else
+                         f"<streaming_complete: {previous_num_tokens[i]} tokens>"
+                     )
+                     self.request_logger.log_outputs(
+                         request_id=request_id,
+                         outputs=full_text,
+                         output_token_ids=
+                         None,  # Consider also logging all token IDs
+                         finish_reason="streaming_complete",
+                         is_streaming=True,
+                         delta=False,
+                     )
+
+         except Exception as e:
+             # TODO: Use a vllm-specific Validation Error
+             logger.exception("Error in chat completion stream generator.")
+             data = self.create_streaming_error_response(str(e))
+             yield f"data: {data}\n\n"
+         # Send the final done message after all response.n are finished
+         yield "data: [DONE]\n\n"
+
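The generator above is served as Server-Sent Events: every chunk is emitted as a `data: <json>` line followed by a blank line, with a literal `data: [DONE]` sentinel at the end. A minimal editorial client sketch (the server URL and model name are assumptions, not part of the package):

    # Sketch only: consume the SSE stream produced by the generator above.
    import json
    import requests

    with requests.post("http://localhost:8000/v1/chat/completions",
                       json={"model": "my-model",
                             "messages": [{"role": "user", "content": "Hi"}],
                             "stream": True},
                       stream=True) as resp:
        for line in resp.iter_lines(decode_unicode=True):
            if not line or not line.startswith("data: "):
                continue  # skip the blank separator lines between events
            payload = line[len("data: "):]
            if payload == "[DONE]":  # the final sentinel yielded above
                break
            chunk = json.loads(payload)  # a ChatCompletionStreamResponse dict
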
+     async def chat_completion_full_generator(
+         self,
+         request: ChatCompletionRequest,
+         result_generator: AsyncIterator[RequestOutput],
+         request_id: str,
+         model_name: str,
+         conversation: list[ConversationMessage],
+         tokenizer: AnyTokenizer,
+         request_metadata: RequestResponseMetadata,
+     ) -> Union[ErrorResponse, ChatCompletionResponse]:
+
+         created_time = int(time.time())
+         final_res: Optional[RequestOutput] = None
+
+         try:
+             async for res in result_generator:
+                 final_res = res
+         except asyncio.CancelledError:
+             return self.create_error_response("Client disconnected")
+         except ValueError as e:
+             # TODO: Use a vllm-specific Validation Error
+             return self.create_error_response(str(e))
+
+         assert final_res is not None
+
+         choices: list[ChatCompletionResponseChoice] = []
+         if self.tool_call_id_type == 'kimi_k2':
+             history_tool_call_cnt = get_history_tool_calls_cnt(conversation)
+         else:
+             history_tool_call_cnt = 0
+
+         role = self.get_chat_request_role(request)
+         for output in final_res.outputs:
+             token_ids = output.token_ids
+             out_logprobs = output.logprobs
+             tool_call_info = None
+
+             if request.logprobs and request.top_logprobs is not None:
+                 assert out_logprobs is not None, "Did not output logprobs"
+                 logprobs = self._create_chat_logprobs(
+                     token_ids=token_ids,
+                     top_logprobs=out_logprobs,
+                     num_output_top_logprobs=request.top_logprobs,
+                     tokenizer=tokenizer,
+                     return_as_token_id=request.return_tokens_as_token_ids,
+                 )
+             else:
+                 logprobs = None
+
+             if self.use_harmony:
+                 if self.tool_parser is not None:
+                     tool_parser = self.tool_parser(tokenizer)
+                     # NOTE: We use token_ids for the openai tool parser
+                     tool_call_info = tool_parser.extract_tool_calls(
+                         "",
+                         request=request,
+                         token_ids=token_ids,  # type: ignore
+                     )
+                     reasoning_content, content = None, tool_call_info.content
+                     if request.include_reasoning:
+                         reasoning_content, content, _ = parse_chat_output(
+                             token_ids)
+                     message = ChatMessage(
+                         role=role,
+                         reasoning_content=reasoning_content,
+                         content=content,
+                         tool_calls=tool_call_info.tool_calls,
+                     )
+                 else:
+                     reasoning_content, content, _ = parse_chat_output(
+                         token_ids)
+                     if not request.include_reasoning:
+                         reasoning_content = None
+                     message = ChatMessage(
+                         role=role,
+                         reasoning_content=reasoning_content,
+                         content=content,
+                     )
+
+                 choice_data = ChatCompletionResponseChoice(
+                     index=output.index,
+                     message=message,
+                     logprobs=logprobs,
+                     finish_reason="tool_calls" if
+                     (tool_call_info is not None
+                      and tool_call_info.tools_called) else
+                     output.finish_reason if output.finish_reason else "stop",
+                     stop_reason=output.stop_reason,
+                 )
+                 choices.append(choice_data)
+                 continue
+
+             if self.reasoning_parser:
+                 try:
+                     reasoning_parser = self.reasoning_parser(tokenizer)
+                 except RuntimeError as e:
+                     logger.exception("Error in reasoning parser creation.")
+                     return self.create_error_response(str(e))
+                 # If the reasoning parser is enabled,
+                 # tool calls are extracted exclusively from the content.
+                 reasoning_content, content = (
+                     reasoning_parser.extract_reasoning_content(
+                         output.text, request=request))
+                 if not request.include_reasoning:
+                     reasoning_content = None
+             else:
+                 reasoning_content = None
+                 content = output.text
+
+             auto_tools_called = False
+             # if auto tools are not enabled, and a named tool choice using
+             # outlines is not being used
+             if (not self.enable_auto_tools or not self.tool_parser) and \
+                     (not isinstance(request.tool_choice,
+                                     ChatCompletionNamedToolChoiceParam
+                                     ) and request.tool_choice != "required"):
+                 message = ChatMessage(role=role,
+                                       reasoning_content=reasoning_content,
+                                       content=content)
+
+             # if the request uses tools and specified a tool choice
+             elif request.tool_choice and type(
+                     request.tool_choice) is ChatCompletionNamedToolChoiceParam:
+
+                 tool_call_class = MistralToolCall if isinstance(
+                     tokenizer, MistralTokenizer) else ToolCall
+                 message = ChatMessage(
+                     role=role,
+                     reasoning_content=reasoning_content,
+                     content="",
+                     tool_calls=[
+                         tool_call_class(function=FunctionCall(
+                             name=request.tool_choice.function.name,
+                             arguments=content,
+                         ))
+                     ],
+                 )
+
+             elif request.tool_choice and request.tool_choice == "required":
+                 tool_call_class = MistralToolCall if isinstance(
+                     tokenizer, MistralTokenizer) else ToolCall
+
+                 # the fields of FunctionDefinition are a superset of the
+                 # tool call outputs and can be used for parsing
+                 assert content is not None
+                 tool_calls = TypeAdapter(
+                     list[FunctionDefinition]).validate_json(content)
+                 tool_call_ids = []
+                 for tool_call in tool_calls:
+                     tool_call_ids.append(
+                         make_tool_call_id(id_type=self.tool_call_id_type,
+                                           func_name=tool_call.name,
+                                           idx=history_tool_call_cnt))
+                     history_tool_call_cnt += 1
+                 message = ChatMessage(
+                     role=role,
+                     content="",
+                     tool_calls=[
+                         tool_call_class(id=tool_call_ids[i],
+                                         function=FunctionCall(
+                                             name=tool_call.name,
+                                             arguments=json.dumps(
+                                                 tool_call.parameters,
+                                                 ensure_ascii=False)))
+                         for i, tool_call in enumerate(tool_calls)
+                     ],
+                     reasoning_content=reasoning_content)
+
+             # if the request doesn't use tool choice
+             # OR specifies to not use a tool
+             elif not request.tool_choice or request.tool_choice == "none":
+
+                 message = ChatMessage(role=role,
+                                       reasoning_content=reasoning_content,
+                                       content=content)
+
+             # handle when there are tools and tool choice is auto
+             elif request.tools and (
+                     request.tool_choice == "auto"
+                     or request.tool_choice is None) and self.enable_auto_tools \
+                     and self.tool_parser:
+
+                 try:
+                     tool_parser = self.tool_parser(tokenizer)
+                 except RuntimeError as e:
+                     logger.exception("Error in tool parser creation.")
+                     return self.create_error_response(str(e))
+
+                 tool_call_info = tool_parser.extract_tool_calls(
+                     content if content is not None else "", request=request)
+                 # In the OpenAI API the finish_reason is "tool_calls"
+                 # if the tool choice is auto and the model produced a tool
+                 # call. The same is not true for named function calls
+                 auto_tools_called = tool_call_info.tools_called
+                 if tool_call_info.tools_called:
+                     message = ChatMessage(role=role,
+                                           reasoning_content=reasoning_content,
+                                           content=tool_call_info.content,
+                                           tool_calls=tool_call_info.tool_calls)
+
+                 else:
+                     # FOR NOW make it a chat message; we will have to detect
+                     # the type and convert it later.
+                     ret_content = content
+
+                     # prefer the content returned from the tool parser, since
+                     # the parser may have modified the content.
+                     if (tool_call_info.content
+                             and len(tool_call_info.content) > 0):
+                         ret_content = tool_call_info.content
+                     message = ChatMessage(role=role,
+                                           reasoning_content=reasoning_content,
+                                           content=ret_content)
+
+             # undetermined case that is still important to handle
+             else:
+                 logger.error(
+                     "Error in chat_completion_full_generator - cannot determine"
+                     " if tools should be extracted. Returning a standard chat "
+                     "completion.")
+                 message = ChatMessage(role=role,
+                                       reasoning_content=reasoning_content,
+                                       content=content)
+
+             choice_data = ChatCompletionResponseChoice(
+                 index=output.index,
+                 message=message,
+                 logprobs=logprobs,
+                 finish_reason="tool_calls" if auto_tools_called else
+                 output.finish_reason if output.finish_reason else "stop",
+                 stop_reason=output.stop_reason,
+                 token_ids=(as_list(output.token_ids)
+                            if request.return_token_ids else None),
+             )
+
+             choices.append(choice_data)
+
+         if request.echo:
+             last_msg_content: Union[str, list[dict[str, str]]] = ""
+             if (conversation and "content" in conversation[-1]
+                     and conversation[-1].get("role") == role):
+                 last_msg_content = conversation[-1]["content"] or ""
+             if isinstance(last_msg_content, list):
+                 last_msg_content = "\n".join(msg['text']
+                                              for msg in last_msg_content)
+
+             for choice in choices:
+                 full_message = last_msg_content + (choice.message.content
+                                                    or "")
+                 choice.message.content = full_message
+
+         assert final_res.prompt_token_ids is not None
+         num_prompt_tokens = len(final_res.prompt_token_ids)
+         if final_res.encoder_prompt_token_ids is not None:
+             num_prompt_tokens += len(final_res.encoder_prompt_token_ids)
+         num_generated_tokens = sum(
+             len(output.token_ids) for output in final_res.outputs)
+         usage = UsageInfo(prompt_tokens=num_prompt_tokens,
+                           completion_tokens=num_generated_tokens,
+                           total_tokens=num_prompt_tokens +
+                           num_generated_tokens)
+         if self.enable_prompt_tokens_details and final_res.num_cached_tokens:
+             usage.prompt_tokens_details = PromptTokenUsageInfo(
+                 cached_tokens=final_res.num_cached_tokens)
+
+         request_metadata.final_usage_info = usage
+
+         response = ChatCompletionResponse(
+             id=request_id,
+             created=created_time,
+             model=model_name,
+             choices=choices,
+             usage=usage,
+             prompt_logprobs=clamp_prompt_logprobs(final_res.prompt_logprobs),
+             prompt_token_ids=(final_res.prompt_token_ids
+                               if request.return_token_ids else None),
+             kv_transfer_params=final_res.kv_transfer_params,
+         )
+
+         # Log complete response if output logging is enabled
+         if self.enable_log_outputs and self.request_logger:
+             for choice in choices:
+                 output_text = ""
+                 if choice.message.content:
+                     output_text = choice.message.content
+                 elif choice.message.tool_calls:
+                     # For tool calls, log the function name and arguments
+                     tool_call_descriptions = []
+                     for tc in choice.message.tool_calls:
+                         if hasattr(tc.function, "name") and hasattr(
+                                 tc.function, "arguments"):
+                             tool_call_descriptions.append(
+                                 f"{tc.function.name}({tc.function.arguments})")
+                     tool_calls_str = ", ".join(tool_call_descriptions)
+                     output_text = f"[tool_calls: {tool_calls_str}]"
+
+                 if output_text:
+                     # Get the corresponding output token IDs
+                     output_token_ids = None
+                     if choice.index < len(final_res.outputs):
+                         output_token_ids = final_res.outputs[
+                             choice.index].token_ids
+
+                     self.request_logger.log_outputs(
+                         request_id=request_id,
+                         outputs=output_text,
+                         output_token_ids=output_token_ids,
+                         finish_reason=choice.finish_reason,
+                         is_streaming=False,
+                         delta=False,
+                     )
+
+         return response
+
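For reference, with tool_choice == "required" the generator above expects `content` to be a JSON list that validates as `list[FunctionDefinition]` (the protocol class imported at the top of the file). An editorial sketch of just that parsing step, with a hypothetical payload:

    # Sketch only: the pydantic validation used in the "required" branch.
    from pydantic import TypeAdapter
    from vllm.entrypoints.openai.protocol import FunctionDefinition

    content = '[{"name": "get_weather", "parameters": {"city": "Paris"}}]'
    calls = TypeAdapter(list[FunctionDefinition]).validate_json(content)
    assert calls[0].name == "get_weather"  # re-emitted as a tool_call above
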
+     def _get_top_logprobs(
+             self, logprobs: dict[int, Logprob], top_logprobs: Optional[int],
+             tokenizer: AnyTokenizer,
+             should_return_as_token_id: bool) -> list[ChatCompletionLogProb]:
+         return [
+             ChatCompletionLogProb(
+                 token=(token := self._get_decoded_token(
+                     p[1],
+                     p[0],
+                     tokenizer,
+                     return_as_token_id=should_return_as_token_id,
+                 )),
+                 logprob=max(p[1].logprob, -9999.0),
+                 bytes=list(token.encode("utf-8", errors="replace")),
+             ) for i, p in enumerate(logprobs.items())
+             if top_logprobs and i < top_logprobs
+         ]
+
+     def _create_chat_logprobs(
+         self,
+         token_ids: GenericSequence[int],
+         top_logprobs: GenericSequence[Optional[dict[int, Logprob]]],
+         tokenizer: AnyTokenizer,
+         num_output_top_logprobs: Optional[int] = None,
+         return_as_token_id: Optional[bool] = None,
+     ) -> ChatCompletionLogProbs:
+         """Create OpenAI-style logprobs."""
+         logprobs_content: list[ChatCompletionLogProbsContent] = []
+
+         should_return_as_token_id = return_as_token_id if \
+             return_as_token_id is not None else self.return_tokens_as_token_ids
+         for i, token_id in enumerate(token_ids):
+             step_top_logprobs = top_logprobs[i]
+             if step_top_logprobs is None or step_top_logprobs.get(
+                     token_id) is None:
+                 if should_return_as_token_id:
+                     token = f"token_id:{token_id}"
+                 else:
+                     token = tokenizer.decode(token_id)
+
+                 logprobs_content.append(
+                     ChatCompletionLogProbsContent(
+                         token=token,
+                         bytes=list(token.encode("utf-8", errors="replace")),
+                     ))
+             else:
+                 step_token = step_top_logprobs[token_id]
+                 step_decoded = step_token.decoded_token
+
+                 logprobs_content.append(
+                     ChatCompletionLogProbsContent(
+                         token=self._get_decoded_token(
+                             step_token,
+                             token_id,
+                             tokenizer,
+                             should_return_as_token_id,
+                         ),
+                         logprob=max(step_token.logprob, -9999.0),
+                         bytes=None if step_decoded is None else list(
+                             step_decoded.encode("utf-8", errors="replace")),
+                         top_logprobs=self._get_top_logprobs(
+                             step_top_logprobs, num_output_top_logprobs,
+                             tokenizer, should_return_as_token_id),
+                     ))
+
+         return ChatCompletionLogProbs(content=logprobs_content)
+
+     def _should_stream_with_auto_tool_parsing(self,
+                                               request: ChatCompletionRequest):
+         """
+         Utility function to check if streamed tokens should go through the tool
+         call parser that was configured.
+
+         We only want to do this IF user-provided tools are set, a tool parser
+         is configured, "auto" tool choice is enabled, and the request's tool
+         choice field indicates that "auto" tool choice should be used.
+         """
+         return (request.tools and self.tool_parser and self.enable_auto_tools
+                 and request.tool_choice in ['auto', None])
+
+     def _should_check_for_unstreamed_tool_arg_tokens(
+         self,
+         delta_message: Optional[DeltaMessage],
+         output: CompletionOutput,
+     ) -> bool:
+         """
+         Check to see if we should check for unstreamed tool arguments tokens.
+         This is only applicable when auto tool parsing is enabled and the
+         delta is a tool call with arguments.
+         """
+
+         # yapf: disable
+         return bool(
+             # if there is a delta message that includes tool calls which
+             # include a function that has arguments
+             output.finish_reason is not None
+             and self.enable_auto_tools and self.tool_parser and delta_message
+             and delta_message.tool_calls and delta_message.tool_calls[0]
+             and delta_message.tool_calls[0].function
+             and delta_message.tool_calls[0].function.arguments is not None
+         )
+
+     def _make_request_with_harmony(
+         self,
+         request: ChatCompletionRequest,
+     ):
+         messages: list[OpenAIMessage] = []
+
+         # Add system message.
+         # NOTE: In the Chat Completion API, browsing is enabled by default
+         # if the model supports it. TODO: Support browsing.
+         assert not self.supports_browsing
+         assert not self.supports_code_interpreter
+         sys_msg = get_system_message(
+             reasoning_effort=request.reasoning_effort,
+             browser_description=None,
+             python_description=None)
+         messages.append(sys_msg)
+
+         # Add developer message.
+         dev_msg = get_developer_message(tools=request.tools)
+         messages.append(dev_msg)
+
+         # Add user messages.
+         for chat_msg in request.messages:
+             messages.extend(parse_chat_input(chat_msg))
+
+         # Render prompt token ids.
+         prompt_token_ids = render_for_completion(messages)
+         engine_prompt = EngineTokensPrompt(prompt_token_ids=prompt_token_ids)
+
+         # Add cache_salt if provided in the request
+         if request.cache_salt is not None:
+             engine_prompt["cache_salt"] = request.cache_salt
+
+         return messages, [prompt_token_ids], [engine_prompt]
+ return messages, [prompt_token_ids], [engine_prompt]