vllm-cpu-avx512bf16 0.14.0__cp313-cp313-manylinux_2_28_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (1712)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +225 -0
  3. vllm/_aiter_ops.py +1511 -0
  4. vllm/_bc_linter.py +54 -0
  5. vllm/_custom_ops.py +3206 -0
  6. vllm/_ipex_ops.py +445 -0
  7. vllm/_version.py +34 -0
  8. vllm/assets/__init__.py +0 -0
  9. vllm/assets/audio.py +43 -0
  10. vllm/assets/base.py +40 -0
  11. vllm/assets/image.py +62 -0
  12. vllm/assets/video.py +149 -0
  13. vllm/attention/__init__.py +0 -0
  14. vllm/attention/layer.py +913 -0
  15. vllm/attention/utils/__init__.py +0 -0
  16. vllm/attention/utils/kv_sharing_utils.py +33 -0
  17. vllm/attention/utils/kv_transfer_utils.py +60 -0
  18. vllm/beam_search.py +88 -0
  19. vllm/benchmarks/__init__.py +0 -0
  20. vllm/benchmarks/datasets.py +3277 -0
  21. vllm/benchmarks/latency.py +172 -0
  22. vllm/benchmarks/lib/__init__.py +3 -0
  23. vllm/benchmarks/lib/endpoint_request_func.py +777 -0
  24. vllm/benchmarks/lib/ready_checker.py +72 -0
  25. vllm/benchmarks/lib/utils.py +79 -0
  26. vllm/benchmarks/mm_processor.py +363 -0
  27. vllm/benchmarks/serve.py +1761 -0
  28. vllm/benchmarks/startup.py +321 -0
  29. vllm/benchmarks/sweep/__init__.py +0 -0
  30. vllm/benchmarks/sweep/cli.py +41 -0
  31. vllm/benchmarks/sweep/param_sweep.py +159 -0
  32. vllm/benchmarks/sweep/plot.py +675 -0
  33. vllm/benchmarks/sweep/plot_pareto.py +393 -0
  34. vllm/benchmarks/sweep/serve.py +450 -0
  35. vllm/benchmarks/sweep/serve_sla.py +459 -0
  36. vllm/benchmarks/sweep/server.py +114 -0
  37. vllm/benchmarks/sweep/sla_sweep.py +138 -0
  38. vllm/benchmarks/sweep/utils.py +4 -0
  39. vllm/benchmarks/throughput.py +946 -0
  40. vllm/collect_env.py +857 -0
  41. vllm/compilation/__init__.py +0 -0
  42. vllm/compilation/activation_quant_fusion.py +214 -0
  43. vllm/compilation/backends.py +840 -0
  44. vllm/compilation/base_static_graph.py +57 -0
  45. vllm/compilation/caching.py +196 -0
  46. vllm/compilation/collective_fusion.py +1224 -0
  47. vllm/compilation/compiler_interface.py +639 -0
  48. vllm/compilation/counter.py +50 -0
  49. vllm/compilation/cuda_graph.py +309 -0
  50. vllm/compilation/decorators.py +662 -0
  51. vllm/compilation/fix_functionalization.py +266 -0
  52. vllm/compilation/fusion.py +570 -0
  53. vllm/compilation/fusion_attn.py +363 -0
  54. vllm/compilation/fx_utils.py +92 -0
  55. vllm/compilation/inductor_pass.py +145 -0
  56. vllm/compilation/matcher_utils.py +454 -0
  57. vllm/compilation/monitor.py +62 -0
  58. vllm/compilation/noop_elimination.py +130 -0
  59. vllm/compilation/partition_rules.py +75 -0
  60. vllm/compilation/pass_manager.py +164 -0
  61. vllm/compilation/piecewise_backend.py +191 -0
  62. vllm/compilation/post_cleanup.py +21 -0
  63. vllm/compilation/qk_norm_rope_fusion.py +244 -0
  64. vllm/compilation/rocm_aiter_fusion.py +401 -0
  65. vllm/compilation/sequence_parallelism.py +368 -0
  66. vllm/compilation/torch25_custom_graph_pass.py +44 -0
  67. vllm/compilation/vllm_inductor_pass.py +180 -0
  68. vllm/compilation/wrapper.py +329 -0
  69. vllm/config/__init__.py +112 -0
  70. vllm/config/attention.py +114 -0
  71. vllm/config/cache.py +233 -0
  72. vllm/config/compilation.py +1149 -0
  73. vllm/config/device.py +75 -0
  74. vllm/config/ec_transfer.py +110 -0
  75. vllm/config/kv_events.py +56 -0
  76. vllm/config/kv_transfer.py +119 -0
  77. vllm/config/load.py +124 -0
  78. vllm/config/lora.py +102 -0
  79. vllm/config/model.py +2026 -0
  80. vllm/config/model_arch.py +57 -0
  81. vllm/config/multimodal.py +247 -0
  82. vllm/config/observability.py +157 -0
  83. vllm/config/parallel.py +703 -0
  84. vllm/config/pooler.py +188 -0
  85. vllm/config/profiler.py +199 -0
  86. vllm/config/scheduler.py +298 -0
  87. vllm/config/speculative.py +656 -0
  88. vllm/config/speech_to_text.py +39 -0
  89. vllm/config/structured_outputs.py +78 -0
  90. vllm/config/utils.py +374 -0
  91. vllm/config/vllm.py +1487 -0
  92. vllm/connections.py +189 -0
  93. vllm/device_allocator/__init__.py +0 -0
  94. vllm/device_allocator/cumem.py +301 -0
  95. vllm/distributed/__init__.py +6 -0
  96. vllm/distributed/communication_op.py +43 -0
  97. vllm/distributed/device_communicators/__init__.py +0 -0
  98. vllm/distributed/device_communicators/all2all.py +509 -0
  99. vllm/distributed/device_communicators/all_reduce_utils.py +344 -0
  100. vllm/distributed/device_communicators/base_device_communicator.py +303 -0
  101. vllm/distributed/device_communicators/cpu_communicator.py +209 -0
  102. vllm/distributed/device_communicators/cuda_communicator.py +346 -0
  103. vllm/distributed/device_communicators/cuda_wrapper.py +190 -0
  104. vllm/distributed/device_communicators/custom_all_reduce.py +326 -0
  105. vllm/distributed/device_communicators/mnnvl_compat.py +27 -0
  106. vllm/distributed/device_communicators/pynccl.py +386 -0
  107. vllm/distributed/device_communicators/pynccl_allocator.py +191 -0
  108. vllm/distributed/device_communicators/pynccl_wrapper.py +567 -0
  109. vllm/distributed/device_communicators/quick_all_reduce.py +290 -0
  110. vllm/distributed/device_communicators/ray_communicator.py +259 -0
  111. vllm/distributed/device_communicators/shm_broadcast.py +778 -0
  112. vllm/distributed/device_communicators/shm_object_storage.py +697 -0
  113. vllm/distributed/device_communicators/symm_mem.py +156 -0
  114. vllm/distributed/device_communicators/xpu_communicator.py +98 -0
  115. vllm/distributed/ec_transfer/__init__.py +14 -0
  116. vllm/distributed/ec_transfer/ec_connector/__init__.py +0 -0
  117. vllm/distributed/ec_transfer/ec_connector/base.py +247 -0
  118. vllm/distributed/ec_transfer/ec_connector/example_connector.py +201 -0
  119. vllm/distributed/ec_transfer/ec_connector/factory.py +85 -0
  120. vllm/distributed/ec_transfer/ec_transfer_state.py +42 -0
  121. vllm/distributed/eplb/__init__.py +3 -0
  122. vllm/distributed/eplb/async_worker.py +115 -0
  123. vllm/distributed/eplb/eplb_state.py +1192 -0
  124. vllm/distributed/eplb/policy/__init__.py +19 -0
  125. vllm/distributed/eplb/policy/abstract.py +43 -0
  126. vllm/distributed/eplb/policy/default.py +376 -0
  127. vllm/distributed/eplb/rebalance_execute.py +699 -0
  128. vllm/distributed/kv_events.py +505 -0
  129. vllm/distributed/kv_transfer/README.md +29 -0
  130. vllm/distributed/kv_transfer/__init__.py +20 -0
  131. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  132. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  133. vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
  134. vllm/distributed/kv_transfer/kv_connector/factory.py +203 -0
  135. vllm/distributed/kv_transfer/kv_connector/utils.py +459 -0
  136. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +19 -0
  137. vllm/distributed/kv_transfer/kv_connector/v1/base.py +607 -0
  138. vllm/distributed/kv_transfer/kv_connector/v1/decode_bench_connector.py +419 -0
  139. vllm/distributed/kv_transfer/kv_connector/v1/example_connector.py +450 -0
  140. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +344 -0
  141. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/__init__.py +18 -0
  142. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/multi_process_adapter.py +395 -0
  143. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/utils.py +211 -0
  144. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/vllm_v1_adapter.py +1431 -0
  145. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_mp_connector.py +941 -0
  146. vllm/distributed/kv_transfer/kv_connector/v1/metrics.py +186 -0
  147. vllm/distributed/kv_transfer/kv_connector/v1/mooncake_connector.py +916 -0
  148. vllm/distributed/kv_transfer/kv_connector/v1/moriio/__init__.py +0 -0
  149. vllm/distributed/kv_transfer/kv_connector/v1/moriio/moriio_common.py +321 -0
  150. vllm/distributed/kv_transfer/kv_connector/v1/moriio/moriio_connector.py +1515 -0
  151. vllm/distributed/kv_transfer/kv_connector/v1/moriio/moriio_engine.py +609 -0
  152. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +477 -0
  153. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +2688 -0
  154. vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py +557 -0
  155. vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
  156. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +531 -0
  157. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +632 -0
  158. vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +273 -0
  159. vllm/distributed/kv_transfer/kv_transfer_state.py +78 -0
  160. vllm/distributed/parallel_state.py +1809 -0
  161. vllm/distributed/utils.py +545 -0
  162. vllm/engine/__init__.py +0 -0
  163. vllm/engine/arg_utils.py +2137 -0
  164. vllm/engine/async_llm_engine.py +6 -0
  165. vllm/engine/llm_engine.py +6 -0
  166. vllm/engine/protocol.py +194 -0
  167. vllm/entrypoints/__init__.py +0 -0
  168. vllm/entrypoints/anthropic/__init__.py +0 -0
  169. vllm/entrypoints/anthropic/protocol.py +162 -0
  170. vllm/entrypoints/anthropic/serving_messages.py +468 -0
  171. vllm/entrypoints/api_server.py +186 -0
  172. vllm/entrypoints/chat_utils.py +1912 -0
  173. vllm/entrypoints/cli/__init__.py +19 -0
  174. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  175. vllm/entrypoints/cli/benchmark/base.py +25 -0
  176. vllm/entrypoints/cli/benchmark/latency.py +21 -0
  177. vllm/entrypoints/cli/benchmark/main.py +57 -0
  178. vllm/entrypoints/cli/benchmark/mm_processor.py +21 -0
  179. vllm/entrypoints/cli/benchmark/serve.py +21 -0
  180. vllm/entrypoints/cli/benchmark/startup.py +21 -0
  181. vllm/entrypoints/cli/benchmark/sweep.py +21 -0
  182. vllm/entrypoints/cli/benchmark/throughput.py +21 -0
  183. vllm/entrypoints/cli/collect_env.py +38 -0
  184. vllm/entrypoints/cli/main.py +79 -0
  185. vllm/entrypoints/cli/openai.py +260 -0
  186. vllm/entrypoints/cli/run_batch.py +68 -0
  187. vllm/entrypoints/cli/serve.py +253 -0
  188. vllm/entrypoints/cli/types.py +29 -0
  189. vllm/entrypoints/constants.py +12 -0
  190. vllm/entrypoints/context.py +898 -0
  191. vllm/entrypoints/grpc_server.py +531 -0
  192. vllm/entrypoints/launcher.py +175 -0
  193. vllm/entrypoints/llm.py +1807 -0
  194. vllm/entrypoints/logger.py +86 -0
  195. vllm/entrypoints/openai/__init__.py +0 -0
  196. vllm/entrypoints/openai/api_server.py +1390 -0
  197. vllm/entrypoints/openai/cli_args.py +320 -0
  198. vllm/entrypoints/openai/orca_metrics.py +120 -0
  199. vllm/entrypoints/openai/parser/__init__.py +0 -0
  200. vllm/entrypoints/openai/parser/harmony_utils.py +820 -0
  201. vllm/entrypoints/openai/parser/responses_parser.py +176 -0
  202. vllm/entrypoints/openai/protocol.py +2566 -0
  203. vllm/entrypoints/openai/run_batch.py +635 -0
  204. vllm/entrypoints/openai/serving_chat.py +1897 -0
  205. vllm/entrypoints/openai/serving_chat_stream_harmony.py +101 -0
  206. vllm/entrypoints/openai/serving_completion.py +740 -0
  207. vllm/entrypoints/openai/serving_engine.py +1612 -0
  208. vllm/entrypoints/openai/serving_models.py +309 -0
  209. vllm/entrypoints/openai/serving_responses.py +2552 -0
  210. vllm/entrypoints/openai/serving_transcription.py +168 -0
  211. vllm/entrypoints/openai/speech_to_text.py +711 -0
  212. vllm/entrypoints/openai/utils.py +49 -0
  213. vllm/entrypoints/pooling/__init__.py +16 -0
  214. vllm/entrypoints/pooling/classify/__init__.py +0 -0
  215. vllm/entrypoints/pooling/classify/api_router.py +48 -0
  216. vllm/entrypoints/pooling/classify/protocol.py +181 -0
  217. vllm/entrypoints/pooling/classify/serving.py +233 -0
  218. vllm/entrypoints/pooling/embed/__init__.py +0 -0
  219. vllm/entrypoints/pooling/embed/api_router.py +65 -0
  220. vllm/entrypoints/pooling/embed/conftest.py +28 -0
  221. vllm/entrypoints/pooling/embed/protocol.py +217 -0
  222. vllm/entrypoints/pooling/embed/serving.py +684 -0
  223. vllm/entrypoints/pooling/pooling/__init__.py +0 -0
  224. vllm/entrypoints/pooling/pooling/api_router.py +62 -0
  225. vllm/entrypoints/pooling/pooling/protocol.py +146 -0
  226. vllm/entrypoints/pooling/pooling/serving.py +354 -0
  227. vllm/entrypoints/pooling/score/__init__.py +0 -0
  228. vllm/entrypoints/pooling/score/api_router.py +147 -0
  229. vllm/entrypoints/pooling/score/protocol.py +146 -0
  230. vllm/entrypoints/pooling/score/serving.py +511 -0
  231. vllm/entrypoints/renderer.py +411 -0
  232. vllm/entrypoints/responses_utils.py +218 -0
  233. vllm/entrypoints/sagemaker/__init__.py +4 -0
  234. vllm/entrypoints/sagemaker/routes.py +118 -0
  235. vllm/entrypoints/score_utils.py +271 -0
  236. vllm/entrypoints/serve/__init__.py +94 -0
  237. vllm/entrypoints/serve/cache/__init__.py +0 -0
  238. vllm/entrypoints/serve/cache/api_router.py +61 -0
  239. vllm/entrypoints/serve/disagg/__init__.py +0 -0
  240. vllm/entrypoints/serve/disagg/api_router.py +109 -0
  241. vllm/entrypoints/serve/disagg/protocol.py +90 -0
  242. vllm/entrypoints/serve/disagg/serving.py +285 -0
  243. vllm/entrypoints/serve/elastic_ep/__init__.py +0 -0
  244. vllm/entrypoints/serve/elastic_ep/api_router.py +96 -0
  245. vllm/entrypoints/serve/elastic_ep/middleware.py +49 -0
  246. vllm/entrypoints/serve/instrumentator/__init__.py +0 -0
  247. vllm/entrypoints/serve/instrumentator/health.py +33 -0
  248. vllm/entrypoints/serve/instrumentator/metrics.py +45 -0
  249. vllm/entrypoints/serve/instrumentator/offline_docs.py +50 -0
  250. vllm/entrypoints/serve/instrumentator/server_info.py +56 -0
  251. vllm/entrypoints/serve/instrumentator/static/swagger-ui-bundle.js +2 -0
  252. vllm/entrypoints/serve/instrumentator/static/swagger-ui.css +3 -0
  253. vllm/entrypoints/serve/lora/__init__.py +0 -0
  254. vllm/entrypoints/serve/lora/api_router.py +70 -0
  255. vllm/entrypoints/serve/profile/__init__.py +0 -0
  256. vllm/entrypoints/serve/profile/api_router.py +46 -0
  257. vllm/entrypoints/serve/rlhf/__init__.py +0 -0
  258. vllm/entrypoints/serve/rlhf/api_router.py +102 -0
  259. vllm/entrypoints/serve/rpc/__init__.py +0 -0
  260. vllm/entrypoints/serve/rpc/api_router.py +61 -0
  261. vllm/entrypoints/serve/sleep/__init__.py +0 -0
  262. vllm/entrypoints/serve/sleep/api_router.py +56 -0
  263. vllm/entrypoints/serve/tokenize/__init__.py +0 -0
  264. vllm/entrypoints/serve/tokenize/api_router.py +112 -0
  265. vllm/entrypoints/serve/tokenize/serving.py +204 -0
  266. vllm/entrypoints/ssl.py +78 -0
  267. vllm/entrypoints/tool.py +187 -0
  268. vllm/entrypoints/tool_server.py +234 -0
  269. vllm/entrypoints/utils.py +336 -0
  270. vllm/env_override.py +402 -0
  271. vllm/envs.py +1791 -0
  272. vllm/exceptions.py +36 -0
  273. vllm/forward_context.py +375 -0
  274. vllm/grpc/__init__.py +17 -0
  275. vllm/grpc/compile_protos.py +94 -0
  276. vllm/grpc/vllm_engine.proto +195 -0
  277. vllm/grpc/vllm_engine_pb2.py +77 -0
  278. vllm/grpc/vllm_engine_pb2.pyi +213 -0
  279. vllm/grpc/vllm_engine_pb2_grpc.py +330 -0
  280. vllm/inputs/__init__.py +44 -0
  281. vllm/inputs/data.py +359 -0
  282. vllm/inputs/parse.py +147 -0
  283. vllm/inputs/preprocess.py +716 -0
  284. vllm/logger.py +303 -0
  285. vllm/logging_utils/__init__.py +13 -0
  286. vllm/logging_utils/dump_input.py +83 -0
  287. vllm/logging_utils/formatter.py +127 -0
  288. vllm/logging_utils/lazy.py +20 -0
  289. vllm/logging_utils/log_time.py +34 -0
  290. vllm/logits_process.py +121 -0
  291. vllm/logprobs.py +206 -0
  292. vllm/lora/__init__.py +0 -0
  293. vllm/lora/layers/__init__.py +43 -0
  294. vllm/lora/layers/base.py +66 -0
  295. vllm/lora/layers/base_linear.py +172 -0
  296. vllm/lora/layers/column_parallel_linear.py +577 -0
  297. vllm/lora/layers/fused_moe.py +739 -0
  298. vllm/lora/layers/logits_processor.py +203 -0
  299. vllm/lora/layers/replicated_linear.py +70 -0
  300. vllm/lora/layers/row_parallel_linear.py +176 -0
  301. vllm/lora/layers/utils.py +115 -0
  302. vllm/lora/layers/vocal_parallel_embedding.py +140 -0
  303. vllm/lora/lora_model.py +221 -0
  304. vllm/lora/lora_weights.py +227 -0
  305. vllm/lora/model_manager.py +858 -0
  306. vllm/lora/ops/__init__.py +0 -0
  307. vllm/lora/ops/ipex_ops/__init__.py +6 -0
  308. vllm/lora/ops/ipex_ops/lora_ops.py +57 -0
  309. vllm/lora/ops/torch_ops/__init__.py +20 -0
  310. vllm/lora/ops/torch_ops/lora_ops.py +128 -0
  311. vllm/lora/ops/triton_ops/README_TUNING.md +60 -0
  312. vllm/lora/ops/triton_ops/__init__.py +21 -0
  313. vllm/lora/ops/triton_ops/fused_moe_lora_op.py +677 -0
  314. vllm/lora/ops/triton_ops/kernel_utils.py +340 -0
  315. vllm/lora/ops/triton_ops/lora_expand_op.py +310 -0
  316. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +154 -0
  317. vllm/lora/ops/triton_ops/lora_shrink_op.py +287 -0
  318. vllm/lora/ops/triton_ops/utils.py +313 -0
  319. vllm/lora/peft_helper.py +128 -0
  320. vllm/lora/punica_wrapper/__init__.py +10 -0
  321. vllm/lora/punica_wrapper/punica_base.py +493 -0
  322. vllm/lora/punica_wrapper/punica_cpu.py +351 -0
  323. vllm/lora/punica_wrapper/punica_gpu.py +413 -0
  324. vllm/lora/punica_wrapper/punica_selector.py +21 -0
  325. vllm/lora/punica_wrapper/punica_xpu.py +276 -0
  326. vllm/lora/punica_wrapper/utils.py +150 -0
  327. vllm/lora/request.py +60 -0
  328. vllm/lora/resolver.py +88 -0
  329. vllm/lora/utils.py +281 -0
  330. vllm/lora/worker_manager.py +278 -0
  331. vllm/model_executor/__init__.py +9 -0
  332. vllm/model_executor/custom_op.py +203 -0
  333. vllm/model_executor/layers/__init__.py +0 -0
  334. vllm/model_executor/layers/activation.py +628 -0
  335. vllm/model_executor/layers/attention/__init__.py +0 -0
  336. vllm/model_executor/layers/attention/chunked_local_attention.py +130 -0
  337. vllm/model_executor/layers/attention/cross_attention.py +182 -0
  338. vllm/model_executor/layers/attention/encoder_only_attention.py +103 -0
  339. vllm/model_executor/layers/attention/mm_encoder_attention.py +234 -0
  340. vllm/model_executor/layers/attention/static_sink_attention.py +254 -0
  341. vllm/model_executor/layers/attention_layer_base.py +34 -0
  342. vllm/model_executor/layers/batch_invariant.py +1063 -0
  343. vllm/model_executor/layers/conv.py +262 -0
  344. vllm/model_executor/layers/fla/__init__.py +8 -0
  345. vllm/model_executor/layers/fla/ops/__init__.py +17 -0
  346. vllm/model_executor/layers/fla/ops/chunk.py +240 -0
  347. vllm/model_executor/layers/fla/ops/chunk_delta_h.py +344 -0
  348. vllm/model_executor/layers/fla/ops/chunk_o.py +183 -0
  349. vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +154 -0
  350. vllm/model_executor/layers/fla/ops/cumsum.py +280 -0
  351. vllm/model_executor/layers/fla/ops/fused_recurrent.py +390 -0
  352. vllm/model_executor/layers/fla/ops/index.py +41 -0
  353. vllm/model_executor/layers/fla/ops/kda.py +1351 -0
  354. vllm/model_executor/layers/fla/ops/l2norm.py +146 -0
  355. vllm/model_executor/layers/fla/ops/layernorm_guard.py +396 -0
  356. vllm/model_executor/layers/fla/ops/op.py +60 -0
  357. vllm/model_executor/layers/fla/ops/solve_tril.py +556 -0
  358. vllm/model_executor/layers/fla/ops/utils.py +194 -0
  359. vllm/model_executor/layers/fla/ops/wy_fast.py +158 -0
  360. vllm/model_executor/layers/fused_moe/__init__.py +120 -0
  361. vllm/model_executor/layers/fused_moe/all2all_utils.py +173 -0
  362. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +411 -0
  363. vllm/model_executor/layers/fused_moe/config.py +1111 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H100,dtype=fp8_w8a8.json +123 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200.json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_L40S.json +147 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_RTX_PRO_6000_Blackwell_Workstation_Edition,dtype=fp8_w8a8.json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI308X.json +213 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200.json +147 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_L40S.json +147 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=129,N=704,device_name=NVIDIA_RTX_PRO_6000_Blackwell_Workstation_Edition,dtype=fp8_w8a8.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200.json +146 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200.json +146 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  454. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  455. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
  456. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  457. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  458. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI300X.json +201 -0
  459. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
  460. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  461. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_B300_SXM6_AC,dtype=fp8_w8a8.json +147 -0
  462. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  463. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +147 -0
  464. vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
  465. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  466. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
  467. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI355_OAM,dtype=fp8_w8a8.json +164 -0
  468. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=NVIDIA_B300_SXM6_AC,dtype=fp8_w8a8.json +147 -0
  469. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  470. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  471. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  472. vllm/model_executor/layers/fused_moe/configs/E=160,N=768,device_name=NVIDIA_B300_SXM6_AC,dtype=fp8_w8a8.json +147 -0
  473. vllm/model_executor/layers/fused_moe/configs/E=20,N=1536,device_name=NVIDIA_RTX_PRO_6000_Blackwell_Server_Edition,dtype=fp8_w8a8.json +147 -0
  474. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  475. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  476. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  477. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  478. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  479. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  480. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  481. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  482. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  483. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  484. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  485. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  486. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  487. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  488. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  489. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  490. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  491. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  492. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  493. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  494. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  495. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  496. vllm/model_executor/layers/fused_moe/configs/E=256,N=384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  497. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  498. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  499. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  500. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  501. vllm/model_executor/layers/fused_moe/configs/E=32,N=1408,device_name=NVIDIA_B200.json +147 -0
  502. vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  503. vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  504. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  505. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  506. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  507. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  508. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  509. vllm/model_executor/layers/fused_moe/configs/E=40,N=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  510. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  511. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  512. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  513. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
  514. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  515. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
  516. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +147 -0
  517. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  518. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
  519. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
  520. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  521. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
  522. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  523. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  524. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  525. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
  526. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
  527. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  528. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
  529. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  530. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  531. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
  532. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
  533. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
  534. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
  535. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
  536. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
  537. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  538. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  539. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  540. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  541. vllm/model_executor/layers/fused_moe/configs/E=62,N=128,device_name=AMD_Instinct_MI300X.json +200 -0
  542. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=AMD_Instinct_MI300X.json +200 -0
  543. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  544. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=AMD_Instinct_MI300X.json +200 -0
  545. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  546. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  547. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  548. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  549. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  550. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  551. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  552. vllm/model_executor/layers/fused_moe/configs/E=64,N=1408,device_name=NVIDIA_B200.json +147 -0
  553. vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  554. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  555. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  556. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  557. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  558. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
  559. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  560. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  561. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  562. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  563. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  564. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
  565. vllm/model_executor/layers/fused_moe/configs/E=64,N=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  566. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  567. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  568. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  569. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  570. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  571. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  572. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  573. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H100_PCIe,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  574. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  575. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
  576. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  577. vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
  578. vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
  579. vllm/model_executor/layers/fused_moe/configs/E=72,N=192,device_name=AMD_Instinct_MI300X.json +200 -0
  580. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=AMD_Instinct_MI300X.json +200 -0
  581. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  582. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=AMD_Instinct_MI300X.json +200 -0
  583. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  584. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  585. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  586. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  587. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  588. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  589. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  590. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  591. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  592. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  593. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  594. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  595. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  596. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  597. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  598. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  599. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  600. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  601. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  602. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  603. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  604. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  605. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  606. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  607. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  608. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  609. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  610. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  611. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
  612. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  613. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  614. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  615. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  616. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  617. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  618. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  619. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  620. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  621. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  622. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  623. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  624. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  625. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  626. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  627. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  628. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  629. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  630. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  631. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  632. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  633. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  634. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  635. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  636. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  637. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  638. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  639. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  640. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  641. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  642. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  643. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  644. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  645. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  646. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  647. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  648. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  649. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  650. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  651. vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +444 -0
  652. vllm/model_executor/layers/fused_moe/cutlass_moe.py +1086 -0
  653. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +364 -0
  654. vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +427 -0
  655. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +420 -0
  656. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +436 -0
  657. vllm/model_executor/layers/fused_moe/fallback.py +127 -0
  658. vllm/model_executor/layers/fused_moe/flashinfer_cutedsl_moe.py +338 -0
  659. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +310 -0
  660. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +371 -0
  661. vllm/model_executor/layers/fused_moe/flashinfer_trtllm_moe.py +192 -0
  662. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1018 -0
  663. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +824 -0
  664. vllm/model_executor/layers/fused_moe/fused_moe.py +2638 -0
  665. vllm/model_executor/layers/fused_moe/fused_moe_method_base.py +119 -0
  666. vllm/model_executor/layers/fused_moe/fused_moe_modular_method.py +117 -0
  667. vllm/model_executor/layers/fused_moe/fused_moe_router.py +40 -0
  668. vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +531 -0
  669. vllm/model_executor/layers/fused_moe/layer.py +2169 -0
  670. vllm/model_executor/layers/fused_moe/modular_kernel.py +1251 -0
  671. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +192 -0
  672. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +229 -0
  673. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  674. vllm/model_executor/layers/fused_moe/oracle/__init__.py +2 -0
  675. vllm/model_executor/layers/fused_moe/oracle/fp8.py +358 -0
  676. vllm/model_executor/layers/fused_moe/oracle/nvfp4.py +280 -0
  677. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +362 -0
  678. vllm/model_executor/layers/fused_moe/prepare_finalize.py +87 -0
  679. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +347 -0
  680. vllm/model_executor/layers/fused_moe/routed_experts_capturer.py +324 -0
  681. vllm/model_executor/layers/fused_moe/routing_simulator.py +310 -0
  682. vllm/model_executor/layers/fused_moe/shared_fused_moe.py +96 -0
  683. vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +171 -0
  684. vllm/model_executor/layers/fused_moe/triton_cutlass_moe.py +78 -0
  685. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +75 -0
  686. vllm/model_executor/layers/fused_moe/trtllm_moe.py +144 -0
  687. vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py +403 -0
  688. vllm/model_executor/layers/fused_moe/utils.py +382 -0
  689. vllm/model_executor/layers/fused_moe/zero_expert_fused_moe.py +189 -0
  690. vllm/model_executor/layers/kda.py +442 -0
  691. vllm/model_executor/layers/layernorm.py +451 -0
  692. vllm/model_executor/layers/lightning_attn.py +735 -0
  693. vllm/model_executor/layers/linear.py +1478 -0
  694. vllm/model_executor/layers/logits_processor.py +109 -0
  695. vllm/model_executor/layers/mamba/__init__.py +0 -0
  696. vllm/model_executor/layers/mamba/abstract.py +68 -0
  697. vllm/model_executor/layers/mamba/linear_attn.py +410 -0
  698. vllm/model_executor/layers/mamba/mamba_mixer.py +541 -0
  699. vllm/model_executor/layers/mamba/mamba_mixer2.py +936 -0
  700. vllm/model_executor/layers/mamba/mamba_utils.py +225 -0
  701. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  702. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +1240 -0
  703. vllm/model_executor/layers/mamba/ops/layernorm_gated.py +172 -0
  704. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +586 -0
  705. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +211 -0
  706. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +456 -0
  707. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +700 -0
  708. vllm/model_executor/layers/mamba/ops/ssd_combined.py +230 -0
  709. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +157 -0
  710. vllm/model_executor/layers/mamba/short_conv.py +254 -0
  711. vllm/model_executor/layers/mla.py +179 -0
  712. vllm/model_executor/layers/pooler/__init__.py +5 -0
  713. vllm/model_executor/layers/pooler/abstract.py +39 -0
  714. vllm/model_executor/layers/pooler/activations.py +162 -0
  715. vllm/model_executor/layers/pooler/common.py +32 -0
  716. vllm/model_executor/layers/pooler/seqwise/__init__.py +45 -0
  717. vllm/model_executor/layers/pooler/seqwise/heads.py +151 -0
  718. vllm/model_executor/layers/pooler/seqwise/methods.py +93 -0
  719. vllm/model_executor/layers/pooler/seqwise/poolers.py +127 -0
  720. vllm/model_executor/layers/pooler/special.py +128 -0
  721. vllm/model_executor/layers/pooler/tokwise/__init__.py +39 -0
  722. vllm/model_executor/layers/pooler/tokwise/heads.py +133 -0
  723. vllm/model_executor/layers/pooler/tokwise/methods.py +122 -0
  724. vllm/model_executor/layers/pooler/tokwise/poolers.py +127 -0
  725. vllm/model_executor/layers/quantization/__init__.py +195 -0
  726. vllm/model_executor/layers/quantization/auto_round.py +454 -0
  727. vllm/model_executor/layers/quantization/awq.py +277 -0
  728. vllm/model_executor/layers/quantization/awq_marlin.py +795 -0
  729. vllm/model_executor/layers/quantization/awq_triton.py +337 -0
  730. vllm/model_executor/layers/quantization/base_config.py +170 -0
  731. vllm/model_executor/layers/quantization/bitblas.py +502 -0
  732. vllm/model_executor/layers/quantization/bitsandbytes.py +631 -0
  733. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +3 -0
  734. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +982 -0
  735. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +2368 -0
  736. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +37 -0
  737. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +392 -0
  738. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  739. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +176 -0
  740. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_mxfp4.py +106 -0
  741. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +124 -0
  742. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +218 -0
  743. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +176 -0
  744. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +153 -0
  745. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +138 -0
  746. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +203 -0
  747. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +125 -0
  748. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +230 -0
  749. vllm/model_executor/layers/quantization/compressed_tensors/transform/__init__.py +0 -0
  750. vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +260 -0
  751. vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +173 -0
  752. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/__init__.py +0 -0
  753. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +64 -0
  754. vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
  755. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +224 -0
  756. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  757. vllm/model_executor/layers/quantization/cpu_wna16.py +299 -0
  758. vllm/model_executor/layers/quantization/deepspeedfp.py +218 -0
  759. vllm/model_executor/layers/quantization/experts_int8.py +209 -0
  760. vllm/model_executor/layers/quantization/fbgemm_fp8.py +195 -0
  761. vllm/model_executor/layers/quantization/fp8.py +1224 -0
  762. vllm/model_executor/layers/quantization/fp_quant.py +420 -0
  763. vllm/model_executor/layers/quantization/gguf.py +682 -0
  764. vllm/model_executor/layers/quantization/gptq.py +393 -0
  765. vllm/model_executor/layers/quantization/gptq_bitblas.py +482 -0
  766. vllm/model_executor/layers/quantization/gptq_marlin.py +934 -0
  767. vllm/model_executor/layers/quantization/gptq_marlin_24.py +320 -0
  768. vllm/model_executor/layers/quantization/hqq_marlin.py +372 -0
  769. vllm/model_executor/layers/quantization/inc.py +65 -0
  770. vllm/model_executor/layers/quantization/input_quant_fp8.py +212 -0
  771. vllm/model_executor/layers/quantization/ipex_quant.py +403 -0
  772. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  773. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +94 -0
  774. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +113 -0
  775. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
  776. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +323 -0
  777. vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +98 -0
  778. vllm/model_executor/layers/quantization/kernels/mixed_precision/cpu.py +126 -0
  779. vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +130 -0
  780. vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +111 -0
  781. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +168 -0
  782. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +159 -0
  783. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +200 -0
  784. vllm/model_executor/layers/quantization/kernels/mixed_precision/xpu.py +97 -0
  785. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +76 -0
  786. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +77 -0
  787. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +128 -0
  788. vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +220 -0
  789. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +147 -0
  790. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +88 -0
  791. vllm/model_executor/layers/quantization/kv_cache.py +153 -0
  792. vllm/model_executor/layers/quantization/modelopt.py +1665 -0
  793. vllm/model_executor/layers/quantization/moe_wna16.py +518 -0
  794. vllm/model_executor/layers/quantization/mxfp4.py +1145 -0
  795. vllm/model_executor/layers/quantization/petit.py +319 -0
  796. vllm/model_executor/layers/quantization/ptpc_fp8.py +140 -0
  797. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  798. vllm/model_executor/layers/quantization/quark/quark.py +570 -0
  799. vllm/model_executor/layers/quantization/quark/quark_moe.py +797 -0
  800. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  801. vllm/model_executor/layers/quantization/quark/schemes/quark_ocp_mx.py +343 -0
  802. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  803. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +179 -0
  804. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +139 -0
  805. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  806. vllm/model_executor/layers/quantization/qutlass_utils.py +185 -0
  807. vllm/model_executor/layers/quantization/rtn.py +626 -0
  808. vllm/model_executor/layers/quantization/schema.py +90 -0
  809. vllm/model_executor/layers/quantization/torchao.py +380 -0
  810. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  811. vllm/model_executor/layers/quantization/utils/allspark_utils.py +67 -0
  812. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +229 -0
  813. vllm/model_executor/layers/quantization/utils/configs/N=10240,K=5120,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  814. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  815. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  816. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  817. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  818. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  819. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  820. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  821. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  822. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  823. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  824. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  825. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  826. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  827. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  828. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  829. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  830. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  831. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  832. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  833. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  834. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  835. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  836. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  837. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  838. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  839. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  840. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  841. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  842. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  843. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  844. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  845. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  846. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  847. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  848. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  849. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  850. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  851. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  852. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  853. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  854. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  855. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  856. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  857. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  858. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  859. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  860. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  861. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  862. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  863. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  864. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  865. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  866. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  867. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  868. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  869. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  870. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  871. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  872. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  873. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  874. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  875. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  876. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  877. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  878. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  879. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  880. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  881. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  882. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  883. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  884. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  885. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  886. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  887. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  888. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  889. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  890. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  891. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  892. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  893. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  894. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  895. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  896. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  897. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  898. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  899. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  900. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  901. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  902. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  903. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  904. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  905. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  906. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  907. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  908. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  909. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  910. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  911. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  912. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  913. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  914. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  915. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  916. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  917. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  918. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  919. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  920. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  921. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  922. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  923. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  924. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  925. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  926. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  927. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  928. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  929. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  930. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  931. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  932. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  933. vllm/model_executor/layers/quantization/utils/configs/N=5120,K=25600,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  934. vllm/model_executor/layers/quantization/utils/configs/N=5120,K=8192,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  935. vllm/model_executor/layers/quantization/utils/configs/N=51200,K=5120,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  936. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  937. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  938. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  939. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  940. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  941. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  942. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  943. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  944. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  945. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  946. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  947. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  948. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  949. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  950. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  951. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  952. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  953. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  954. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  955. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  956. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  957. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  958. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  959. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  960. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  961. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  962. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  963. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  964. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  965. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  966. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  967. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  968. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  969. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  970. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  971. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  972. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  973. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  974. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  975. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  976. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  977. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  978. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  979. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  980. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  981. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  982. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  983. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  984. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  985. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  986. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  987. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  988. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  989. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  990. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  991. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  992. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  993. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  994. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  995. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  996. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  997. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  998. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  999. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1000. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1001. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1002. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1003. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  1004. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1005. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  1006. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1007. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1008. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1009. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1010. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1011. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  1012. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1013. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  1014. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1015. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1016. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1017. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1018. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  1019. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  1020. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  1021. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1022. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1023. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1024. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1025. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1026. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  1027. vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
  1028. vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +514 -0
  1029. vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +370 -0
  1030. vllm/model_executor/layers/quantization/utils/fp8_utils.py +1658 -0
  1031. vllm/model_executor/layers/quantization/utils/gptq_utils.py +158 -0
  1032. vllm/model_executor/layers/quantization/utils/int8_utils.py +477 -0
  1033. vllm/model_executor/layers/quantization/utils/layer_utils.py +41 -0
  1034. vllm/model_executor/layers/quantization/utils/machete_utils.py +56 -0
  1035. vllm/model_executor/layers/quantization/utils/marlin_utils.py +720 -0
  1036. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +565 -0
  1037. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +378 -0
  1038. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +219 -0
  1039. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +467 -0
  1040. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +189 -0
  1041. vllm/model_executor/layers/quantization/utils/mxfp6_utils.py +142 -0
  1042. vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +24 -0
  1043. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +142 -0
  1044. vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +67 -0
  1045. vllm/model_executor/layers/quantization/utils/ocp_mx_utils.py +51 -0
  1046. vllm/model_executor/layers/quantization/utils/petit_utils.py +124 -0
  1047. vllm/model_executor/layers/quantization/utils/quant_utils.py +767 -0
  1048. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +519 -0
  1049. vllm/model_executor/layers/resampler.py +283 -0
  1050. vllm/model_executor/layers/rotary_embedding/__init__.py +291 -0
  1051. vllm/model_executor/layers/rotary_embedding/base.py +282 -0
  1052. vllm/model_executor/layers/rotary_embedding/common.py +289 -0
  1053. vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +184 -0
  1054. vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +218 -0
  1055. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +43 -0
  1056. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +68 -0
  1057. vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +82 -0
  1058. vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
  1059. vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
  1060. vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +83 -0
  1061. vllm/model_executor/layers/rotary_embedding/mrope.py +412 -0
  1062. vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +47 -0
  1063. vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +159 -0
  1064. vllm/model_executor/layers/rotary_embedding/xdrope.py +160 -0
  1065. vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +84 -0
  1066. vllm/model_executor/layers/utils.py +251 -0
  1067. vllm/model_executor/layers/vocab_parallel_embedding.py +564 -0
  1068. vllm/model_executor/model_loader/__init__.py +150 -0
  1069. vllm/model_executor/model_loader/base_loader.py +71 -0
  1070. vllm/model_executor/model_loader/bitsandbytes_loader.py +821 -0
  1071. vllm/model_executor/model_loader/default_loader.py +304 -0
  1072. vllm/model_executor/model_loader/dummy_loader.py +28 -0
  1073. vllm/model_executor/model_loader/gguf_loader.py +371 -0
  1074. vllm/model_executor/model_loader/online_quantization.py +275 -0
  1075. vllm/model_executor/model_loader/runai_streamer_loader.py +115 -0
  1076. vllm/model_executor/model_loader/sharded_state_loader.py +214 -0
  1077. vllm/model_executor/model_loader/tensorizer.py +793 -0
  1078. vllm/model_executor/model_loader/tensorizer_loader.py +151 -0
  1079. vllm/model_executor/model_loader/utils.py +299 -0
  1080. vllm/model_executor/model_loader/weight_utils.py +1183 -0
  1081. vllm/model_executor/models/__init__.py +44 -0
  1082. vllm/model_executor/models/adapters.py +592 -0
  1083. vllm/model_executor/models/afmoe.py +697 -0
  1084. vllm/model_executor/models/aimv2.py +248 -0
  1085. vllm/model_executor/models/apertus.py +567 -0
  1086. vllm/model_executor/models/arcee.py +428 -0
  1087. vllm/model_executor/models/arctic.py +633 -0
  1088. vllm/model_executor/models/aria.py +663 -0
  1089. vllm/model_executor/models/audioflamingo3.py +639 -0
  1090. vllm/model_executor/models/aya_vision.py +448 -0
  1091. vllm/model_executor/models/bagel.py +591 -0
  1092. vllm/model_executor/models/baichuan.py +493 -0
  1093. vllm/model_executor/models/bailing_moe.py +643 -0
  1094. vllm/model_executor/models/bamba.py +511 -0
  1095. vllm/model_executor/models/bee.py +157 -0
  1096. vllm/model_executor/models/bert.py +911 -0
  1097. vllm/model_executor/models/bert_with_rope.py +729 -0
  1098. vllm/model_executor/models/blip.py +350 -0
  1099. vllm/model_executor/models/blip2.py +736 -0
  1100. vllm/model_executor/models/bloom.py +390 -0
  1101. vllm/model_executor/models/chameleon.py +1095 -0
  1102. vllm/model_executor/models/chatglm.py +502 -0
  1103. vllm/model_executor/models/clip.py +1045 -0
  1104. vllm/model_executor/models/cohere2_vision.py +470 -0
  1105. vllm/model_executor/models/commandr.py +469 -0
  1106. vllm/model_executor/models/config.py +571 -0
  1107. vllm/model_executor/models/dbrx.py +484 -0
  1108. vllm/model_executor/models/deepencoder.py +679 -0
  1109. vllm/model_executor/models/deepseek_eagle.py +253 -0
  1110. vllm/model_executor/models/deepseek_mtp.py +447 -0
  1111. vllm/model_executor/models/deepseek_ocr.py +601 -0
  1112. vllm/model_executor/models/deepseek_v2.py +1727 -0
  1113. vllm/model_executor/models/deepseek_vl2.py +642 -0
  1114. vllm/model_executor/models/dots1.py +566 -0
  1115. vllm/model_executor/models/dots_ocr.py +830 -0
  1116. vllm/model_executor/models/ernie45.py +53 -0
  1117. vllm/model_executor/models/ernie45_moe.py +755 -0
  1118. vllm/model_executor/models/ernie45_vl.py +1702 -0
  1119. vllm/model_executor/models/ernie45_vl_moe.py +801 -0
  1120. vllm/model_executor/models/ernie_mtp.py +278 -0
  1121. vllm/model_executor/models/exaone.py +524 -0
  1122. vllm/model_executor/models/exaone4.py +518 -0
  1123. vllm/model_executor/models/exaone_moe.py +579 -0
  1124. vllm/model_executor/models/exaone_moe_mtp.py +255 -0
  1125. vllm/model_executor/models/fairseq2_llama.py +154 -0
  1126. vllm/model_executor/models/falcon.py +543 -0
  1127. vllm/model_executor/models/falcon_h1.py +675 -0
  1128. vllm/model_executor/models/flex_olmo.py +155 -0
  1129. vllm/model_executor/models/fuyu.py +371 -0
  1130. vllm/model_executor/models/gemma.py +425 -0
  1131. vllm/model_executor/models/gemma2.py +435 -0
  1132. vllm/model_executor/models/gemma3.py +520 -0
  1133. vllm/model_executor/models/gemma3_mm.py +664 -0
  1134. vllm/model_executor/models/gemma3n.py +1166 -0
  1135. vllm/model_executor/models/gemma3n_audio_utils.py +57 -0
  1136. vllm/model_executor/models/gemma3n_mm.py +820 -0
  1137. vllm/model_executor/models/glm.py +24 -0
  1138. vllm/model_executor/models/glm4.py +295 -0
  1139. vllm/model_executor/models/glm4_1v.py +1823 -0
  1140. vllm/model_executor/models/glm4_moe.py +725 -0
  1141. vllm/model_executor/models/glm4_moe_mtp.py +365 -0
  1142. vllm/model_executor/models/glm4v.py +783 -0
  1143. vllm/model_executor/models/glmasr.py +1154 -0
  1144. vllm/model_executor/models/glmasr_utils.py +188 -0
  1145. vllm/model_executor/models/gpt2.py +385 -0
  1146. vllm/model_executor/models/gpt_bigcode.py +339 -0
  1147. vllm/model_executor/models/gpt_j.py +346 -0
  1148. vllm/model_executor/models/gpt_neox.py +340 -0
  1149. vllm/model_executor/models/gpt_oss.py +745 -0
  1150. vllm/model_executor/models/granite.py +475 -0
  1151. vllm/model_executor/models/granite_speech.py +919 -0
  1152. vllm/model_executor/models/granitemoe.py +561 -0
  1153. vllm/model_executor/models/granitemoehybrid.py +703 -0
  1154. vllm/model_executor/models/granitemoeshared.py +328 -0
  1155. vllm/model_executor/models/gritlm.py +242 -0
  1156. vllm/model_executor/models/grok1.py +803 -0
  1157. vllm/model_executor/models/h2ovl.py +554 -0
  1158. vllm/model_executor/models/hunyuan_v1.py +1042 -0
  1159. vllm/model_executor/models/hunyuan_vision.py +1034 -0
  1160. vllm/model_executor/models/hyperclovax_vision.py +1163 -0
  1161. vllm/model_executor/models/idefics2_vision_model.py +427 -0
  1162. vllm/model_executor/models/idefics3.py +734 -0
  1163. vllm/model_executor/models/interfaces.py +1180 -0
  1164. vllm/model_executor/models/interfaces_base.py +252 -0
  1165. vllm/model_executor/models/intern_vit.py +454 -0
  1166. vllm/model_executor/models/internlm2.py +451 -0
  1167. vllm/model_executor/models/internlm2_ve.py +139 -0
  1168. vllm/model_executor/models/interns1.py +828 -0
  1169. vllm/model_executor/models/interns1_vit.py +433 -0
  1170. vllm/model_executor/models/internvl.py +1436 -0
  1171. vllm/model_executor/models/iquest_loopcoder.py +595 -0
  1172. vllm/model_executor/models/isaac.py +1503 -0
  1173. vllm/model_executor/models/jais.py +397 -0
  1174. vllm/model_executor/models/jais2.py +508 -0
  1175. vllm/model_executor/models/jamba.py +599 -0
  1176. vllm/model_executor/models/jina_vl.py +145 -0
  1177. vllm/model_executor/models/kanana_v.py +756 -0
  1178. vllm/model_executor/models/keye.py +1709 -0
  1179. vllm/model_executor/models/keye_vl1_5.py +726 -0
  1180. vllm/model_executor/models/kimi_linear.py +659 -0
  1181. vllm/model_executor/models/kimi_vl.py +577 -0
  1182. vllm/model_executor/models/lfm2.py +515 -0
  1183. vllm/model_executor/models/lfm2_moe.py +746 -0
  1184. vllm/model_executor/models/lfm2_vl.py +732 -0
  1185. vllm/model_executor/models/lightonocr.py +197 -0
  1186. vllm/model_executor/models/llama.py +724 -0
  1187. vllm/model_executor/models/llama4.py +860 -0
  1188. vllm/model_executor/models/llama4_eagle.py +225 -0
  1189. vllm/model_executor/models/llama_eagle.py +213 -0
  1190. vllm/model_executor/models/llama_eagle3.py +375 -0
  1191. vllm/model_executor/models/llava.py +879 -0
  1192. vllm/model_executor/models/llava_next.py +583 -0
  1193. vllm/model_executor/models/llava_next_video.py +467 -0
  1194. vllm/model_executor/models/llava_onevision.py +922 -0
  1195. vllm/model_executor/models/longcat_flash.py +767 -0
  1196. vllm/model_executor/models/longcat_flash_mtp.py +348 -0
  1197. vllm/model_executor/models/mamba.py +276 -0
  1198. vllm/model_executor/models/mamba2.py +288 -0
  1199. vllm/model_executor/models/medusa.py +179 -0
  1200. vllm/model_executor/models/midashenglm.py +826 -0
  1201. vllm/model_executor/models/mimo.py +188 -0
  1202. vllm/model_executor/models/mimo_mtp.py +294 -0
  1203. vllm/model_executor/models/mimo_v2_flash.py +718 -0
  1204. vllm/model_executor/models/minicpm.py +660 -0
  1205. vllm/model_executor/models/minicpm3.py +233 -0
  1206. vllm/model_executor/models/minicpm_eagle.py +386 -0
  1207. vllm/model_executor/models/minicpmo.py +768 -0
  1208. vllm/model_executor/models/minicpmv.py +1742 -0
  1209. vllm/model_executor/models/minimax_m2.py +552 -0
  1210. vllm/model_executor/models/minimax_text_01.py +1008 -0
  1211. vllm/model_executor/models/minimax_vl_01.py +395 -0
  1212. vllm/model_executor/models/mistral3.py +638 -0
  1213. vllm/model_executor/models/mistral_large_3.py +63 -0
  1214. vllm/model_executor/models/mistral_large_3_eagle.py +137 -0
  1215. vllm/model_executor/models/mixtral.py +599 -0
  1216. vllm/model_executor/models/mllama4.py +1170 -0
  1217. vllm/model_executor/models/mlp_speculator.py +235 -0
  1218. vllm/model_executor/models/modernbert.py +458 -0
  1219. vllm/model_executor/models/module_mapping.py +74 -0
  1220. vllm/model_executor/models/molmo.py +1592 -0
  1221. vllm/model_executor/models/moonvit.py +601 -0
  1222. vllm/model_executor/models/mpt.py +335 -0
  1223. vllm/model_executor/models/nano_nemotron_vl.py +1725 -0
  1224. vllm/model_executor/models/nemotron.py +499 -0
  1225. vllm/model_executor/models/nemotron_h.py +902 -0
  1226. vllm/model_executor/models/nemotron_nas.py +474 -0
  1227. vllm/model_executor/models/nemotron_parse.py +958 -0
  1228. vllm/model_executor/models/nemotron_vl.py +651 -0
  1229. vllm/model_executor/models/nvlm_d.py +216 -0
  1230. vllm/model_executor/models/olmo.py +412 -0
  1231. vllm/model_executor/models/olmo2.py +454 -0
  1232. vllm/model_executor/models/olmoe.py +498 -0
  1233. vllm/model_executor/models/opencua.py +262 -0
  1234. vllm/model_executor/models/openpangu.py +1378 -0
  1235. vllm/model_executor/models/openpangu_mtp.py +265 -0
  1236. vllm/model_executor/models/opt.py +426 -0
  1237. vllm/model_executor/models/orion.py +365 -0
  1238. vllm/model_executor/models/ouro.py +507 -0
  1239. vllm/model_executor/models/ovis.py +557 -0
  1240. vllm/model_executor/models/ovis2_5.py +661 -0
  1241. vllm/model_executor/models/paddleocr_vl.py +1261 -0
  1242. vllm/model_executor/models/paligemma.py +429 -0
  1243. vllm/model_executor/models/persimmon.py +373 -0
  1244. vllm/model_executor/models/phi.py +363 -0
  1245. vllm/model_executor/models/phi3.py +18 -0
  1246. vllm/model_executor/models/phi3v.py +729 -0
  1247. vllm/model_executor/models/phi4mm.py +1250 -0
  1248. vllm/model_executor/models/phi4mm_audio.py +1296 -0
  1249. vllm/model_executor/models/phi4mm_utils.py +1907 -0
  1250. vllm/model_executor/models/phimoe.py +671 -0
  1251. vllm/model_executor/models/pixtral.py +1437 -0
  1252. vllm/model_executor/models/plamo2.py +993 -0
  1253. vllm/model_executor/models/plamo3.py +437 -0
  1254. vllm/model_executor/models/qwen.py +377 -0
  1255. vllm/model_executor/models/qwen2.py +600 -0
  1256. vllm/model_executor/models/qwen2_5_omni_thinker.py +1200 -0
  1257. vllm/model_executor/models/qwen2_5_vl.py +1598 -0
  1258. vllm/model_executor/models/qwen2_audio.py +478 -0
  1259. vllm/model_executor/models/qwen2_moe.py +604 -0
  1260. vllm/model_executor/models/qwen2_rm.py +120 -0
  1261. vllm/model_executor/models/qwen2_vl.py +1588 -0
  1262. vllm/model_executor/models/qwen3.py +331 -0
  1263. vllm/model_executor/models/qwen3_moe.py +752 -0
  1264. vllm/model_executor/models/qwen3_next.py +1410 -0
  1265. vllm/model_executor/models/qwen3_next_mtp.py +293 -0
  1266. vllm/model_executor/models/qwen3_omni_moe_thinker.py +1814 -0
  1267. vllm/model_executor/models/qwen3_vl.py +2120 -0
  1268. vllm/model_executor/models/qwen3_vl_moe.py +474 -0
  1269. vllm/model_executor/models/qwen_vl.py +821 -0
  1270. vllm/model_executor/models/radio.py +573 -0
  1271. vllm/model_executor/models/registry.py +1218 -0
  1272. vllm/model_executor/models/roberta.py +239 -0
  1273. vllm/model_executor/models/rvl.py +107 -0
  1274. vllm/model_executor/models/seed_oss.py +492 -0
  1275. vllm/model_executor/models/siglip.py +1259 -0
  1276. vllm/model_executor/models/siglip2.py +495 -0
  1277. vllm/model_executor/models/siglip2navit.py +660 -0
  1278. vllm/model_executor/models/skyworkr1v.py +951 -0
  1279. vllm/model_executor/models/smolvlm.py +38 -0
  1280. vllm/model_executor/models/solar.py +484 -0
  1281. vllm/model_executor/models/stablelm.py +354 -0
  1282. vllm/model_executor/models/starcoder2.py +365 -0
  1283. vllm/model_executor/models/step3_text.py +554 -0
  1284. vllm/model_executor/models/step3_vl.py +1147 -0
  1285. vllm/model_executor/models/swin.py +500 -0
  1286. vllm/model_executor/models/tarsier.py +624 -0
  1287. vllm/model_executor/models/telechat2.py +153 -0
  1288. vllm/model_executor/models/teleflm.py +78 -0
  1289. vllm/model_executor/models/terratorch.py +318 -0
  1290. vllm/model_executor/models/transformers/__init__.py +127 -0
  1291. vllm/model_executor/models/transformers/base.py +523 -0
  1292. vllm/model_executor/models/transformers/causal.py +65 -0
  1293. vllm/model_executor/models/transformers/legacy.py +90 -0
  1294. vllm/model_executor/models/transformers/moe.py +329 -0
  1295. vllm/model_executor/models/transformers/multimodal.py +441 -0
  1296. vllm/model_executor/models/transformers/pooling.py +102 -0
  1297. vllm/model_executor/models/transformers/utils.py +253 -0
  1298. vllm/model_executor/models/ultravox.py +786 -0
  1299. vllm/model_executor/models/utils.py +832 -0
  1300. vllm/model_executor/models/vision.py +546 -0
  1301. vllm/model_executor/models/voxtral.py +867 -0
  1302. vllm/model_executor/models/voxtral_streaming.py +304 -0
  1303. vllm/model_executor/models/whisper.py +993 -0
  1304. vllm/model_executor/models/whisper_utils.py +299 -0
  1305. vllm/model_executor/models/zamba2.py +986 -0
  1306. vllm/model_executor/parameter.py +642 -0
  1307. vllm/model_executor/utils.py +113 -0
  1308. vllm/model_executor/warmup/__init__.py +0 -0
  1309. vllm/model_executor/warmup/deep_gemm_warmup.py +371 -0
  1310. vllm/model_executor/warmup/kernel_warmup.py +97 -0
  1311. vllm/model_inspection.py +136 -0
  1312. vllm/multimodal/__init__.py +38 -0
  1313. vllm/multimodal/audio.py +287 -0
  1314. vllm/multimodal/base.py +60 -0
  1315. vllm/multimodal/cache.py +829 -0
  1316. vllm/multimodal/evs.py +294 -0
  1317. vllm/multimodal/hasher.py +123 -0
  1318. vllm/multimodal/image.py +155 -0
  1319. vllm/multimodal/inputs.py +1027 -0
  1320. vllm/multimodal/parse.py +674 -0
  1321. vllm/multimodal/processing.py +2469 -0
  1322. vllm/multimodal/profiling.py +351 -0
  1323. vllm/multimodal/registry.py +375 -0
  1324. vllm/multimodal/utils.py +550 -0
  1325. vllm/multimodal/video.py +512 -0
  1326. vllm/outputs.py +347 -0
  1327. vllm/platforms/__init__.py +277 -0
  1328. vllm/platforms/cpu.py +423 -0
  1329. vllm/platforms/cuda.py +618 -0
  1330. vllm/platforms/interface.py +707 -0
  1331. vllm/platforms/rocm.py +586 -0
  1332. vllm/platforms/tpu.py +20 -0
  1333. vllm/platforms/xpu.py +262 -0
  1334. vllm/plugins/__init__.py +81 -0
  1335. vllm/plugins/io_processors/__init__.py +68 -0
  1336. vllm/plugins/io_processors/interface.py +77 -0
  1337. vllm/plugins/lora_resolvers/__init__.py +0 -0
  1338. vllm/plugins/lora_resolvers/filesystem_resolver.py +52 -0
  1339. vllm/pooling_params.py +229 -0
  1340. vllm/profiler/__init__.py +0 -0
  1341. vllm/profiler/layerwise_profile.py +392 -0
  1342. vllm/profiler/utils.py +151 -0
  1343. vllm/profiler/wrapper.py +241 -0
  1344. vllm/py.typed +2 -0
  1345. vllm/ray/__init__.py +0 -0
  1346. vllm/ray/lazy_utils.py +30 -0
  1347. vllm/ray/ray_env.py +79 -0
  1348. vllm/reasoning/__init__.py +96 -0
  1349. vllm/reasoning/abs_reasoning_parsers.py +318 -0
  1350. vllm/reasoning/basic_parsers.py +175 -0
  1351. vllm/reasoning/deepseek_r1_reasoning_parser.py +67 -0
  1352. vllm/reasoning/deepseek_v3_reasoning_parser.py +69 -0
  1353. vllm/reasoning/ernie45_reasoning_parser.py +165 -0
  1354. vllm/reasoning/glm4_moe_reasoning_parser.py +13 -0
  1355. vllm/reasoning/gptoss_reasoning_parser.py +173 -0
  1356. vllm/reasoning/granite_reasoning_parser.py +363 -0
  1357. vllm/reasoning/holo2_reasoning_parser.py +89 -0
  1358. vllm/reasoning/hunyuan_a13b_reasoning_parser.py +237 -0
  1359. vllm/reasoning/identity_reasoning_parser.py +63 -0
  1360. vllm/reasoning/minimax_m2_reasoning_parser.py +110 -0
  1361. vllm/reasoning/mistral_reasoning_parser.py +154 -0
  1362. vllm/reasoning/olmo3_reasoning_parser.py +302 -0
  1363. vllm/reasoning/qwen3_reasoning_parser.py +67 -0
  1364. vllm/reasoning/seedoss_reasoning_parser.py +27 -0
  1365. vllm/reasoning/step3_reasoning_parser.py +113 -0
  1366. vllm/sampling_params.py +629 -0
  1367. vllm/scalar_type.py +355 -0
  1368. vllm/scripts.py +17 -0
  1369. vllm/sequence.py +64 -0
  1370. vllm/tasks.py +13 -0
  1371. vllm/third_party/__init__.py +0 -0
  1372. vllm/third_party/pynvml.py +6140 -0
  1373. vllm/tokenizers/__init__.py +18 -0
  1374. vllm/tokenizers/deepseek_v32.py +187 -0
  1375. vllm/tokenizers/deepseek_v32_encoding.py +463 -0
  1376. vllm/tokenizers/detokenizer_utils.py +198 -0
  1377. vllm/tokenizers/grok2.py +443 -0
  1378. vllm/tokenizers/hf.py +119 -0
  1379. vllm/tokenizers/mistral.py +543 -0
  1380. vllm/tokenizers/protocol.py +123 -0
  1381. vllm/tokenizers/registry.py +238 -0
  1382. vllm/tool_parsers/__init__.py +158 -0
  1383. vllm/tool_parsers/abstract_tool_parser.py +274 -0
  1384. vllm/tool_parsers/deepseekv31_tool_parser.py +388 -0
  1385. vllm/tool_parsers/deepseekv32_tool_parser.py +591 -0
  1386. vllm/tool_parsers/deepseekv3_tool_parser.py +390 -0
  1387. vllm/tool_parsers/ernie45_tool_parser.py +210 -0
  1388. vllm/tool_parsers/functiongemma_tool_parser.py +321 -0
  1389. vllm/tool_parsers/gigachat3_tool_parser.py +190 -0
  1390. vllm/tool_parsers/glm47_moe_tool_parser.py +23 -0
  1391. vllm/tool_parsers/glm4_moe_tool_parser.py +215 -0
  1392. vllm/tool_parsers/granite_20b_fc_tool_parser.py +273 -0
  1393. vllm/tool_parsers/granite_tool_parser.py +253 -0
  1394. vllm/tool_parsers/hermes_tool_parser.py +495 -0
  1395. vllm/tool_parsers/hunyuan_a13b_tool_parser.py +420 -0
  1396. vllm/tool_parsers/internlm2_tool_parser.py +227 -0
  1397. vllm/tool_parsers/jamba_tool_parser.py +323 -0
  1398. vllm/tool_parsers/kimi_k2_tool_parser.py +598 -0
  1399. vllm/tool_parsers/llama4_pythonic_tool_parser.py +341 -0
  1400. vllm/tool_parsers/llama_tool_parser.py +324 -0
  1401. vllm/tool_parsers/longcat_tool_parser.py +37 -0
  1402. vllm/tool_parsers/minimax_m2_tool_parser.py +776 -0
  1403. vllm/tool_parsers/minimax_tool_parser.py +849 -0
  1404. vllm/tool_parsers/mistral_tool_parser.py +612 -0
  1405. vllm/tool_parsers/olmo3_tool_parser.py +366 -0
  1406. vllm/tool_parsers/openai_tool_parser.py +111 -0
  1407. vllm/tool_parsers/phi4mini_tool_parser.py +120 -0
  1408. vllm/tool_parsers/pythonic_tool_parser.py +332 -0
  1409. vllm/tool_parsers/qwen3coder_tool_parser.py +781 -0
  1410. vllm/tool_parsers/qwen3xml_tool_parser.py +1316 -0
  1411. vllm/tool_parsers/seed_oss_tool_parser.py +744 -0
  1412. vllm/tool_parsers/step3_tool_parser.py +303 -0
  1413. vllm/tool_parsers/utils.py +229 -0
  1414. vllm/tool_parsers/xlam_tool_parser.py +556 -0
  1415. vllm/tracing.py +135 -0
  1416. vllm/transformers_utils/__init__.py +26 -0
  1417. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1418. vllm/transformers_utils/chat_templates/registry.py +73 -0
  1419. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1420. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1421. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1422. vllm/transformers_utils/chat_templates/template_deepseek_ocr.jinja +14 -0
  1423. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1424. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1425. vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
  1426. vllm/transformers_utils/config.py +1169 -0
  1427. vllm/transformers_utils/config_parser_base.py +20 -0
  1428. vllm/transformers_utils/configs/__init__.py +106 -0
  1429. vllm/transformers_utils/configs/afmoe.py +87 -0
  1430. vllm/transformers_utils/configs/arctic.py +216 -0
  1431. vllm/transformers_utils/configs/bagel.py +53 -0
  1432. vllm/transformers_utils/configs/chatglm.py +75 -0
  1433. vllm/transformers_utils/configs/deepseek_vl2.py +126 -0
  1434. vllm/transformers_utils/configs/dotsocr.py +71 -0
  1435. vllm/transformers_utils/configs/eagle.py +90 -0
  1436. vllm/transformers_utils/configs/falcon.py +89 -0
  1437. vllm/transformers_utils/configs/flex_olmo.py +82 -0
  1438. vllm/transformers_utils/configs/hunyuan_vl.py +322 -0
  1439. vllm/transformers_utils/configs/isaac.py +100 -0
  1440. vllm/transformers_utils/configs/jais.py +243 -0
  1441. vllm/transformers_utils/configs/kimi_linear.py +148 -0
  1442. vllm/transformers_utils/configs/kimi_vl.py +38 -0
  1443. vllm/transformers_utils/configs/lfm2_moe.py +163 -0
  1444. vllm/transformers_utils/configs/medusa.py +65 -0
  1445. vllm/transformers_utils/configs/midashenglm.py +103 -0
  1446. vllm/transformers_utils/configs/mistral.py +263 -0
  1447. vllm/transformers_utils/configs/mlp_speculator.py +69 -0
  1448. vllm/transformers_utils/configs/moonvit.py +33 -0
  1449. vllm/transformers_utils/configs/nemotron.py +220 -0
  1450. vllm/transformers_utils/configs/nemotron_h.py +284 -0
  1451. vllm/transformers_utils/configs/olmo3.py +83 -0
  1452. vllm/transformers_utils/configs/ovis.py +182 -0
  1453. vllm/transformers_utils/configs/qwen3_next.py +277 -0
  1454. vllm/transformers_utils/configs/radio.py +98 -0
  1455. vllm/transformers_utils/configs/speculators/__init__.py +2 -0
  1456. vllm/transformers_utils/configs/speculators/algos.py +38 -0
  1457. vllm/transformers_utils/configs/speculators/base.py +114 -0
  1458. vllm/transformers_utils/configs/step3_vl.py +178 -0
  1459. vllm/transformers_utils/configs/tarsier2.py +24 -0
  1460. vllm/transformers_utils/configs/ultravox.py +120 -0
  1461. vllm/transformers_utils/dynamic_module.py +70 -0
  1462. vllm/transformers_utils/gguf_utils.py +280 -0
  1463. vllm/transformers_utils/model_arch_config_convertor.py +402 -0
  1464. vllm/transformers_utils/processor.py +424 -0
  1465. vllm/transformers_utils/processors/__init__.py +25 -0
  1466. vllm/transformers_utils/processors/bagel.py +78 -0
  1467. vllm/transformers_utils/processors/deepseek_ocr.py +438 -0
  1468. vllm/transformers_utils/processors/deepseek_vl2.py +406 -0
  1469. vllm/transformers_utils/processors/hunyuan_vl.py +233 -0
  1470. vllm/transformers_utils/processors/hunyuan_vl_image.py +477 -0
  1471. vllm/transformers_utils/processors/ovis.py +453 -0
  1472. vllm/transformers_utils/processors/ovis2_5.py +468 -0
  1473. vllm/transformers_utils/repo_utils.py +287 -0
  1474. vllm/transformers_utils/runai_utils.py +102 -0
  1475. vllm/transformers_utils/s3_utils.py +95 -0
  1476. vllm/transformers_utils/tokenizer.py +19 -0
  1477. vllm/transformers_utils/utils.py +112 -0
  1478. vllm/triton_utils/__init__.py +20 -0
  1479. vllm/triton_utils/importing.py +103 -0
  1480. vllm/usage/__init__.py +0 -0
  1481. vllm/usage/usage_lib.py +278 -0
  1482. vllm/utils/__init__.py +36 -0
  1483. vllm/utils/argparse_utils.py +491 -0
  1484. vllm/utils/async_utils.py +310 -0
  1485. vllm/utils/cache.py +214 -0
  1486. vllm/utils/collection_utils.py +112 -0
  1487. vllm/utils/counter.py +45 -0
  1488. vllm/utils/deep_gemm.py +424 -0
  1489. vllm/utils/flashinfer.py +602 -0
  1490. vllm/utils/func_utils.py +236 -0
  1491. vllm/utils/gc_utils.py +151 -0
  1492. vllm/utils/hashing.py +117 -0
  1493. vllm/utils/import_utils.py +438 -0
  1494. vllm/utils/jsontree.py +158 -0
  1495. vllm/utils/math_utils.py +32 -0
  1496. vllm/utils/mem_constants.py +13 -0
  1497. vllm/utils/mem_utils.py +285 -0
  1498. vllm/utils/nccl.py +64 -0
  1499. vllm/utils/network_utils.py +331 -0
  1500. vllm/utils/nvtx_pytorch_hooks.py +286 -0
  1501. vllm/utils/platform_utils.py +59 -0
  1502. vllm/utils/profiling.py +56 -0
  1503. vllm/utils/registry.py +51 -0
  1504. vllm/utils/serial_utils.py +214 -0
  1505. vllm/utils/system_utils.py +296 -0
  1506. vllm/utils/tensor_schema.py +255 -0
  1507. vllm/utils/torch_utils.py +781 -0
  1508. vllm/v1/__init__.py +0 -0
  1509. vllm/v1/attention/__init__.py +0 -0
  1510. vllm/v1/attention/backend.py +736 -0
  1511. vllm/v1/attention/backends/__init__.py +0 -0
  1512. vllm/v1/attention/backends/cpu_attn.py +501 -0
  1513. vllm/v1/attention/backends/fa_utils.py +126 -0
  1514. vllm/v1/attention/backends/flash_attn.py +1092 -0
  1515. vllm/v1/attention/backends/flash_attn_diffkv.py +277 -0
  1516. vllm/v1/attention/backends/flashinfer.py +1713 -0
  1517. vllm/v1/attention/backends/flex_attention.py +1024 -0
  1518. vllm/v1/attention/backends/gdn_attn.py +382 -0
  1519. vllm/v1/attention/backends/linear_attn.py +77 -0
  1520. vllm/v1/attention/backends/mamba1_attn.py +28 -0
  1521. vllm/v1/attention/backends/mamba2_attn.py +256 -0
  1522. vllm/v1/attention/backends/mamba_attn.py +313 -0
  1523. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1524. vllm/v1/attention/backends/mla/aiter_triton_mla.py +66 -0
  1525. vllm/v1/attention/backends/mla/common.py +2156 -0
  1526. vllm/v1/attention/backends/mla/cutlass_mla.py +278 -0
  1527. vllm/v1/attention/backends/mla/flashattn_mla.py +348 -0
  1528. vllm/v1/attention/backends/mla/flashinfer_mla.py +175 -0
  1529. vllm/v1/attention/backends/mla/flashmla.py +321 -0
  1530. vllm/v1/attention/backends/mla/flashmla_sparse.py +1021 -0
  1531. vllm/v1/attention/backends/mla/indexer.py +345 -0
  1532. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +284 -0
  1533. vllm/v1/attention/backends/mla/rocm_aiter_mla_sparse.py +321 -0
  1534. vllm/v1/attention/backends/mla/triton_mla.py +171 -0
  1535. vllm/v1/attention/backends/registry.py +258 -0
  1536. vllm/v1/attention/backends/rocm_aiter_fa.py +1000 -0
  1537. vllm/v1/attention/backends/rocm_aiter_unified_attn.py +206 -0
  1538. vllm/v1/attention/backends/rocm_attn.py +405 -0
  1539. vllm/v1/attention/backends/short_conv_attn.py +26 -0
  1540. vllm/v1/attention/backends/tree_attn.py +430 -0
  1541. vllm/v1/attention/backends/triton_attn.py +578 -0
  1542. vllm/v1/attention/backends/utils.py +978 -0
  1543. vllm/v1/attention/ops/__init__.py +0 -0
  1544. vllm/v1/attention/ops/chunked_prefill_paged_decode.py +459 -0
  1545. vllm/v1/attention/ops/common.py +469 -0
  1546. vllm/v1/attention/ops/flashmla.py +254 -0
  1547. vllm/v1/attention/ops/merge_attn_states.py +47 -0
  1548. vllm/v1/attention/ops/paged_attn.py +51 -0
  1549. vllm/v1/attention/ops/pallas_kv_cache_update.py +130 -0
  1550. vllm/v1/attention/ops/prefix_prefill.py +862 -0
  1551. vllm/v1/attention/ops/rocm_aiter_mla_sparse.py +210 -0
  1552. vllm/v1/attention/ops/triton_decode_attention.py +709 -0
  1553. vllm/v1/attention/ops/triton_merge_attn_states.py +116 -0
  1554. vllm/v1/attention/ops/triton_prefill_attention.py +272 -0
  1555. vllm/v1/attention/ops/triton_reshape_and_cache_flash.py +395 -0
  1556. vllm/v1/attention/ops/triton_unified_attention.py +1088 -0
  1557. vllm/v1/attention/ops/vit_attn_wrappers.py +185 -0
  1558. vllm/v1/attention/selector.py +145 -0
  1559. vllm/v1/core/__init__.py +0 -0
  1560. vllm/v1/core/block_pool.py +489 -0
  1561. vllm/v1/core/encoder_cache_manager.py +402 -0
  1562. vllm/v1/core/kv_cache_coordinator.py +560 -0
  1563. vllm/v1/core/kv_cache_manager.py +485 -0
  1564. vllm/v1/core/kv_cache_metrics.py +96 -0
  1565. vllm/v1/core/kv_cache_utils.py +1642 -0
  1566. vllm/v1/core/sched/__init__.py +0 -0
  1567. vllm/v1/core/sched/async_scheduler.py +66 -0
  1568. vllm/v1/core/sched/interface.py +205 -0
  1569. vllm/v1/core/sched/output.py +261 -0
  1570. vllm/v1/core/sched/request_queue.py +208 -0
  1571. vllm/v1/core/sched/scheduler.py +1936 -0
  1572. vllm/v1/core/sched/utils.py +64 -0
  1573. vllm/v1/core/single_type_kv_cache_manager.py +926 -0
  1574. vllm/v1/cudagraph_dispatcher.py +183 -0
  1575. vllm/v1/engine/__init__.py +224 -0
  1576. vllm/v1/engine/async_llm.py +874 -0
  1577. vllm/v1/engine/coordinator.py +396 -0
  1578. vllm/v1/engine/core.py +1614 -0
  1579. vllm/v1/engine/core_client.py +1422 -0
  1580. vllm/v1/engine/detokenizer.py +351 -0
  1581. vllm/v1/engine/exceptions.py +18 -0
  1582. vllm/v1/engine/input_processor.py +713 -0
  1583. vllm/v1/engine/llm_engine.py +415 -0
  1584. vllm/v1/engine/logprobs.py +245 -0
  1585. vllm/v1/engine/output_processor.py +715 -0
  1586. vllm/v1/engine/parallel_sampling.py +150 -0
  1587. vllm/v1/engine/utils.py +1086 -0
  1588. vllm/v1/executor/__init__.py +6 -0
  1589. vllm/v1/executor/abstract.py +352 -0
  1590. vllm/v1/executor/multiproc_executor.py +888 -0
  1591. vllm/v1/executor/ray_distributed_executor.py +8 -0
  1592. vllm/v1/executor/ray_executor.py +623 -0
  1593. vllm/v1/executor/ray_utils.py +468 -0
  1594. vllm/v1/executor/uniproc_executor.py +186 -0
  1595. vllm/v1/kv_cache_interface.py +485 -0
  1596. vllm/v1/kv_offload/__init__.py +0 -0
  1597. vllm/v1/kv_offload/abstract.py +161 -0
  1598. vllm/v1/kv_offload/arc_manager.py +237 -0
  1599. vllm/v1/kv_offload/backend.py +97 -0
  1600. vllm/v1/kv_offload/backends/__init__.py +0 -0
  1601. vllm/v1/kv_offload/backends/cpu.py +62 -0
  1602. vllm/v1/kv_offload/cpu.py +109 -0
  1603. vllm/v1/kv_offload/factory.py +58 -0
  1604. vllm/v1/kv_offload/lru_manager.py +139 -0
  1605. vllm/v1/kv_offload/mediums.py +39 -0
  1606. vllm/v1/kv_offload/spec.py +70 -0
  1607. vllm/v1/kv_offload/worker/__init__.py +0 -0
  1608. vllm/v1/kv_offload/worker/cpu_gpu.py +287 -0
  1609. vllm/v1/kv_offload/worker/worker.py +163 -0
  1610. vllm/v1/metrics/__init__.py +0 -0
  1611. vllm/v1/metrics/loggers.py +1320 -0
  1612. vllm/v1/metrics/perf.py +1244 -0
  1613. vllm/v1/metrics/prometheus.py +82 -0
  1614. vllm/v1/metrics/ray_wrappers.py +194 -0
  1615. vllm/v1/metrics/reader.py +257 -0
  1616. vllm/v1/metrics/stats.py +440 -0
  1617. vllm/v1/outputs.py +242 -0
  1618. vllm/v1/pool/__init__.py +0 -0
  1619. vllm/v1/pool/metadata.py +124 -0
  1620. vllm/v1/request.py +281 -0
  1621. vllm/v1/sample/__init__.py +0 -0
  1622. vllm/v1/sample/logits_processor/__init__.py +352 -0
  1623. vllm/v1/sample/logits_processor/builtin.py +278 -0
  1624. vllm/v1/sample/logits_processor/interface.py +106 -0
  1625. vllm/v1/sample/logits_processor/state.py +165 -0
  1626. vllm/v1/sample/metadata.py +44 -0
  1627. vllm/v1/sample/ops/__init__.py +0 -0
  1628. vllm/v1/sample/ops/bad_words.py +57 -0
  1629. vllm/v1/sample/ops/logprobs.py +25 -0
  1630. vllm/v1/sample/ops/penalties.py +57 -0
  1631. vllm/v1/sample/ops/topk_topp_sampler.py +388 -0
  1632. vllm/v1/sample/rejection_sampler.py +822 -0
  1633. vllm/v1/sample/sampler.py +319 -0
  1634. vllm/v1/sample/tpu/__init__.py +0 -0
  1635. vllm/v1/sample/tpu/metadata.py +120 -0
  1636. vllm/v1/sample/tpu/sampler.py +215 -0
  1637. vllm/v1/serial_utils.py +514 -0
  1638. vllm/v1/spec_decode/__init__.py +0 -0
  1639. vllm/v1/spec_decode/eagle.py +1346 -0
  1640. vllm/v1/spec_decode/medusa.py +73 -0
  1641. vllm/v1/spec_decode/metadata.py +66 -0
  1642. vllm/v1/spec_decode/metrics.py +225 -0
  1643. vllm/v1/spec_decode/ngram_proposer.py +281 -0
  1644. vllm/v1/spec_decode/suffix_decoding.py +95 -0
  1645. vllm/v1/spec_decode/utils.py +109 -0
  1646. vllm/v1/structured_output/__init__.py +337 -0
  1647. vllm/v1/structured_output/backend_guidance.py +291 -0
  1648. vllm/v1/structured_output/backend_lm_format_enforcer.py +177 -0
  1649. vllm/v1/structured_output/backend_outlines.py +324 -0
  1650. vllm/v1/structured_output/backend_types.py +136 -0
  1651. vllm/v1/structured_output/backend_xgrammar.py +378 -0
  1652. vllm/v1/structured_output/request.py +91 -0
  1653. vllm/v1/structured_output/utils.py +457 -0
  1654. vllm/v1/utils.py +466 -0
  1655. vllm/v1/worker/__init__.py +0 -0
  1656. vllm/v1/worker/block_table.py +343 -0
  1657. vllm/v1/worker/cp_utils.py +42 -0
  1658. vllm/v1/worker/cpu_model_runner.py +122 -0
  1659. vllm/v1/worker/cpu_worker.py +192 -0
  1660. vllm/v1/worker/dp_utils.py +240 -0
  1661. vllm/v1/worker/ec_connector_model_runner_mixin.py +85 -0
  1662. vllm/v1/worker/gpu/README.md +4 -0
  1663. vllm/v1/worker/gpu/__init__.py +0 -0
  1664. vllm/v1/worker/gpu/async_utils.py +98 -0
  1665. vllm/v1/worker/gpu/attn_utils.py +183 -0
  1666. vllm/v1/worker/gpu/block_table.py +222 -0
  1667. vllm/v1/worker/gpu/buffer_utils.py +224 -0
  1668. vllm/v1/worker/gpu/cudagraph_utils.py +264 -0
  1669. vllm/v1/worker/gpu/dp_utils.py +31 -0
  1670. vllm/v1/worker/gpu/input_batch.py +526 -0
  1671. vllm/v1/worker/gpu/metrics/__init__.py +0 -0
  1672. vllm/v1/worker/gpu/metrics/logits.py +42 -0
  1673. vllm/v1/worker/gpu/mm/__init__.py +0 -0
  1674. vllm/v1/worker/gpu/mm/mrope_utils.py +127 -0
  1675. vllm/v1/worker/gpu/model_runner.py +1005 -0
  1676. vllm/v1/worker/gpu/sample/__init__.py +0 -0
  1677. vllm/v1/worker/gpu/sample/gumbel.py +106 -0
  1678. vllm/v1/worker/gpu/sample/logit_bias.py +270 -0
  1679. vllm/v1/worker/gpu/sample/logprob.py +167 -0
  1680. vllm/v1/worker/gpu/sample/metadata.py +79 -0
  1681. vllm/v1/worker/gpu/sample/min_p.py +58 -0
  1682. vllm/v1/worker/gpu/sample/output.py +14 -0
  1683. vllm/v1/worker/gpu/sample/penalties.py +155 -0
  1684. vllm/v1/worker/gpu/sample/sampler.py +88 -0
  1685. vllm/v1/worker/gpu/spec_decode/__init__.py +18 -0
  1686. vllm/v1/worker/gpu/spec_decode/eagle.py +566 -0
  1687. vllm/v1/worker/gpu/spec_decode/eagle_cudagraph.py +115 -0
  1688. vllm/v1/worker/gpu/spec_decode/rejection_sample.py +71 -0
  1689. vllm/v1/worker/gpu/states.py +282 -0
  1690. vllm/v1/worker/gpu/structured_outputs.py +100 -0
  1691. vllm/v1/worker/gpu_input_batch.py +1030 -0
  1692. vllm/v1/worker/gpu_model_runner.py +5761 -0
  1693. vllm/v1/worker/gpu_ubatch_wrapper.py +475 -0
  1694. vllm/v1/worker/gpu_worker.py +968 -0
  1695. vllm/v1/worker/kv_connector_model_runner_mixin.py +300 -0
  1696. vllm/v1/worker/lora_model_runner_mixin.py +225 -0
  1697. vllm/v1/worker/tpu_input_batch.py +574 -0
  1698. vllm/v1/worker/tpu_worker.py +18 -0
  1699. vllm/v1/worker/ubatch_utils.py +112 -0
  1700. vllm/v1/worker/ubatching.py +242 -0
  1701. vllm/v1/worker/utils.py +400 -0
  1702. vllm/v1/worker/worker_base.py +372 -0
  1703. vllm/v1/worker/workspace.py +253 -0
  1704. vllm/v1/worker/xpu_model_runner.py +48 -0
  1705. vllm/v1/worker/xpu_worker.py +174 -0
  1706. vllm/version.py +39 -0
  1707. vllm/vllm_flash_attn/.gitkeep +0 -0
  1708. vllm_cpu_avx512bf16-0.14.0.dist-info/METADATA +348 -0
  1709. vllm_cpu_avx512bf16-0.14.0.dist-info/RECORD +1712 -0
  1710. vllm_cpu_avx512bf16-0.14.0.dist-info/WHEEL +5 -0
  1711. vllm_cpu_avx512bf16-0.14.0.dist-info/entry_points.txt +5 -0
  1712. vllm_cpu_avx512bf16-0.14.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,2552 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+
4
+ import asyncio
5
+ import json
6
+ import time
7
+ import uuid
8
+ from collections import deque
9
+ from collections.abc import AsyncGenerator, AsyncIterator, Callable, Sequence
10
+ from contextlib import AsyncExitStack
11
+ from copy import copy
12
+ from dataclasses import dataclass
13
+ from http import HTTPStatus
14
+ from typing import Final
15
+
16
+ import jinja2
17
+ from fastapi import Request
18
+ from openai.types.responses import (
19
+ ResponseCodeInterpreterCallCodeDeltaEvent,
20
+ ResponseCodeInterpreterCallCodeDoneEvent,
21
+ ResponseCodeInterpreterCallCompletedEvent,
22
+ ResponseCodeInterpreterCallInProgressEvent,
23
+ ResponseCodeInterpreterCallInterpretingEvent,
24
+ ResponseCodeInterpreterToolCallParam,
25
+ ResponseContentPartAddedEvent,
26
+ ResponseContentPartDoneEvent,
27
+ ResponseFunctionCallArgumentsDeltaEvent,
28
+ ResponseFunctionCallArgumentsDoneEvent,
29
+ ResponseFunctionToolCall,
30
+ ResponseFunctionWebSearch,
31
+ ResponseMcpCallArgumentsDeltaEvent,
32
+ ResponseMcpCallArgumentsDoneEvent,
33
+ ResponseMcpCallCompletedEvent,
34
+ ResponseMcpCallInProgressEvent,
35
+ ResponseOutputItem,
36
+ ResponseOutputItemAddedEvent,
37
+ ResponseOutputItemDoneEvent,
38
+ ResponseOutputMessage,
39
+ ResponseOutputText,
40
+ ResponseReasoningItem,
41
+ ResponseReasoningTextDeltaEvent,
42
+ ResponseReasoningTextDoneEvent,
43
+ ResponseStatus,
44
+ ResponseTextDeltaEvent,
45
+ ResponseTextDoneEvent,
46
+ ResponseWebSearchCallCompletedEvent,
47
+ ResponseWebSearchCallInProgressEvent,
48
+ ResponseWebSearchCallSearchingEvent,
49
+ response_function_web_search,
50
+ response_text_delta_event,
51
+ )
52
+ from openai.types.responses.response_output_item import McpCall
53
+ from openai.types.responses.response_output_text import Logprob, LogprobTopLogprob
54
+ from openai.types.responses.response_reasoning_item import (
55
+ Content as ResponseReasoningTextContent,
56
+ )
57
+ from openai.types.responses.tool import Mcp, Tool
58
+ from openai_harmony import Message as OpenAIHarmonyMessage
59
+ from pydantic import TypeAdapter
60
+
61
+ from vllm import envs
62
+ from vllm.engine.protocol import EngineClient
63
+ from vllm.entrypoints.chat_utils import (
64
+ ChatCompletionMessageParam,
65
+ ChatTemplateContentFormatOption,
66
+ )
67
+ from vllm.entrypoints.context import (
68
+ ConversationContext,
69
+ HarmonyContext,
70
+ ParsableContext,
71
+ SimpleContext,
72
+ StreamingHarmonyContext,
73
+ )
74
+ from vllm.entrypoints.logger import RequestLogger
75
+ from vllm.entrypoints.openai.parser.harmony_utils import (
76
+ construct_harmony_previous_input_messages,
77
+ get_developer_message,
78
+ get_stop_tokens_for_assistant_actions,
79
+ get_system_message,
80
+ get_user_message,
81
+ has_custom_tools,
82
+ parse_output_message,
83
+ parse_remaining_state,
84
+ parse_response_input,
85
+ render_for_completion,
86
+ )
87
+ from vllm.entrypoints.openai.protocol import (
88
+ DeltaMessage,
89
+ ErrorResponse,
90
+ InputTokensDetails,
91
+ OutputTokensDetails,
92
+ RequestResponseMetadata,
93
+ ResponseCompletedEvent,
94
+ ResponseCreatedEvent,
95
+ ResponseInProgressEvent,
96
+ ResponseInputOutputMessage,
97
+ ResponseReasoningPartAddedEvent,
98
+ ResponseReasoningPartDoneEvent,
99
+ ResponsesRequest,
100
+ ResponsesResponse,
101
+ ResponseUsage,
102
+ StreamingResponsesResponse,
103
+ VLLMValidationError,
104
+ )
105
+ from vllm.entrypoints.openai.serving_engine import (
106
+ GenerationError,
107
+ OpenAIServing,
108
+ )
109
+ from vllm.entrypoints.openai.serving_models import OpenAIServingModels
110
+ from vllm.entrypoints.responses_utils import (
111
+ construct_input_messages,
112
+ construct_tool_dicts,
113
+ extract_tool_types,
114
+ )
115
+ from vllm.entrypoints.tool_server import ToolServer
116
+ from vllm.inputs.data import TokensPrompt
117
+ from vllm.logger import init_logger
118
+ from vllm.logprobs import Logprob as SampleLogprob
119
+ from vllm.logprobs import SampleLogprobs
120
+ from vllm.outputs import CompletionOutput
121
+ from vllm.sampling_params import SamplingParams, StructuredOutputsParams
122
+ from vllm.tokenizers import TokenizerLike
123
+ from vllm.utils import random_uuid
124
+
125
+ logger = init_logger(__name__)
126
+
127
+
128
@dataclass
class HarmonyStreamingState:
    """Mutable bookkeeping used while emitting harmony streaming events."""

    # Index of the content part currently being streamed (-1 = none yet).
    current_content_index: int = -1
    # Index of the output item currently being streamed.
    current_output_index: int = 0
    # Identifier of the in-flight output item.
    current_item_id: str = ""
    # Whether an output_item.added event was already sent for this item.
    sent_output_item_added: bool = False
    # Whether the next function-call delta is the first for this item.
    is_first_function_call_delta: bool = False

    def reset_for_new_item(self) -> None:
        """Advance to the next output item and clear the per-item flags."""
        self.current_output_index = self.current_output_index + 1
        self.sent_output_item_added = False
        self.is_first_function_call_delta = False
143
+
144
+
145
def _extract_allowed_tools_from_mcp_requests(
    tools: list[Tool],
) -> dict[str, list[str] | None]:
    """
    Extract allowed_tools mapping from MCP tool requests.

    Returns a dictionary mapping server_label to allowed_tools list.
    Handles both list format and McpAllowedToolsMcpToolFilter object format.

    Special handling:
    - If allowed_tools is None, returns None (allows all tools)
    - If allowed_tools contains "*", returns None (allows all tools)
    - Otherwise, returns the list of specific tool names

    This function can be reused for both harmony and non-harmony MCP calls.
    """
    mapping: dict[str, list[str] | None] = {}
    for candidate in tools:
        # Only MCP tool declarations carry an allowed_tools filter.
        if not isinstance(candidate, Mcp):
            continue

        raw = candidate.allowed_tools
        names: list[str] | None = None
        if raw is not None:
            if isinstance(raw, list):
                names = raw
            elif hasattr(raw, "tool_names"):
                # It's an McpAllowedToolsMcpToolFilter object.
                names = raw.tool_names

        # "*" and None both mean "allow every tool" — normalize to None.
        if names is not None and "*" in names:
            names = None

        mapping[candidate.server_label] = names
    return mapping
182
+
183
+
184
+ class OpenAIServingResponses(OpenAIServing):
185
    def __init__(
        self,
        engine_client: EngineClient,
        models: OpenAIServingModels,
        *,
        request_logger: RequestLogger | None,
        chat_template: str | None,
        chat_template_content_format: ChatTemplateContentFormatOption,
        return_tokens_as_token_ids: bool = False,
        reasoning_parser: str = "",
        enable_auto_tools: bool = False,
        tool_parser: str | None = None,
        tool_server: ToolServer | None = None,
        enable_prompt_tokens_details: bool = False,
        enable_force_include_usage: bool = False,
        enable_log_outputs: bool = False,
        log_error_stack: bool = False,
    ) -> None:
        """Initialize the Responses API serving layer.

        Args:
            engine_client: Engine client forwarded to the base serving class.
            models: Model registry used to resolve model names and adapters.
            request_logger: Optional request logger (may be None).
            chat_template: Optional chat template used for prompt rendering.
            chat_template_content_format: How message content is rendered.
            return_tokens_as_token_ids: Forwarded to the base serving class.
            reasoning_parser: Name of the reasoning parser to look up.
            enable_auto_tools: Whether automatic tool choice is enabled.
            tool_parser: Name of the tool-call parser to look up, if any.
            tool_server: Optional server exposing built-in tools.
            enable_prompt_tokens_details: Include prompt token details in usage.
            enable_force_include_usage: Always include usage in responses.
            enable_log_outputs: Whether model outputs should be logged.
            log_error_stack: Forwarded to the base serving class.
        """
        super().__init__(
            engine_client=engine_client,
            models=models,
            request_logger=request_logger,
            return_tokens_as_token_ids=return_tokens_as_token_ids,
            log_error_stack=log_error_stack,
        )

        self.chat_template = chat_template
        self.chat_template_content_format: Final = chat_template_content_format
        self.enable_log_outputs = enable_log_outputs

        self.reasoning_parser = self._get_reasoning_parser(
            reasoning_parser_name=reasoning_parser
        )
        self.enable_prompt_tokens_details = enable_prompt_tokens_details
        self.enable_force_include_usage = enable_force_include_usage
        self.default_sampling_params = self.model_config.get_diff_sampling_param()
        if self.default_sampling_params:
            source = self.model_config.generation_config
            source = "model" if source == "auto" else source
            logger.info(
                "Using default chat sampling params from %s: %s",
                source,
                self.default_sampling_params,
            )

        # If False (default), the "store" option is (silently) ignored and the
        # response is not stored. If True, the response is stored in memory.
        # NOTE(woosuk): This may not be intuitive for users, as the default
        # behavior in OpenAI's Responses API is to store the response, but
        # vLLM's default behavior is not.
        self.enable_store = envs.VLLM_ENABLE_RESPONSES_API_STORE
        if self.enable_store:
            logger.warning_once(
                "`VLLM_ENABLE_RESPONSES_API_STORE` is enabled. This may "
                "cause a memory leak since we never remove responses from "
                "the store."
            )

        # gpt-oss models are served through the harmony message format.
        self.use_harmony = self.model_config.hf_config.model_type == "gpt_oss"
        if self.use_harmony:
            logger.warning(
                "For gpt-oss, we ignore --enable-auto-tool-choice "
                "and always enable tool use."
            )
            # OpenAI models have two EOS-like tokens: <|return|> and <|call|>.
            # We need to add them to the stop token ids.
            # NOTE(review): this mutates the dict returned by
            # get_diff_sampling_param() in place — assumes a fresh dict is
            # returned per call; confirm against ModelConfig.
            if "stop_token_ids" not in self.default_sampling_params:
                self.default_sampling_params["stop_token_ids"] = []
            self.default_sampling_params["stop_token_ids"].extend(
                get_stop_tokens_for_assistant_actions()
            )
        self.enable_auto_tools = enable_auto_tools
        # set up tool use
        self.tool_parser = self._get_tool_parser(
            tool_parser_name=tool_parser, enable_auto_tools=enable_auto_tools
        )
        # HACK(woosuk): This is a hack. We should use a better store.
        # FIXME: If enable_store=True, this may cause a memory leak since we
        # never remove responses from the store.
        self.response_store: dict[str, ResponsesResponse] = {}
        self.response_store_lock = asyncio.Lock()

        # HACK(woosuk): This is a hack. We should use a better store.
        # FIXME: If enable_store=True, this may cause a memory leak since we
        # never remove messages from the store.
        self.msg_store: dict[str, list[ChatCompletionMessageParam]] = {}

        # HACK(wuhang): This is a hack. We should use a better store.
        # FIXME: If enable_store=True, this may cause a memory leak since we
        # never remove events from the store.
        self.event_store: dict[
            str, tuple[deque[StreamingResponsesResponse], asyncio.Event]
        ] = {}

        # In-flight background tasks keyed by response id, for cleanup.
        self.background_tasks: dict[str, asyncio.Task] = {}

        self.tool_server = tool_server
282
+
283
+ def _validate_generator_input(
284
+ self, engine_prompt: TokensPrompt
285
+ ) -> ErrorResponse | None:
286
+ """Add validations to the input to the generator here."""
287
+ if self.max_model_len <= len(engine_prompt["prompt_token_ids"]):
288
+ error_message = (
289
+ "The engine prompt length"
290
+ f" {len(engine_prompt['prompt_token_ids'])} "
291
+ f"exceeds the max_model_len {self.max_model_len}. "
292
+ "Please reduce prompt."
293
+ )
294
+ return self.create_error_response(
295
+ err_type="invalid_request_error",
296
+ message=error_message,
297
+ status_code=HTTPStatus.BAD_REQUEST,
298
+ param="input",
299
+ )
300
+ return None
301
+
302
+ def _validate_create_responses_input(
303
+ self, request: ResponsesRequest
304
+ ) -> ErrorResponse | None:
305
+ if self.use_harmony and request.is_include_output_logprobs():
306
+ return self.create_error_response(
307
+ err_type="invalid_request_error",
308
+ message="logprobs are not supported with gpt-oss models",
309
+ status_code=HTTPStatus.BAD_REQUEST,
310
+ param="logprobs",
311
+ )
312
+ if request.store and not self.enable_store and request.background:
313
+ return self.create_error_response(
314
+ err_type="invalid_request_error",
315
+ message=(
316
+ "This vLLM engine does not support `store=True` and "
317
+ "therefore does not support the background mode. To "
318
+ "enable these features, set the environment variable "
319
+ "`VLLM_ENABLE_RESPONSES_API_STORE=1` when launching "
320
+ "the vLLM server."
321
+ ),
322
+ status_code=HTTPStatus.BAD_REQUEST,
323
+ param="background",
324
+ )
325
+ if request.previous_input_messages and request.previous_response_id:
326
+ return self.create_error_response(
327
+ err_type="invalid_request_error",
328
+ message="Only one of `previous_input_messages` and "
329
+ "`previous_response_id` can be set.",
330
+ status_code=HTTPStatus.BAD_REQUEST,
331
+ param="previous_response_id",
332
+ )
333
+ return None
334
+
335
    async def create_responses(
        self,
        request: ResponsesRequest,
        raw_request: Request | None = None,
    ) -> (
        AsyncGenerator[StreamingResponsesResponse, None]
        | ResponsesResponse
        | ErrorResponse
    ):
        """Serve a Responses API request.

        Depending on request flags, returns a completed ResponsesResponse,
        an async generator of streaming events, or an ErrorResponse when
        validation or generation fails. Background requests are scheduled
        as asyncio tasks and a "queued" response is returned immediately.
        """
        error_check_ret = await self._check_model(request)
        if error_check_ret is not None:
            logger.error("Error with model %s", error_check_ret)
            return error_check_ret
        maybe_validation_error = self._validate_create_responses_input(request)
        if maybe_validation_error is not None:
            return maybe_validation_error

        # If the engine is dead, raise the engine's DEAD_ERROR.
        # This is required for the streaming case, where we return a
        # success status before we actually start generating text :).
        if self.engine_client.errored:
            raise self.engine_client.dead_error

        if request.store and not self.enable_store:
            # Disable the store option.
            # NOTE(woosuk): Although returning an error is possible, we opted
            # to implicitly disable store and process the request anyway, as
            # we assume most users do not intend to actually store the response
            # (i.e., their request's `store=True` just because it's the default
            # value).
            request.store = False

        # Handle the previous response ID.
        prev_response_id = request.previous_response_id
        if prev_response_id is not None:
            async with self.response_store_lock:
                prev_response = self.response_store.get(prev_response_id)
            if prev_response is None:
                return self._make_not_found_error(prev_response_id)
        else:
            prev_response = None

        try:
            lora_request = self._maybe_get_adapters(request)
            model_name = self.models.model_name(lora_request)
            tokenizer = await self.engine_client.get_tokenizer()

            # Build the input messages and engine prompts, either via the
            # harmony renderer (gpt-oss) or the regular chat template path.
            if self.use_harmony:
                messages, engine_prompts = self._make_request_with_harmony(
                    request, prev_response
                )
            else:
                messages, engine_prompts = await self._make_request(
                    request, prev_response, tokenizer
                )

        except (
            ValueError,
            TypeError,
            RuntimeError,
            jinja2.TemplateError,
            NotImplementedError,
        ) as e:
            logger.exception("Error in preprocessing prompt inputs")
            return self.create_error_response(e)

        request_metadata = RequestResponseMetadata(request_id=request.request_id)
        if raw_request:
            raw_request.state.request_metadata = request_metadata

        # Schedule the request and get the result generator.
        generators: list[AsyncGenerator[ConversationContext, None]] = []

        # Collect the built-in tools the tool server actually exposes.
        builtin_tool_list: list[str] = []
        if self.tool_server is not None:
            if self.tool_server.has_tool("browser"):
                builtin_tool_list.append("browser")
            if self.tool_server.has_tool("python"):
                builtin_tool_list.append("python")
            if self.tool_server.has_tool("container"):
                builtin_tool_list.append("container")

        if self.tool_server is not None:
            available_tools = builtin_tool_list
        else:
            assert len(builtin_tool_list) == 0
            available_tools = []
        try:
            for engine_prompt in engine_prompts:
                maybe_error = self._validate_generator_input(engine_prompt)
                if maybe_error is not None:
                    return maybe_error

                # Leave all remaining context-window room for generation.
                default_max_tokens = self.max_model_len - len(
                    engine_prompt["prompt_token_ids"]
                )

                sampling_params = request.to_sampling_params(
                    default_max_tokens, self.default_sampling_params
                )

                trace_headers = (
                    None
                    if raw_request is None
                    else await self._get_trace_headers(raw_request.headers)
                )

                # Pick the conversation context implementation that matches
                # the model type and the streaming mode.
                context: ConversationContext
                if self.use_harmony:
                    if request.stream:
                        context = StreamingHarmonyContext(messages, available_tools)
                    else:
                        context = HarmonyContext(messages, available_tools)
                else:
                    if envs.VLLM_USE_EXPERIMENTAL_PARSER_CONTEXT:
                        # This is a feature in development for parsing
                        # tokens during generation instead of at the end
                        context = ParsableContext(
                            response_messages=messages,
                            tokenizer=tokenizer,
                            reasoning_parser_cls=self.reasoning_parser,
                            request=request,
                            tool_parser_cls=self.tool_parser,
                            available_tools=available_tools,
                            chat_template=self.chat_template,
                            chat_template_content_format=self.chat_template_content_format,
                        )
                    else:
                        context = SimpleContext()

                # Let the reasoning parser install a structural tag when no
                # other structured-output constraint is set on the request.
                if self.reasoning_parser is not None:
                    reasoning_parser = self.reasoning_parser(tokenizer)
                    if sampling_params.structured_outputs is None:
                        sampling_params.structured_outputs = StructuredOutputsParams()
                    struct_out = sampling_params.structured_outputs
                    if struct_out.all_non_structural_tag_constraints_none():
                        sampling_params.structured_outputs.structural_tag = (
                            reasoning_parser.prepare_structured_tag(
                                sampling_params.structured_outputs.structural_tag,
                                self.tool_server,
                            )
                        )
                generator = self._generate_with_builtin_tools(
                    request_id=request.request_id,
                    engine_prompt=engine_prompt,
                    sampling_params=sampling_params,
                    context=context,
                    lora_request=lora_request,
                    priority=request.priority,
                    trace_headers=trace_headers,
                )
                generators.append(generator)
        except ValueError as e:
            return self.create_error_response(e)

        # Exactly one prompt/generator is expected per request.
        assert len(generators) == 1
        (result_generator,) = generators

        # Store the input messages.
        if request.store:
            self.msg_store[request.request_id] = messages

        if request.background:
            created_time = int(time.time())
            # Register a "queued" placeholder so the response can be polled.
            response = ResponsesResponse.from_request(
                request,
                sampling_params,
                model_name=model_name,
                created_time=created_time,
                output=[],
                status="queued",
                usage=None,
            )
            async with self.response_store_lock:
                self.response_store[response.id] = response

            # Run the request in the background.
            if request.stream:
                task = asyncio.create_task(
                    self._run_background_request_stream(
                        request,
                        sampling_params,
                        result_generator,
                        context,
                        model_name,
                        tokenizer,
                        request_metadata,
                        created_time,
                    ),
                    name=f"create_{request.request_id}",
                )
            else:
                task = asyncio.create_task(
                    self._run_background_request(
                        request,
                        sampling_params,
                        result_generator,
                        context,
                        model_name,
                        tokenizer,
                        request_metadata,
                        created_time,
                    ),
                    name=f"create_{response.id}",
                )

            # For cleanup.
            response_id = response.id
            self.background_tasks[response_id] = task
            task.add_done_callback(
                lambda _: self.background_tasks.pop(response_id, None)
            )

            if request.stream:
                return self.responses_background_stream_generator(request.request_id)
            return response

        if request.stream:
            return self.responses_stream_generator(
                request,
                sampling_params,
                result_generator,
                context,
                model_name,
                tokenizer,
                request_metadata,
            )

        try:
            return await self.responses_full_generator(
                request,
                sampling_params,
                result_generator,
                context,
                model_name,
                tokenizer,
                request_metadata,
            )
        except GenerationError as e:
            return self._convert_generation_error_to_response(e)
        except Exception as e:
            return self.create_error_response(e)
577
+
578
+ async def _make_request(
579
+ self,
580
+ request: ResponsesRequest,
581
+ prev_response: ResponsesResponse | None,
582
+ tokenizer: TokenizerLike,
583
+ ):
584
+ tool_dicts = construct_tool_dicts(request.tools, request.tool_choice)
585
+ # Construct the input messages.
586
+ messages = construct_input_messages(
587
+ request_instructions=request.instructions,
588
+ request_input=request.input,
589
+ prev_msg=self.msg_store.get(prev_response.id) if prev_response else None,
590
+ prev_response_output=prev_response.output if prev_response else None,
591
+ )
592
+
593
+ chat_template_kwargs = dict(
594
+ reasoning_effort=None
595
+ if request.reasoning is None
596
+ else request.reasoning.effort
597
+ )
598
+
599
+ _, engine_prompts = await self._preprocess_chat(
600
+ request,
601
+ tokenizer,
602
+ messages,
603
+ tool_dicts=tool_dicts,
604
+ tool_parser=self.tool_parser,
605
+ chat_template=self.chat_template,
606
+ chat_template_content_format=self.chat_template_content_format,
607
+ chat_template_kwargs=chat_template_kwargs,
608
+ )
609
+ return messages, engine_prompts
610
+
611
+ def _make_request_with_harmony(
612
+ self,
613
+ request: ResponsesRequest,
614
+ prev_response: ResponsesResponse | None,
615
+ ):
616
+ if request.tool_choice != "auto":
617
+ raise NotImplementedError(
618
+ "Only 'auto' tool_choice is supported in response API with Harmony"
619
+ )
620
+ messages = self._construct_input_messages_with_harmony(request, prev_response)
621
+ prompt_token_ids = render_for_completion(messages)
622
+ engine_prompt = TokensPrompt(prompt_token_ids=prompt_token_ids)
623
+
624
+ # Add cache_salt if provided in the request
625
+ if request.cache_salt is not None:
626
+ engine_prompt["cache_salt"] = request.cache_salt
627
+
628
+ return messages, [engine_prompt]
629
+
630
+ async def _initialize_tool_sessions(
631
+ self,
632
+ request: ResponsesRequest,
633
+ context: ConversationContext,
634
+ exit_stack: AsyncExitStack,
635
+ ):
636
+ # we should only initialize the tool session if the request needs tools
637
+ if len(request.tools) == 0:
638
+ return
639
+ mcp_tools = {
640
+ tool.server_label: tool for tool in request.tools if tool.type == "mcp"
641
+ }
642
+ await context.init_tool_sessions(
643
+ self.tool_server, exit_stack, request.request_id, mcp_tools
644
+ )
645
+
646
+ async def responses_full_generator(
647
+ self,
648
+ request: ResponsesRequest,
649
+ sampling_params: SamplingParams,
650
+ result_generator: AsyncIterator[ConversationContext],
651
+ context: ConversationContext,
652
+ model_name: str,
653
+ tokenizer: TokenizerLike,
654
+ request_metadata: RequestResponseMetadata,
655
+ created_time: int | None = None,
656
+ ) -> ErrorResponse | ResponsesResponse:
657
+ if created_time is None:
658
+ created_time = int(time.time())
659
+
660
+ async with AsyncExitStack() as exit_stack:
661
+ try:
662
+ await self._initialize_tool_sessions(request, context, exit_stack)
663
+ async for _ in result_generator:
664
+ pass
665
+ except asyncio.CancelledError:
666
+ return self.create_error_response("Client disconnected")
667
+ except ValueError as e:
668
+ return self.create_error_response(e)
669
+
670
+ # NOTE: Implementation of stauts is still WIP, but for now
671
+ # we guarantee that if the status is not "completed", it is accurate.
672
+ # "completed" is implemented as the "catch-all" for now.
673
+ status: ResponseStatus = "completed"
674
+
675
+ input_messages: ResponseInputOutputMessage | None = None
676
+ output_messages: ResponseInputOutputMessage | None = None
677
+ if self.use_harmony:
678
+ assert isinstance(context, HarmonyContext)
679
+ output = self._make_response_output_items_with_harmony(context)
680
+ if request.enable_response_messages:
681
+ input_messages = context.messages[: context.num_init_messages]
682
+ output_messages = context.messages[context.num_init_messages :]
683
+ num_tool_output_tokens = context.num_tool_output_tokens
684
+ if len(output) > 0:
685
+ if context.finish_reason == "length":
686
+ status = "incomplete"
687
+ elif context.finish_reason == "abort":
688
+ status = "cancelled"
689
+ else:
690
+ self._raise_if_error(context.finish_reason, request.request_id)
691
+ else:
692
+ status = "incomplete"
693
+ elif isinstance(context, ParsableContext):
694
+ output = context.parser.make_response_output_items_from_parsable_context()
695
+
696
+ if request.enable_response_messages:
697
+ input_messages = context.input_messages
698
+ output_messages = context.output_messages
699
+
700
+ # TODO: Calculate usage.
701
+ # assert final_res.prompt_token_ids is not None
702
+ num_tool_output_tokens = 0
703
+
704
+ # Check finish reason from the parser
705
+ if context.parser.finish_reason == "length":
706
+ status = "incomplete"
707
+ else:
708
+ assert isinstance(context, SimpleContext)
709
+ # Use final_output which has accumulated text/token_ids/logprobs
710
+ final_res = context.final_output
711
+ assert final_res is not None
712
+ assert len(final_res.outputs) == 1
713
+ final_output = final_res.outputs[0]
714
+
715
+ # finish_reason='error' indicates retryable internal error
716
+ self._raise_if_error(final_output.finish_reason, request.request_id)
717
+
718
+ # Check if generation was stopped due to max_tokens
719
+ if final_output.finish_reason == "length":
720
+ status = "incomplete"
721
+
722
+ output = self._make_response_output_items(request, final_output, tokenizer)
723
+
724
+ if request.enable_response_messages:
725
+ input_messages = context.input_messages
726
+ output_messages = context.output_messages
727
+
728
+ # Calculate usage.
729
+ assert final_res.prompt_token_ids is not None
730
+ num_tool_output_tokens = 0
731
+
732
+ assert isinstance(context, (SimpleContext, HarmonyContext, ParsableContext))
733
+ num_prompt_tokens = context.num_prompt_tokens
734
+ num_generated_tokens = context.num_output_tokens
735
+ num_cached_tokens = context.num_cached_tokens
736
+ num_reasoning_tokens = context.num_reasoning_tokens
737
+
738
+ usage = ResponseUsage(
739
+ input_tokens=num_prompt_tokens,
740
+ output_tokens=num_generated_tokens,
741
+ total_tokens=num_prompt_tokens + num_generated_tokens,
742
+ input_tokens_details=InputTokensDetails(
743
+ cached_tokens=num_cached_tokens,
744
+ input_tokens_per_turn=[
745
+ turn.input_tokens for turn in context.all_turn_metrics
746
+ ],
747
+ cached_tokens_per_turn=[
748
+ turn.cached_input_tokens for turn in context.all_turn_metrics
749
+ ],
750
+ ),
751
+ output_tokens_details=OutputTokensDetails(
752
+ reasoning_tokens=num_reasoning_tokens,
753
+ tool_output_tokens=num_tool_output_tokens,
754
+ output_tokens_per_turn=[
755
+ turn.output_tokens for turn in context.all_turn_metrics
756
+ ],
757
+ tool_output_tokens_per_turn=[
758
+ turn.tool_output_tokens for turn in context.all_turn_metrics
759
+ ],
760
+ ),
761
+ )
762
+ response = ResponsesResponse.from_request(
763
+ request,
764
+ sampling_params,
765
+ input_messages=input_messages,
766
+ output_messages=output_messages,
767
+ model_name=model_name,
768
+ created_time=created_time,
769
+ output=output,
770
+ status=status,
771
+ usage=usage,
772
+ )
773
+
774
+ if request.store:
775
+ async with self.response_store_lock:
776
+ stored_response = self.response_store.get(response.id)
777
+ # If the response is already cancelled, don't update it.
778
+ if stored_response is None or stored_response.status != "cancelled":
779
+ self.response_store[response.id] = response
780
+ return response
781
+
782
+ def _is_mcp_tool_by_namespace(self, recipient: str | None) -> bool:
783
+ """
784
+ Determine if a tool call is an MCP tool based on recipient prefix.
785
+
786
+ - Tools starting with "functions." are function calls
787
+ - Everything else is an MCP tool
788
+ """
789
+ if recipient is None:
790
+ return False
791
+
792
+ # Function calls have "functions." prefix
793
+ # Everything else is an MCP tool
794
+ return not recipient.startswith("functions.")
795
+
796
+ _TOOL_NAME_TO_MCP_SERVER_LABEL: Final[dict[str, str]] = {
797
+ "python": "code_interpreter",
798
+ "container": "container",
799
+ "browser": "web_search_preview",
800
+ }
801
+
802
+ def _topk_logprobs(
803
+ self,
804
+ logprobs: dict[int, SampleLogprob],
805
+ top_logprobs: int,
806
+ tokenizer: TokenizerLike,
807
+ ) -> list[LogprobTopLogprob]:
808
+ """Returns the top-k logprobs from the logprobs dictionary."""
809
+ out = []
810
+ for i, (token_id, _logprob) in enumerate(logprobs.items()):
811
+ if i >= top_logprobs:
812
+ break
813
+ text = (
814
+ _logprob.decoded_token
815
+ if _logprob.decoded_token is not None
816
+ else tokenizer.decode([token_id])
817
+ )
818
+ out.append(
819
+ LogprobTopLogprob(
820
+ token=text,
821
+ logprob=max(_logprob.logprob, -9999.0),
822
+ bytes=list(text.encode("utf-8", errors="replace")),
823
+ )
824
+ )
825
+ return out
826
+
827
+ def _create_response_logprobs(
828
+ self,
829
+ token_ids: Sequence[int],
830
+ logprobs: SampleLogprobs | None,
831
+ tokenizer: TokenizerLike,
832
+ top_logprobs: int | None = None,
833
+ ) -> list[Logprob]:
834
+ assert logprobs is not None, "logprobs must be provided"
835
+ assert len(token_ids) == len(logprobs), (
836
+ "token_ids and logprobs.token_ids must have the same length"
837
+ )
838
+ out = []
839
+ for i, token_id in enumerate(token_ids):
840
+ logprob = logprobs[i]
841
+ token_logprob = logprob[token_id]
842
+ text = (
843
+ token_logprob.decoded_token
844
+ if token_logprob.decoded_token is not None
845
+ else tokenizer.decode([token_id])
846
+ )
847
+ out.append(
848
+ Logprob(
849
+ token=text,
850
+ logprob=max(token_logprob.logprob, -9999.0),
851
+ bytes=list(text.encode("utf-8", errors="replace")),
852
+ top_logprobs=(
853
+ self._topk_logprobs(
854
+ logprob, top_logprobs=top_logprobs, tokenizer=tokenizer
855
+ )
856
+ if top_logprobs
857
+ else []
858
+ ),
859
+ )
860
+ )
861
+ return out
862
+
863
+ def _create_stream_response_logprobs(
864
+ self,
865
+ token_ids: Sequence[int],
866
+ logprobs: SampleLogprobs | None,
867
+ tokenizer: TokenizerLike,
868
+ top_logprobs: int | None = None,
869
+ ) -> list[response_text_delta_event.Logprob]:
870
+ lgs = self._create_response_logprobs(
871
+ token_ids=token_ids,
872
+ logprobs=logprobs,
873
+ tokenizer=tokenizer,
874
+ top_logprobs=top_logprobs,
875
+ )
876
+ return [
877
+ response_text_delta_event.Logprob(
878
+ token=lg.token,
879
+ logprob=lg.logprob,
880
+ top_logprobs=[
881
+ response_text_delta_event.LogprobTopLogprob(
882
+ token=tl.token, logprob=tl.logprob
883
+ )
884
+ for tl in lg.top_logprobs
885
+ ],
886
+ )
887
+ for lg in lgs
888
+ ]
889
+
890
+ def _make_response_output_items(
891
+ self,
892
+ request: ResponsesRequest,
893
+ final_output: CompletionOutput,
894
+ tokenizer: TokenizerLike,
895
+ ) -> list[ResponseOutputItem]:
896
+ if self.reasoning_parser:
897
+ try:
898
+ reasoning_parser = self.reasoning_parser(tokenizer)
899
+ except RuntimeError as e:
900
+ logger.exception("Error in reasoning parser creation.")
901
+ raise e
902
+
903
+ reasoning, content = reasoning_parser.extract_reasoning(
904
+ final_output.text, request=request
905
+ )
906
+ else:
907
+ reasoning = None
908
+ content = final_output.text
909
+
910
+ # Log complete response if output logging is enabled
911
+ if self.enable_log_outputs and self.request_logger:
912
+ output_text = ""
913
+ if content:
914
+ output_text = content
915
+ elif reasoning:
916
+ output_text = f"[reasoning: {reasoning}]"
917
+
918
+ if output_text:
919
+ self.request_logger.log_outputs(
920
+ request_id=request.request_id,
921
+ outputs=output_text,
922
+ output_token_ids=final_output.token_ids,
923
+ finish_reason=final_output.finish_reason,
924
+ is_streaming=False,
925
+ delta=False,
926
+ )
927
+
928
+ reasoning_item = None
929
+ message_item = None
930
+ if reasoning:
931
+ reasoning_item = ResponseReasoningItem(
932
+ id=f"rs_{random_uuid()}",
933
+ summary=[],
934
+ type="reasoning",
935
+ content=[
936
+ ResponseReasoningTextContent(text=reasoning, type="reasoning_text")
937
+ ],
938
+ status=None, # NOTE: Only the last output item has status.
939
+ )
940
+ tool_calls, content = self._parse_tool_calls_from_content(
941
+ request=request,
942
+ tokenizer=tokenizer,
943
+ content=content,
944
+ enable_auto_tools=self.enable_auto_tools,
945
+ tool_parser_cls=self.tool_parser,
946
+ )
947
+ if content:
948
+ output_text = ResponseOutputText(
949
+ text=content,
950
+ annotations=[], # TODO
951
+ type="output_text",
952
+ logprobs=(
953
+ self._create_response_logprobs(
954
+ token_ids=final_output.token_ids,
955
+ logprobs=final_output.logprobs,
956
+ tokenizer=tokenizer,
957
+ top_logprobs=request.top_logprobs,
958
+ )
959
+ if request.is_include_output_logprobs()
960
+ else None
961
+ ),
962
+ )
963
+ message_item = ResponseOutputMessage(
964
+ id=f"msg_{random_uuid()}",
965
+ content=[output_text],
966
+ role="assistant",
967
+ status="completed",
968
+ type="message",
969
+ )
970
+ outputs = []
971
+
972
+ if reasoning_item:
973
+ outputs.append(reasoning_item)
974
+ if message_item:
975
+ outputs.append(message_item)
976
+ if tool_calls:
977
+ tool_call_items = [
978
+ ResponseFunctionToolCall(
979
+ id=f"fc_{random_uuid()}",
980
+ call_id=f"call_{random_uuid()}",
981
+ type="function_call",
982
+ status="completed",
983
+ name=tool_call.name,
984
+ arguments=tool_call.arguments,
985
+ )
986
+ for tool_call in tool_calls
987
+ ]
988
+ outputs.extend(tool_call_items)
989
+ return outputs
990
+
991
+ def _make_response_output_items_with_harmony(
992
+ self,
993
+ context: HarmonyContext,
994
+ ) -> list[ResponseOutputItem]:
995
+ output_items: list[ResponseOutputItem] = []
996
+ num_init_messages = context.num_init_messages
997
+ for msg in context.messages[num_init_messages:]:
998
+ output_items.extend(parse_output_message(msg))
999
+ # Handle the generation stopped in the middle (if any).
1000
+ last_items = parse_remaining_state(context.parser)
1001
+ if last_items:
1002
+ output_items.extend(last_items)
1003
+ return output_items
1004
+
1005
+ def _extract_system_message_from_request(self, request) -> str | None:
1006
+ system_msg = None
1007
+ if not isinstance(request.input, str):
1008
+ for response_msg in request.input:
1009
+ if (
1010
+ isinstance(response_msg, dict)
1011
+ and response_msg.get("role") == "system"
1012
+ ):
1013
+ system_msg = response_msg.get("content")
1014
+ break
1015
+ return system_msg
1016
+
1017
+ def _construct_harmony_system_input_message(
1018
+ self, request: ResponsesRequest, with_custom_tools: bool, tool_types: set[str]
1019
+ ) -> OpenAIHarmonyMessage:
1020
+ model_identity = self._extract_system_message_from_request(request)
1021
+
1022
+ reasoning_effort = request.reasoning.effort if request.reasoning else None
1023
+
1024
+ # Extract allowed_tools from MCP tool requests
1025
+ allowed_tools_map = _extract_allowed_tools_from_mcp_requests(request.tools)
1026
+
1027
+ # Get filtered tool descriptions first.
1028
+ # If get_tool_description returns None (due to filtering), the tool is disabled.
1029
+ browser_description = (
1030
+ self.tool_server.get_tool_description(
1031
+ "browser", allowed_tools_map.get("web_search_preview")
1032
+ )
1033
+ if "web_search_preview" in tool_types
1034
+ and self.tool_server is not None
1035
+ and self.tool_server.has_tool("browser")
1036
+ else None
1037
+ )
1038
+ python_description = (
1039
+ self.tool_server.get_tool_description(
1040
+ "python", allowed_tools_map.get("code_interpreter")
1041
+ )
1042
+ if "code_interpreter" in tool_types
1043
+ and self.tool_server is not None
1044
+ and self.tool_server.has_tool("python")
1045
+ else None
1046
+ )
1047
+ container_description = (
1048
+ self.tool_server.get_tool_description(
1049
+ "container", allowed_tools_map.get("container")
1050
+ )
1051
+ if "container" in tool_types
1052
+ and self.tool_server is not None
1053
+ and self.tool_server.has_tool("container")
1054
+ else None
1055
+ )
1056
+
1057
+ sys_msg = get_system_message(
1058
+ model_identity=model_identity,
1059
+ reasoning_effort=reasoning_effort,
1060
+ browser_description=browser_description,
1061
+ python_description=python_description,
1062
+ container_description=container_description,
1063
+ instructions=request.instructions,
1064
+ with_custom_tools=with_custom_tools,
1065
+ )
1066
+ return sys_msg
1067
+
1068
+ def _construct_input_messages_with_harmony(
1069
+ self,
1070
+ request: ResponsesRequest,
1071
+ prev_response: ResponsesResponse | None,
1072
+ ) -> list[OpenAIHarmonyMessage]:
1073
+ messages: list[OpenAIHarmonyMessage] = []
1074
+ if prev_response is None:
1075
+ # New conversation.
1076
+ tool_types = extract_tool_types(request.tools)
1077
+ with_custom_tools = has_custom_tools(tool_types)
1078
+
1079
+ sys_msg = self._construct_harmony_system_input_message(
1080
+ request, with_custom_tools, tool_types
1081
+ )
1082
+ messages.append(sys_msg)
1083
+ if with_custom_tools:
1084
+ dev_msg = get_developer_message(
1085
+ instructions=request.instructions, tools=request.tools
1086
+ )
1087
+ messages.append(dev_msg)
1088
+ messages += construct_harmony_previous_input_messages(request)
1089
+
1090
+ else:
1091
+ # Continue the previous conversation.
1092
+ # FIXME(woosuk): Currently, request params like reasoning and
1093
+ # instructions are ignored.
1094
+ prev_msgs = self.msg_store[prev_response.id]
1095
+ # Remove the previous chain-of-thoughts if there is a new "final"
1096
+ # message. Note that this also removes these messages from the
1097
+ # msg_store.
1098
+ if len(prev_msgs) > 0:
1099
+ last_msg = prev_msgs[-1]
1100
+ assert isinstance(last_msg, OpenAIHarmonyMessage)
1101
+ if last_msg.channel == "final":
1102
+ prev_final_msg_idx = -1
1103
+ for i in range(len(prev_msgs) - 2, -1, -1):
1104
+ prev_msg_i = prev_msgs[i]
1105
+ assert isinstance(prev_msg_i, OpenAIHarmonyMessage)
1106
+ if prev_msg_i.channel == "final":
1107
+ prev_final_msg_idx = i
1108
+ break
1109
+ recent_turn_msgs = prev_msgs[prev_final_msg_idx + 1 :]
1110
+ del prev_msgs[prev_final_msg_idx + 1 :]
1111
+ for msg in recent_turn_msgs:
1112
+ assert isinstance(msg, OpenAIHarmonyMessage)
1113
+ prev_msgs.append(msg)
1114
+ messages.extend(prev_msgs)
1115
+ # Append the new input.
1116
+ # Responses API supports simple text inputs without chat format.
1117
+ if isinstance(request.input, str):
1118
+ messages.append(get_user_message(request.input))
1119
+ else:
1120
+ if prev_response is not None:
1121
+ prev_outputs = copy(prev_response.output)
1122
+ else:
1123
+ prev_outputs = []
1124
+ for response_msg in request.input:
1125
+ new_msg = parse_response_input(response_msg, prev_outputs)
1126
+ if new_msg.author.role != "system":
1127
+ messages.append(new_msg)
1128
+
1129
+ # User passes in a tool call request and its output. We need
1130
+ # to add the tool call request to prev_outputs so that the
1131
+ # parse_response_input can find the tool call request when
1132
+ # parsing the tool call output.
1133
+ if isinstance(response_msg, ResponseFunctionToolCall):
1134
+ prev_outputs.append(response_msg)
1135
+ return messages
1136
+
1137
+ async def _run_background_request_stream(
1138
+ self,
1139
+ request: ResponsesRequest,
1140
+ *args,
1141
+ **kwargs,
1142
+ ):
1143
+ event_deque: deque[StreamingResponsesResponse] = deque()
1144
+ new_event_signal = asyncio.Event()
1145
+ self.event_store[request.request_id] = (event_deque, new_event_signal)
1146
+ response = None
1147
+ try:
1148
+ generator = self.responses_stream_generator(request, *args, **kwargs)
1149
+ async for event in generator:
1150
+ event_deque.append(event)
1151
+ new_event_signal.set() # Signal new event available
1152
+ except GenerationError as e:
1153
+ response = self._convert_generation_error_to_response(e)
1154
+ except Exception as e:
1155
+ logger.exception("Background request failed for %s", request.request_id)
1156
+ response = self.create_error_response(e)
1157
+ finally:
1158
+ new_event_signal.set()
1159
+
1160
+ if response is not None and isinstance(response, ErrorResponse):
1161
+ # If the request has failed, update the status to "failed".
1162
+ response_id = request.request_id
1163
+ async with self.response_store_lock:
1164
+ stored_response = self.response_store.get(response_id)
1165
+ assert stored_response is not None
1166
+ if stored_response.status not in ("completed", "cancelled"):
1167
+ stored_response.status = "failed"
1168
+
1169
+ async def _run_background_request(
1170
+ self,
1171
+ request: ResponsesRequest,
1172
+ *args,
1173
+ **kwargs,
1174
+ ):
1175
+ try:
1176
+ response = await self.responses_full_generator(request, *args, **kwargs)
1177
+ except GenerationError as e:
1178
+ response = self._convert_generation_error_to_response(e)
1179
+ except Exception as e:
1180
+ logger.exception("Background request failed for %s", request.request_id)
1181
+ response = self.create_error_response(e)
1182
+
1183
+ if isinstance(response, ErrorResponse):
1184
+ # If the request has failed, update the status to "failed".
1185
+ response_id = request.request_id
1186
+ async with self.response_store_lock:
1187
+ stored_response = self.response_store.get(response_id)
1188
+ assert stored_response is not None
1189
+ if stored_response.status not in ("completed", "cancelled"):
1190
+ stored_response.status = "failed"
1191
+
1192
+ async def responses_background_stream_generator(
1193
+ self,
1194
+ response_id: str,
1195
+ starting_after: int | None = None,
1196
+ ) -> AsyncGenerator[StreamingResponsesResponse, None]:
1197
+ if response_id not in self.event_store:
1198
+ raise VLLMValidationError(
1199
+ f"Unknown response_id: {response_id}",
1200
+ parameter="response_id",
1201
+ value=response_id,
1202
+ )
1203
+
1204
+ event_deque, new_event_signal = self.event_store[response_id]
1205
+ start_index = 0 if starting_after is None else starting_after + 1
1206
+ current_index = start_index
1207
+
1208
+ while True:
1209
+ new_event_signal.clear()
1210
+
1211
+ # Yield existing events from start_index
1212
+ while current_index < len(event_deque):
1213
+ event = event_deque[current_index]
1214
+ yield event
1215
+ if getattr(event, "type", "unknown") == "response.completed":
1216
+ return
1217
+ current_index += 1
1218
+
1219
+ await new_event_signal.wait()
1220
+
1221
+ async def retrieve_responses(
1222
+ self,
1223
+ response_id: str,
1224
+ starting_after: int | None,
1225
+ stream: bool | None,
1226
+ ) -> (
1227
+ ErrorResponse
1228
+ | ResponsesResponse
1229
+ | AsyncGenerator[StreamingResponsesResponse, None]
1230
+ ):
1231
+ async with self.response_store_lock:
1232
+ response = self.response_store.get(response_id)
1233
+
1234
+ if response is None:
1235
+ return self._make_not_found_error(response_id)
1236
+
1237
+ if stream:
1238
+ return self.responses_background_stream_generator(
1239
+ response_id,
1240
+ starting_after,
1241
+ )
1242
+ return response
1243
+
1244
+ async def cancel_responses(
1245
+ self,
1246
+ response_id: str,
1247
+ ) -> ErrorResponse | ResponsesResponse:
1248
+ async with self.response_store_lock:
1249
+ response = self.response_store.get(response_id)
1250
+ if response is None:
1251
+ return self._make_not_found_error(response_id)
1252
+
1253
+ prev_status = response.status
1254
+ if prev_status not in ("queued", "in_progress"):
1255
+ return self.create_error_response(
1256
+ err_type="invalid_request_error",
1257
+ message="Cannot cancel a synchronous response.",
1258
+ param="response_id",
1259
+ )
1260
+
1261
+ # Update the status to "cancelled".
1262
+ response.status = "cancelled"
1263
+
1264
+ # Abort the request.
1265
+ if task := self.background_tasks.get(response_id):
1266
+ task.cancel()
1267
+ try:
1268
+ await task
1269
+ except asyncio.CancelledError:
1270
+ logger.exception("Background task for %s was cancelled", response_id)
1271
+ return response
1272
+
1273
+ def _make_not_found_error(self, response_id: str) -> ErrorResponse:
1274
+ return self.create_error_response(
1275
+ err_type="invalid_request_error",
1276
+ message=f"Response with id '{response_id}' not found.",
1277
+ status_code=HTTPStatus.NOT_FOUND,
1278
+ param="response_id",
1279
+ )
1280
+
1281
+ def _make_store_not_supported_error(self) -> ErrorResponse:
1282
+ return self.create_error_response(
1283
+ err_type="invalid_request_error",
1284
+ message=(
1285
+ "`store=True` (default) is not supported. Please set "
1286
+ "`store=False` in Responses API or set "
1287
+ "`VLLM_ENABLE_RESPONSES_API_STORE=1` in the env var when "
1288
+ "starting the vLLM server."
1289
+ ),
1290
+ status_code=HTTPStatus.BAD_REQUEST,
1291
+ param="store",
1292
+ )
1293
+
1294
+ async def _process_simple_streaming_events(
1295
+ self,
1296
+ request: ResponsesRequest,
1297
+ sampling_params: SamplingParams,
1298
+ result_generator: AsyncIterator[ConversationContext | None],
1299
+ context: ConversationContext,
1300
+ model_name: str,
1301
+ tokenizer: TokenizerLike,
1302
+ request_metadata: RequestResponseMetadata,
1303
+ created_time: int,
1304
+ _increment_sequence_number_and_return: Callable[
1305
+ [StreamingResponsesResponse], StreamingResponsesResponse
1306
+ ],
1307
+ ) -> AsyncGenerator[StreamingResponsesResponse, None]:
1308
+ current_content_index = 0
1309
+ current_output_index = 0
1310
+ current_item_id = ""
1311
+ reasoning_parser = None
1312
+ if self.reasoning_parser:
1313
+ reasoning_parser = self.reasoning_parser(tokenizer)
1314
+ previous_text = ""
1315
+ previous_token_ids: list[int] = []
1316
+ first_delta_sent = False
1317
+ previous_delta_messages: list[DeltaMessage] = []
1318
+ async for ctx in result_generator:
1319
+ assert isinstance(ctx, SimpleContext)
1320
+ if ctx.last_output is None:
1321
+ continue
1322
+ if ctx.last_output.outputs:
1323
+ output = ctx.last_output.outputs[0]
1324
+ # finish_reason='error' indicates a retryable error
1325
+ self._raise_if_error(output.finish_reason, request.request_id)
1326
+ if reasoning_parser:
1327
+ delta_message = reasoning_parser.extract_reasoning_streaming(
1328
+ previous_text=previous_text,
1329
+ current_text=previous_text + output.text,
1330
+ delta_text=output.text,
1331
+ previous_token_ids=previous_token_ids,
1332
+ current_token_ids=previous_token_ids + output.token_ids,
1333
+ delta_token_ids=output.token_ids,
1334
+ )
1335
+ else:
1336
+ delta_message = DeltaMessage(
1337
+ content=output.text,
1338
+ )
1339
+ previous_text += output.text
1340
+ previous_token_ids += output.token_ids
1341
+ if not delta_message:
1342
+ continue
1343
+ if not first_delta_sent:
1344
+ current_item_id = str(uuid.uuid4())
1345
+ if delta_message.reasoning:
1346
+ yield _increment_sequence_number_and_return(
1347
+ ResponseOutputItemAddedEvent(
1348
+ type="response.output_item.added",
1349
+ sequence_number=-1,
1350
+ output_index=current_output_index,
1351
+ item=ResponseReasoningItem(
1352
+ type="reasoning",
1353
+ id=current_item_id,
1354
+ summary=[],
1355
+ status="in_progress",
1356
+ ),
1357
+ )
1358
+ )
1359
+ else:
1360
+ yield _increment_sequence_number_and_return(
1361
+ ResponseOutputItemAddedEvent(
1362
+ type="response.output_item.added",
1363
+ sequence_number=-1,
1364
+ output_index=current_output_index,
1365
+ item=ResponseOutputMessage(
1366
+ id=current_item_id,
1367
+ type="message",
1368
+ role="assistant",
1369
+ content=[],
1370
+ status="in_progress",
1371
+ ),
1372
+ )
1373
+ )
1374
+ yield _increment_sequence_number_and_return(
1375
+ ResponseContentPartAddedEvent(
1376
+ type="response.content_part.added",
1377
+ sequence_number=-1,
1378
+ output_index=current_output_index,
1379
+ item_id=current_item_id,
1380
+ content_index=current_content_index,
1381
+ part=ResponseOutputText(
1382
+ type="output_text",
1383
+ text="",
1384
+ annotations=[],
1385
+ logprobs=[],
1386
+ ),
1387
+ )
1388
+ )
1389
+ current_content_index += 1
1390
+ first_delta_sent = True
1391
+ # todo(kebe7jun) tool call support
1392
+
1393
+ # check delta message and previous delta message are
1394
+ # same as content or reasoning content
1395
+ if (
1396
+ previous_delta_messages
1397
+ and previous_delta_messages[-1].reasoning is not None
1398
+ and delta_message.content is not None
1399
+ ):
1400
+ # from reasoning to normal content, send done
1401
+ # event for reasoning
1402
+ reason_content = "".join(
1403
+ pm.reasoning
1404
+ for pm in previous_delta_messages
1405
+ if pm.reasoning is not None
1406
+ )
1407
+ yield _increment_sequence_number_and_return(
1408
+ ResponseReasoningTextDoneEvent(
1409
+ type="response.reasoning_text.done",
1410
+ item_id=current_item_id,
1411
+ sequence_number=-1,
1412
+ output_index=current_output_index,
1413
+ content_index=current_content_index,
1414
+ text=reason_content,
1415
+ )
1416
+ )
1417
+ current_content_index = 0
1418
+ reasoning_item = ResponseReasoningItem(
1419
+ type="reasoning",
1420
+ content=[
1421
+ ResponseReasoningTextContent(
1422
+ text=reason_content,
1423
+ type="reasoning_text",
1424
+ ),
1425
+ ],
1426
+ status="completed",
1427
+ id=current_item_id,
1428
+ summary=[],
1429
+ )
1430
+ yield _increment_sequence_number_and_return(
1431
+ ResponseOutputItemDoneEvent(
1432
+ type="response.output_item.done",
1433
+ sequence_number=-1,
1434
+ output_index=current_output_index,
1435
+ item=reasoning_item,
1436
+ )
1437
+ )
1438
+ yield _increment_sequence_number_and_return(
1439
+ ResponseOutputItemAddedEvent(
1440
+ type="response.output_item.added",
1441
+ sequence_number=-1,
1442
+ output_index=current_output_index,
1443
+ item=ResponseOutputMessage(
1444
+ id=current_item_id,
1445
+ type="message",
1446
+ role="assistant",
1447
+ content=[],
1448
+ status="in_progress",
1449
+ ),
1450
+ )
1451
+ )
1452
+ current_output_index += 1
1453
+ current_item_id = str(uuid.uuid4())
1454
+ yield _increment_sequence_number_and_return(
1455
+ ResponseContentPartAddedEvent(
1456
+ type="response.content_part.added",
1457
+ sequence_number=-1,
1458
+ output_index=current_output_index,
1459
+ item_id=current_item_id,
1460
+ content_index=current_content_index,
1461
+ part=ResponseOutputText(
1462
+ type="output_text",
1463
+ text="",
1464
+ annotations=[],
1465
+ logprobs=[],
1466
+ ),
1467
+ )
1468
+ )
1469
+ current_content_index += 1
1470
+ # reset previous delta messages
1471
+ previous_delta_messages = []
1472
+
1473
+ if delta_message.reasoning is not None:
1474
+ yield _increment_sequence_number_and_return(
1475
+ ResponseReasoningTextDeltaEvent(
1476
+ type="response.reasoning_text.delta",
1477
+ sequence_number=-1,
1478
+ content_index=current_content_index,
1479
+ output_index=current_output_index,
1480
+ item_id=current_item_id,
1481
+ delta=delta_message.reasoning,
1482
+ )
1483
+ )
1484
+ elif delta_message.content is not None:
1485
+ yield _increment_sequence_number_and_return(
1486
+ ResponseTextDeltaEvent(
1487
+ type="response.output_text.delta",
1488
+ sequence_number=-1,
1489
+ content_index=current_content_index,
1490
+ output_index=current_output_index,
1491
+ item_id=current_item_id,
1492
+ delta=delta_message.content,
1493
+ logprobs=(
1494
+ self._create_stream_response_logprobs(
1495
+ token_ids=output.token_ids,
1496
+ logprobs=output.logprobs,
1497
+ tokenizer=tokenizer,
1498
+ top_logprobs=request.top_logprobs,
1499
+ )
1500
+ if request.is_include_output_logprobs()
1501
+ else []
1502
+ ),
1503
+ )
1504
+ )
1505
+ current_content_index += 1
1506
+
1507
+ previous_delta_messages.append(delta_message)
1508
+ if previous_delta_messages:
1509
+ if previous_delta_messages[-1].reasoning is not None:
1510
+ reason_content = "".join(
1511
+ pm.reasoning
1512
+ for pm in previous_delta_messages
1513
+ if pm.reasoning is not None
1514
+ )
1515
+ yield _increment_sequence_number_and_return(
1516
+ ResponseReasoningTextDoneEvent(
1517
+ type="response.reasoning_text.done",
1518
+ item_id=current_item_id,
1519
+ sequence_number=-1,
1520
+ output_index=current_output_index,
1521
+ content_index=current_content_index,
1522
+ text=reason_content,
1523
+ )
1524
+ )
1525
+ current_content_index += 1
1526
+ reasoning_item = ResponseReasoningItem(
1527
+ type="reasoning",
1528
+ content=[
1529
+ ResponseReasoningTextContent(
1530
+ text=reason_content,
1531
+ type="reasoning_text",
1532
+ ),
1533
+ ],
1534
+ status="completed",
1535
+ id=current_item_id,
1536
+ summary=[],
1537
+ )
1538
+ yield _increment_sequence_number_and_return(
1539
+ ResponseOutputItemDoneEvent(
1540
+ type="response.output_item.done",
1541
+ sequence_number=-1,
1542
+ output_index=current_output_index,
1543
+ item=reasoning_item,
1544
+ )
1545
+ )
1546
+ elif previous_delta_messages[-1].content is not None:
1547
+ final_content = "".join(
1548
+ pm.content
1549
+ for pm in previous_delta_messages
1550
+ if pm.content is not None
1551
+ )
1552
+ yield _increment_sequence_number_and_return(
1553
+ ResponseTextDoneEvent(
1554
+ type="response.output_text.done",
1555
+ sequence_number=-1,
1556
+ output_index=current_output_index,
1557
+ content_index=current_content_index,
1558
+ text=final_content,
1559
+ logprobs=[],
1560
+ item_id=current_item_id,
1561
+ )
1562
+ )
1563
+ current_content_index += 1
1564
+ part = ResponseOutputText(
1565
+ text=final_content,
1566
+ type="output_text",
1567
+ annotations=[],
1568
+ )
1569
+ yield _increment_sequence_number_and_return(
1570
+ ResponseContentPartDoneEvent(
1571
+ type="response.content_part.done",
1572
+ sequence_number=-1,
1573
+ item_id=current_item_id,
1574
+ output_index=current_output_index,
1575
+ content_index=current_content_index,
1576
+ part=part,
1577
+ )
1578
+ )
1579
+ current_content_index += 1
1580
+ item = ResponseOutputMessage(
1581
+ type="message",
1582
+ role="assistant",
1583
+ content=[
1584
+ part,
1585
+ ],
1586
+ status="completed",
1587
+ id=current_item_id,
1588
+ summary=[],
1589
+ )
1590
+ yield _increment_sequence_number_and_return(
1591
+ ResponseOutputItemDoneEvent(
1592
+ type="response.output_item.done",
1593
+ sequence_number=-1,
1594
+ output_index=current_output_index,
1595
+ item=item,
1596
+ )
1597
+ )
1598
+
1599
+ def _emit_function_call_done_events(
1600
+ self,
1601
+ previous_item,
1602
+ state: HarmonyStreamingState,
1603
+ ) -> list[StreamingResponsesResponse]:
1604
+ """Emit events when a function call completes."""
1605
+ function_name = previous_item.recipient[len("functions.") :]
1606
+ events = []
1607
+ events.append(
1608
+ ResponseFunctionCallArgumentsDoneEvent(
1609
+ type="response.function_call_arguments.done",
1610
+ arguments=previous_item.content[0].text,
1611
+ name=function_name,
1612
+ item_id=state.current_item_id,
1613
+ output_index=state.current_output_index,
1614
+ sequence_number=-1,
1615
+ )
1616
+ )
1617
+ function_call_item = ResponseFunctionToolCall(
1618
+ type="function_call",
1619
+ arguments=previous_item.content[0].text,
1620
+ name=function_name,
1621
+ item_id=state.current_item_id,
1622
+ output_index=state.current_output_index,
1623
+ sequence_number=-1,
1624
+ call_id=f"fc_{random_uuid()}",
1625
+ status="completed",
1626
+ )
1627
+ events.append(
1628
+ ResponseOutputItemDoneEvent(
1629
+ type="response.output_item.done",
1630
+ sequence_number=-1,
1631
+ output_index=state.current_output_index,
1632
+ item=function_call_item,
1633
+ )
1634
+ )
1635
+ return events
1636
+
1637
+ def _emit_mcp_call_done_events(
1638
+ self,
1639
+ previous_item,
1640
+ state: HarmonyStreamingState,
1641
+ ) -> list[StreamingResponsesResponse]:
1642
+ """Emit events when an MCP tool call completes."""
1643
+ server_label = self._TOOL_NAME_TO_MCP_SERVER_LABEL.get(
1644
+ previous_item.recipient, previous_item.recipient
1645
+ )
1646
+ events = []
1647
+ events.append(
1648
+ ResponseMcpCallArgumentsDoneEvent(
1649
+ type="response.mcp_call_arguments.done",
1650
+ arguments=previous_item.content[0].text,
1651
+ name=previous_item.recipient,
1652
+ item_id=state.current_item_id,
1653
+ output_index=state.current_output_index,
1654
+ sequence_number=-1,
1655
+ )
1656
+ )
1657
+ events.append(
1658
+ ResponseMcpCallCompletedEvent(
1659
+ type="response.mcp_call.completed",
1660
+ sequence_number=-1,
1661
+ output_index=state.current_output_index,
1662
+ item_id=state.current_item_id,
1663
+ )
1664
+ )
1665
+ events.append(
1666
+ ResponseOutputItemDoneEvent(
1667
+ type="response.output_item.done",
1668
+ sequence_number=-1,
1669
+ output_index=state.current_output_index,
1670
+ item=McpCall(
1671
+ type="mcp_call",
1672
+ arguments=previous_item.content[0].text,
1673
+ name=previous_item.recipient,
1674
+ id=state.current_item_id,
1675
+ server_label=server_label,
1676
+ status="completed",
1677
+ ),
1678
+ )
1679
+ )
1680
+ return events
1681
+
1682
+ def _emit_reasoning_done_events(
1683
+ self,
1684
+ previous_item,
1685
+ state: HarmonyStreamingState,
1686
+ ) -> list[StreamingResponsesResponse]:
1687
+ """Emit events when a reasoning (analysis) item completes."""
1688
+ content = ResponseReasoningTextContent(
1689
+ text=previous_item.content[0].text,
1690
+ type="reasoning_text",
1691
+ )
1692
+ reasoning_item = ResponseReasoningItem(
1693
+ type="reasoning",
1694
+ content=[content],
1695
+ status="completed",
1696
+ id=state.current_item_id,
1697
+ summary=[],
1698
+ )
1699
+ events = []
1700
+ events.append(
1701
+ ResponseReasoningTextDoneEvent(
1702
+ type="response.reasoning_text.done",
1703
+ item_id=state.current_item_id,
1704
+ sequence_number=-1,
1705
+ output_index=state.current_output_index,
1706
+ content_index=state.current_content_index,
1707
+ text=previous_item.content[0].text,
1708
+ )
1709
+ )
1710
+ events.append(
1711
+ ResponseReasoningPartDoneEvent(
1712
+ type="response.reasoning_part.done",
1713
+ sequence_number=-1,
1714
+ item_id=state.current_item_id,
1715
+ output_index=state.current_output_index,
1716
+ content_index=state.current_content_index,
1717
+ part=content,
1718
+ )
1719
+ )
1720
+ events.append(
1721
+ ResponseOutputItemDoneEvent(
1722
+ type="response.output_item.done",
1723
+ sequence_number=-1,
1724
+ output_index=state.current_output_index,
1725
+ item=reasoning_item,
1726
+ )
1727
+ )
1728
+ return events
1729
+
1730
+ def _emit_text_output_done_events(
1731
+ self,
1732
+ previous_item,
1733
+ state: HarmonyStreamingState,
1734
+ ) -> list[StreamingResponsesResponse]:
1735
+ """Emit events when a final text output item completes."""
1736
+ text_content = ResponseOutputText(
1737
+ type="output_text",
1738
+ text=previous_item.content[0].text,
1739
+ annotations=[],
1740
+ )
1741
+ events = []
1742
+ events.append(
1743
+ ResponseTextDoneEvent(
1744
+ type="response.output_text.done",
1745
+ sequence_number=-1,
1746
+ output_index=state.current_output_index,
1747
+ content_index=state.current_content_index,
1748
+ text=previous_item.content[0].text,
1749
+ logprobs=[],
1750
+ item_id=state.current_item_id,
1751
+ )
1752
+ )
1753
+ events.append(
1754
+ ResponseContentPartDoneEvent(
1755
+ type="response.content_part.done",
1756
+ sequence_number=-1,
1757
+ item_id=state.current_item_id,
1758
+ output_index=state.current_output_index,
1759
+ content_index=state.current_content_index,
1760
+ part=text_content,
1761
+ )
1762
+ )
1763
+ events.append(
1764
+ ResponseOutputItemDoneEvent(
1765
+ type="response.output_item.done",
1766
+ sequence_number=-1,
1767
+ output_index=state.current_output_index,
1768
+ item=ResponseOutputMessage(
1769
+ id=state.current_item_id,
1770
+ type="message",
1771
+ role="assistant",
1772
+ content=[text_content],
1773
+ status="completed",
1774
+ ),
1775
+ )
1776
+ )
1777
+ return events
1778
+
1779
    def _emit_previous_item_done_events(
        self,
        previous_item,
        state: HarmonyStreamingState,
    ) -> list[StreamingResponsesResponse]:
        """Emit done events for the previous item when expecting a new start.

        Dispatches on what the finished item was: a developer function call,
        an MCP tool call, a reasoning (analysis) item, or final output text.
        Returns an empty list when no done events apply here — built-in tool
        items (browser, code interpreter) are finalized by
        _emit_tool_action_events instead.
        """
        if previous_item.recipient is not None:
            # Deal with tool call
            if previous_item.recipient.startswith("functions."):
                return self._emit_function_call_done_events(previous_item, state)
            elif (
                self._is_mcp_tool_by_namespace(previous_item.recipient)
                # Only treat as MCP when the streaming side actually opened an
                # "mcp_"-prefixed item for it.
                and state.current_item_id is not None
                and state.current_item_id.startswith("mcp_")
            ):
                return self._emit_mcp_call_done_events(previous_item, state)
        elif previous_item.channel == "analysis":
            # Plain reasoning item (no tool recipient).
            return self._emit_reasoning_done_events(previous_item, state)
        elif previous_item.channel == "final":
            # Final assistant message text.
            return self._emit_text_output_done_events(previous_item, state)
        return []
1800
+
1801
+ def _emit_final_channel_delta_events(
1802
+ self,
1803
+ ctx: StreamingHarmonyContext,
1804
+ state: HarmonyStreamingState,
1805
+ ) -> list[StreamingResponsesResponse]:
1806
+ """Emit events for final channel text delta streaming."""
1807
+ events = []
1808
+ if not state.sent_output_item_added:
1809
+ state.sent_output_item_added = True
1810
+ state.current_item_id = f"msg_{random_uuid()}"
1811
+ events.append(
1812
+ ResponseOutputItemAddedEvent(
1813
+ type="response.output_item.added",
1814
+ sequence_number=-1,
1815
+ output_index=state.current_output_index,
1816
+ item=ResponseOutputMessage(
1817
+ id=state.current_item_id,
1818
+ type="message",
1819
+ role="assistant",
1820
+ content=[],
1821
+ status="in_progress",
1822
+ ),
1823
+ )
1824
+ )
1825
+ state.current_content_index += 1
1826
+ events.append(
1827
+ ResponseContentPartAddedEvent(
1828
+ type="response.content_part.added",
1829
+ sequence_number=-1,
1830
+ output_index=state.current_output_index,
1831
+ item_id=state.current_item_id,
1832
+ content_index=state.current_content_index,
1833
+ part=ResponseOutputText(
1834
+ type="output_text",
1835
+ text="",
1836
+ annotations=[],
1837
+ logprobs=[],
1838
+ ),
1839
+ )
1840
+ )
1841
+ events.append(
1842
+ ResponseTextDeltaEvent(
1843
+ type="response.output_text.delta",
1844
+ sequence_number=-1,
1845
+ content_index=state.current_content_index,
1846
+ output_index=state.current_output_index,
1847
+ item_id=state.current_item_id,
1848
+ delta=ctx.last_content_delta,
1849
+ # TODO, use logprobs from ctx.last_request_output
1850
+ logprobs=[],
1851
+ )
1852
+ )
1853
+ return events
1854
+
1855
+ def _emit_analysis_channel_delta_events(
1856
+ self,
1857
+ ctx: StreamingHarmonyContext,
1858
+ state: HarmonyStreamingState,
1859
+ ) -> list[StreamingResponsesResponse]:
1860
+ """Emit events for analysis channel reasoning delta streaming."""
1861
+ events = []
1862
+ if not state.sent_output_item_added:
1863
+ state.sent_output_item_added = True
1864
+ state.current_item_id = f"msg_{random_uuid()}"
1865
+ events.append(
1866
+ ResponseOutputItemAddedEvent(
1867
+ type="response.output_item.added",
1868
+ sequence_number=-1,
1869
+ output_index=state.current_output_index,
1870
+ item=ResponseReasoningItem(
1871
+ type="reasoning",
1872
+ id=state.current_item_id,
1873
+ summary=[],
1874
+ status="in_progress",
1875
+ ),
1876
+ )
1877
+ )
1878
+ state.current_content_index += 1
1879
+ events.append(
1880
+ ResponseReasoningPartAddedEvent(
1881
+ type="response.reasoning_part.added",
1882
+ sequence_number=-1,
1883
+ output_index=state.current_output_index,
1884
+ item_id=state.current_item_id,
1885
+ content_index=state.current_content_index,
1886
+ part=ResponseReasoningTextContent(
1887
+ text="",
1888
+ type="reasoning_text",
1889
+ ),
1890
+ )
1891
+ )
1892
+ events.append(
1893
+ ResponseReasoningTextDeltaEvent(
1894
+ type="response.reasoning_text.delta",
1895
+ item_id=state.current_item_id,
1896
+ output_index=state.current_output_index,
1897
+ content_index=state.current_content_index,
1898
+ delta=ctx.last_content_delta,
1899
+ sequence_number=-1,
1900
+ )
1901
+ )
1902
+ return events
1903
+
1904
+ def _emit_mcp_tool_delta_events(
1905
+ self,
1906
+ ctx: StreamingHarmonyContext,
1907
+ state: HarmonyStreamingState,
1908
+ recipient: str,
1909
+ ) -> list[StreamingResponsesResponse]:
1910
+ """Emit events for MCP tool delta streaming."""
1911
+ server_label = self._TOOL_NAME_TO_MCP_SERVER_LABEL.get(recipient, recipient)
1912
+ events = []
1913
+ if not state.sent_output_item_added:
1914
+ state.sent_output_item_added = True
1915
+ state.current_item_id = f"mcp_{random_uuid()}"
1916
+ events.append(
1917
+ ResponseOutputItemAddedEvent(
1918
+ type="response.output_item.added",
1919
+ sequence_number=-1,
1920
+ output_index=state.current_output_index,
1921
+ item=McpCall(
1922
+ type="mcp_call",
1923
+ id=state.current_item_id,
1924
+ name=recipient,
1925
+ arguments="",
1926
+ server_label=server_label,
1927
+ status="in_progress",
1928
+ ),
1929
+ )
1930
+ )
1931
+ events.append(
1932
+ ResponseMcpCallInProgressEvent(
1933
+ type="response.mcp_call.in_progress",
1934
+ sequence_number=-1,
1935
+ output_index=state.current_output_index,
1936
+ item_id=state.current_item_id,
1937
+ )
1938
+ )
1939
+ events.append(
1940
+ ResponseMcpCallArgumentsDeltaEvent(
1941
+ type="response.mcp_call_arguments.delta",
1942
+ sequence_number=-1,
1943
+ output_index=state.current_output_index,
1944
+ item_id=state.current_item_id,
1945
+ delta=ctx.last_content_delta,
1946
+ )
1947
+ )
1948
+ return events
1949
+
1950
+ def _emit_code_interpreter_delta_events(
1951
+ self,
1952
+ ctx: StreamingHarmonyContext,
1953
+ state: HarmonyStreamingState,
1954
+ ) -> list[StreamingResponsesResponse]:
1955
+ """Emit events for code interpreter delta streaming."""
1956
+ events = []
1957
+ if not state.sent_output_item_added:
1958
+ state.sent_output_item_added = True
1959
+ state.current_item_id = f"tool_{random_uuid()}"
1960
+ events.append(
1961
+ ResponseOutputItemAddedEvent(
1962
+ type="response.output_item.added",
1963
+ sequence_number=-1,
1964
+ output_index=state.current_output_index,
1965
+ item=ResponseCodeInterpreterToolCallParam(
1966
+ type="code_interpreter_call",
1967
+ id=state.current_item_id,
1968
+ code=None,
1969
+ container_id="auto",
1970
+ outputs=None,
1971
+ status="in_progress",
1972
+ ),
1973
+ )
1974
+ )
1975
+ events.append(
1976
+ ResponseCodeInterpreterCallInProgressEvent(
1977
+ type="response.code_interpreter_call.in_progress",
1978
+ sequence_number=-1,
1979
+ output_index=state.current_output_index,
1980
+ item_id=state.current_item_id,
1981
+ )
1982
+ )
1983
+ events.append(
1984
+ ResponseCodeInterpreterCallCodeDeltaEvent(
1985
+ type="response.code_interpreter_call_code.delta",
1986
+ sequence_number=-1,
1987
+ output_index=state.current_output_index,
1988
+ item_id=state.current_item_id,
1989
+ delta=ctx.last_content_delta,
1990
+ )
1991
+ )
1992
+ return events
1993
+
1994
    def _emit_mcp_prefix_delta_events(
        self,
        ctx: StreamingHarmonyContext,
        state: HarmonyStreamingState,
    ) -> list[StreamingResponsesResponse]:
        """Emit events for MCP prefix (mcp.*) delta streaming.

        Mirrors _emit_mcp_tool_delta_events, but for recipients addressed
        directly with the "mcp." namespace: the prefix is stripped off and the
        remainder is used as both tool name and server label.
        """
        events = []
        if not state.sent_output_item_added:
            # First delta: announce the MCP call item and mark it in progress.
            # NOTE(review): reconstructed nesting assumes item-added and
            # in-progress fire once per item, matching
            # _emit_mcp_tool_delta_events — confirm against the original.
            state.sent_output_item_added = True
            state.current_item_id = f"mcp_{random_uuid()}"
            mcp_name = ctx.parser.current_recipient[len("mcp.") :]

            events.append(
                ResponseOutputItemAddedEvent(
                    type="response.output_item.added",
                    sequence_number=-1,
                    output_index=state.current_output_index,
                    item=McpCall(
                        type="mcp_call",
                        id=state.current_item_id,
                        name=mcp_name,
                        arguments="",
                        server_label=mcp_name,
                        status="in_progress",
                    ),
                )
            )
            events.append(
                ResponseMcpCallInProgressEvent(
                    type="response.mcp_call.in_progress",
                    sequence_number=-1,
                    output_index=state.current_output_index,
                    item_id=state.current_item_id,
                )
            )

        # Every delta carries the argument fragment.
        events.append(
            ResponseMcpCallArgumentsDeltaEvent(
                type="response.mcp_call_arguments.delta",
                sequence_number=-1,
                output_index=state.current_output_index,
                item_id=state.current_item_id,
                delta=ctx.last_content_delta,
            )
        )
        return events
2040
+
2041
+ def _emit_content_delta_events(
2042
+ self,
2043
+ ctx: StreamingHarmonyContext,
2044
+ state: HarmonyStreamingState,
2045
+ ) -> list[StreamingResponsesResponse]:
2046
+ """Emit events for content delta streaming based on channel type."""
2047
+ if not ctx.last_content_delta:
2048
+ return []
2049
+
2050
+ if (
2051
+ ctx.parser.current_channel == "final"
2052
+ and ctx.parser.current_recipient is None
2053
+ ):
2054
+ return self._emit_final_channel_delta_events(ctx, state)
2055
+ elif (
2056
+ ctx.parser.current_channel == "analysis"
2057
+ and ctx.parser.current_recipient is None
2058
+ ):
2059
+ return self._emit_analysis_channel_delta_events(ctx, state)
2060
+ # built-in tools will be triggered on the analysis channel
2061
+ # However, occasionally built-in tools will
2062
+ # still be output to commentary.
2063
+ elif (
2064
+ ctx.parser.current_channel == "commentary"
2065
+ or ctx.parser.current_channel == "analysis"
2066
+ ) and ctx.parser.current_recipient is not None:
2067
+ recipient = ctx.parser.current_recipient
2068
+ # Check for function calls first - they have their own event handling
2069
+ if recipient.startswith("functions."):
2070
+ return self._emit_function_call_delta_events(ctx, state)
2071
+ is_mcp_tool = self._is_mcp_tool_by_namespace(recipient)
2072
+ if is_mcp_tool:
2073
+ return self._emit_mcp_tool_delta_events(ctx, state, recipient)
2074
+ else:
2075
+ return self._emit_code_interpreter_delta_events(ctx, state)
2076
+ elif (
2077
+ (
2078
+ ctx.parser.current_channel == "commentary"
2079
+ or ctx.parser.current_channel == "analysis"
2080
+ )
2081
+ and ctx.parser.current_recipient is not None
2082
+ and ctx.parser.current_recipient.startswith("mcp.")
2083
+ ):
2084
+ return self._emit_mcp_prefix_delta_events(ctx, state)
2085
+
2086
+ return []
2087
+
2088
+ def _emit_browser_tool_events(
2089
+ self,
2090
+ previous_item,
2091
+ state: HarmonyStreamingState,
2092
+ ) -> list[StreamingResponsesResponse]:
2093
+ """Emit events for browser tool calls (web search)."""
2094
+ function_name = previous_item.recipient[len("browser.") :]
2095
+ parsed_args = json.loads(previous_item.content[0].text)
2096
+ action = None
2097
+
2098
+ if function_name == "search":
2099
+ action = response_function_web_search.ActionSearch(
2100
+ type="search",
2101
+ query=parsed_args["query"],
2102
+ )
2103
+ elif function_name == "open":
2104
+ action = response_function_web_search.ActionOpenPage(
2105
+ type="open_page",
2106
+ # TODO: translate to url
2107
+ url=f"cursor:{parsed_args.get('cursor', '')}",
2108
+ )
2109
+ elif function_name == "find":
2110
+ action = response_function_web_search.ActionFind(
2111
+ type="find",
2112
+ pattern=parsed_args["pattern"],
2113
+ # TODO: translate to url
2114
+ url=f"cursor:{parsed_args.get('cursor', '')}",
2115
+ )
2116
+ else:
2117
+ raise ValueError(f"Unknown function name: {function_name}")
2118
+
2119
+ state.current_item_id = f"tool_{random_uuid()}"
2120
+ events = []
2121
+ events.append(
2122
+ ResponseOutputItemAddedEvent(
2123
+ type="response.output_item.added",
2124
+ sequence_number=-1,
2125
+ output_index=state.current_output_index,
2126
+ item=response_function_web_search.ResponseFunctionWebSearch(
2127
+ # TODO: generate a unique id for web search call
2128
+ type="web_search_call",
2129
+ id=state.current_item_id,
2130
+ action=action,
2131
+ status="in_progress",
2132
+ ),
2133
+ )
2134
+ )
2135
+ events.append(
2136
+ ResponseWebSearchCallInProgressEvent(
2137
+ type="response.web_search_call.in_progress",
2138
+ sequence_number=-1,
2139
+ output_index=state.current_output_index,
2140
+ item_id=state.current_item_id,
2141
+ )
2142
+ )
2143
+ events.append(
2144
+ ResponseWebSearchCallSearchingEvent(
2145
+ type="response.web_search_call.searching",
2146
+ sequence_number=-1,
2147
+ output_index=state.current_output_index,
2148
+ item_id=state.current_item_id,
2149
+ )
2150
+ )
2151
+ # enqueue
2152
+ events.append(
2153
+ ResponseWebSearchCallCompletedEvent(
2154
+ type="response.web_search_call.completed",
2155
+ sequence_number=-1,
2156
+ output_index=state.current_output_index,
2157
+ item_id=state.current_item_id,
2158
+ )
2159
+ )
2160
+ events.append(
2161
+ ResponseOutputItemDoneEvent(
2162
+ type="response.output_item.done",
2163
+ sequence_number=-1,
2164
+ output_index=state.current_output_index,
2165
+ item=ResponseFunctionWebSearch(
2166
+ type="web_search_call",
2167
+ id=state.current_item_id,
2168
+ action=action,
2169
+ status="completed",
2170
+ ),
2171
+ )
2172
+ )
2173
+ return events
2174
+
2175
+ def _emit_mcp_tool_completion_events(
2176
+ self,
2177
+ previous_item,
2178
+ state: HarmonyStreamingState,
2179
+ ) -> list[StreamingResponsesResponse]:
2180
+ """Emit events when an MCP tool completes during assistant action turn."""
2181
+ recipient = previous_item.recipient
2182
+ server_label = self._TOOL_NAME_TO_MCP_SERVER_LABEL.get(recipient, recipient)
2183
+ events = []
2184
+ events.append(
2185
+ ResponseMcpCallArgumentsDoneEvent(
2186
+ type="response.mcp_call_arguments.done",
2187
+ sequence_number=-1,
2188
+ output_index=state.current_output_index,
2189
+ item_id=state.current_item_id,
2190
+ arguments=previous_item.content[0].text,
2191
+ name=recipient,
2192
+ )
2193
+ )
2194
+ events.append(
2195
+ ResponseMcpCallCompletedEvent(
2196
+ type="response.mcp_call.completed",
2197
+ sequence_number=-1,
2198
+ output_index=state.current_output_index,
2199
+ item_id=state.current_item_id,
2200
+ )
2201
+ )
2202
+ events.append(
2203
+ ResponseOutputItemDoneEvent(
2204
+ type="response.output_item.done",
2205
+ sequence_number=-1,
2206
+ output_index=state.current_output_index,
2207
+ item=McpCall(
2208
+ type="mcp_call",
2209
+ id=state.current_item_id,
2210
+ name=recipient,
2211
+ arguments=previous_item.content[0].text,
2212
+ server_label=server_label,
2213
+ status="completed",
2214
+ ),
2215
+ )
2216
+ )
2217
+ return events
2218
+
2219
+ def _emit_code_interpreter_completion_events(
2220
+ self,
2221
+ previous_item,
2222
+ state: HarmonyStreamingState,
2223
+ ) -> list[StreamingResponsesResponse]:
2224
+ """Emit events when code interpreter completes."""
2225
+ events = []
2226
+ events.append(
2227
+ ResponseCodeInterpreterCallCodeDoneEvent(
2228
+ type="response.code_interpreter_call_code.done",
2229
+ sequence_number=-1,
2230
+ output_index=state.current_output_index,
2231
+ item_id=state.current_item_id,
2232
+ code=previous_item.content[0].text,
2233
+ )
2234
+ )
2235
+ events.append(
2236
+ ResponseCodeInterpreterCallInterpretingEvent(
2237
+ type="response.code_interpreter_call.interpreting",
2238
+ sequence_number=-1,
2239
+ output_index=state.current_output_index,
2240
+ item_id=state.current_item_id,
2241
+ )
2242
+ )
2243
+ events.append(
2244
+ ResponseCodeInterpreterCallCompletedEvent(
2245
+ type="response.code_interpreter_call.completed",
2246
+ sequence_number=-1,
2247
+ output_index=state.current_output_index,
2248
+ item_id=state.current_item_id,
2249
+ )
2250
+ )
2251
+ events.append(
2252
+ ResponseOutputItemDoneEvent(
2253
+ type="response.output_item.done",
2254
+ sequence_number=-1,
2255
+ output_index=state.current_output_index,
2256
+ item=ResponseCodeInterpreterToolCallParam(
2257
+ type="code_interpreter_call",
2258
+ id=state.current_item_id,
2259
+ code=previous_item.content[0].text,
2260
+ container_id="auto",
2261
+ outputs=[],
2262
+ status="completed",
2263
+ ),
2264
+ )
2265
+ )
2266
+ return events
2267
+
2268
+ def _emit_mcp_prefix_completion_events(
2269
+ self,
2270
+ previous_item,
2271
+ state: HarmonyStreamingState,
2272
+ ) -> list[StreamingResponsesResponse]:
2273
+ """Emit events when an MCP prefix tool (mcp.*) completes."""
2274
+ mcp_name = previous_item.recipient[len("mcp.") :]
2275
+ events = []
2276
+ events.append(
2277
+ ResponseMcpCallArgumentsDoneEvent(
2278
+ type="response.mcp_call_arguments.done",
2279
+ sequence_number=-1,
2280
+ output_index=state.current_output_index,
2281
+ item_id=state.current_item_id,
2282
+ arguments=previous_item.content[0].text,
2283
+ name=mcp_name,
2284
+ )
2285
+ )
2286
+ events.append(
2287
+ ResponseMcpCallCompletedEvent(
2288
+ type="response.mcp_call.completed",
2289
+ sequence_number=-1,
2290
+ output_index=state.current_output_index,
2291
+ item_id=state.current_item_id,
2292
+ )
2293
+ )
2294
+ events.append(
2295
+ ResponseOutputItemDoneEvent(
2296
+ type="response.output_item.done",
2297
+ sequence_number=-1,
2298
+ output_index=state.current_output_index,
2299
+ item=McpCall(
2300
+ type="mcp_call",
2301
+ id=state.current_item_id,
2302
+ name=mcp_name,
2303
+ arguments=previous_item.content[0].text,
2304
+ server_label=mcp_name,
2305
+ status="completed",
2306
+ ),
2307
+ )
2308
+ )
2309
+ return events
2310
+
2311
    def _emit_tool_action_events(
        self,
        ctx: StreamingHarmonyContext,
        state: HarmonyStreamingState,
    ) -> list[StreamingResponsesResponse]:
        """Emit events for tool action turn.

        Called once per streamed context; only does work when the assistant is
        handing control to a tool and at least one parsed message exists. The
        last parsed message is the tool invocation being completed.
        """
        if not ctx.is_assistant_action_turn() or len(ctx.parser.messages) == 0:
            return []

        events = []
        previous_item = ctx.parser.messages[-1]

        # Handle browser tool
        if (
            self.tool_server is not None
            and self.tool_server.has_tool("browser")
            and previous_item.recipient is not None
            and previous_item.recipient.startswith("browser.")
        ):
            events.extend(self._emit_browser_tool_events(previous_item, state))

        # Handle tool completion
        # Only when the delta path already opened an item (sent_output_item_added
        # with a current_item_id) is there something to close here.
        if (
            self.tool_server is not None
            and previous_item.recipient is not None
            and state.current_item_id is not None
            and state.sent_output_item_added
        ):
            recipient = previous_item.recipient
            # Handle MCP prefix tool completion first
            if recipient.startswith("mcp."):
                events.extend(
                    self._emit_mcp_prefix_completion_events(previous_item, state)
                )
            else:
                # Handle other MCP tool and code interpreter completion
                # (the "mcp_" id prefix confirms the delta path treated this
                # call as MCP rather than code interpreter).
                is_mcp_tool = self._is_mcp_tool_by_namespace(
                    recipient
                ) and state.current_item_id.startswith("mcp_")
                if is_mcp_tool:
                    events.extend(
                        self._emit_mcp_tool_completion_events(previous_item, state)
                    )
                else:
                    events.extend(
                        self._emit_code_interpreter_completion_events(
                            previous_item, state
                        )
                    )

        return events
2362
+
2363
+ def _emit_function_call_delta_events(
2364
+ self,
2365
+ ctx: StreamingHarmonyContext,
2366
+ state: HarmonyStreamingState,
2367
+ ) -> list[StreamingResponsesResponse]:
2368
+ """Emit events for developer function calls on commentary channel."""
2369
+ if not (
2370
+ ctx.parser.current_channel == "commentary"
2371
+ and ctx.parser.current_recipient
2372
+ and ctx.parser.current_recipient.startswith("functions.")
2373
+ ):
2374
+ return []
2375
+
2376
+ events = []
2377
+ if state.is_first_function_call_delta is False:
2378
+ state.is_first_function_call_delta = True
2379
+ fc_name = ctx.parser.current_recipient[len("functions.") :]
2380
+ state.current_item_id = f"fc_{random_uuid()}"
2381
+ tool_call_item = ResponseFunctionToolCall(
2382
+ name=fc_name,
2383
+ type="function_call",
2384
+ id=state.current_item_id,
2385
+ call_id=f"call_{random_uuid()}",
2386
+ arguments="",
2387
+ status="in_progress",
2388
+ )
2389
+ events.append(
2390
+ ResponseOutputItemAddedEvent(
2391
+ type="response.output_item.added",
2392
+ sequence_number=-1,
2393
+ output_index=state.current_output_index,
2394
+ item=tool_call_item,
2395
+ )
2396
+ )
2397
+ # Always emit the delta (including on first call)
2398
+ events.append(
2399
+ ResponseFunctionCallArgumentsDeltaEvent(
2400
+ item_id=state.current_item_id,
2401
+ delta=ctx.last_content_delta,
2402
+ output_index=state.current_output_index,
2403
+ sequence_number=-1,
2404
+ type="response.function_call_arguments.delta",
2405
+ )
2406
+ )
2407
+ return events
2408
+
2409
    async def _process_harmony_streaming_events(
        self,
        request: ResponsesRequest,
        sampling_params: SamplingParams,
        result_generator: AsyncIterator[ConversationContext | None],
        context: ConversationContext,
        model_name: str,
        tokenizer: TokenizerLike,
        request_metadata: RequestResponseMetadata,
        created_time: int,
        _increment_sequence_number_and_return: Callable[
            [StreamingResponsesResponse], StreamingResponsesResponse
        ],
    ) -> AsyncGenerator[StreamingResponsesResponse, None]:
        """Translate streamed harmony contexts into Responses API events.

        For each streamed context: raise on retryable generation errors, close
        out the previous item when the parser expects a new message start, then
        emit content-delta and tool-action events through the provided
        sequence-number wrapper.
        """
        # Per-stream bookkeeping (item ids, output/content indices, flags).
        state = HarmonyStreamingState()

        async for ctx in result_generator:
            assert isinstance(ctx, StreamingHarmonyContext)

            # finish_reason='error' indicates a retryable error
            self._raise_if_error(ctx.finish_reason, request.request_id)

            if ctx.is_expecting_start():
                # A new harmony message is starting: finalize the previous one
                # (done events), then reset the per-item state.
                if len(ctx.parser.messages) > 0:
                    previous_item = ctx.parser.messages[-1]
                    for event in self._emit_previous_item_done_events(
                        previous_item, state
                    ):
                        yield _increment_sequence_number_and_return(event)
                state.reset_for_new_item()

            # Stream the output of a harmony message
            for event in self._emit_content_delta_events(ctx, state):
                yield _increment_sequence_number_and_return(event)

            # Stream tool call outputs
            for event in self._emit_tool_action_events(ctx, state):
                yield _increment_sequence_number_and_return(event)
2447
+
2448
    async def responses_stream_generator(
        self,
        request: ResponsesRequest,
        sampling_params: SamplingParams,
        result_generator: AsyncIterator[ConversationContext | None],
        context: ConversationContext,
        model_name: str,
        tokenizer: TokenizerLike,
        request_metadata: RequestResponseMetadata,
        created_time: int | None = None,
    ) -> AsyncGenerator[StreamingResponsesResponse, None]:
        """Stream a Responses API request as a sequence of SSE-style events.

        Emits response.created / response.in_progress, delegates per-token
        event generation to the harmony or simple processor, converts
        GenerationError into a streaming error event, and finishes with
        response.completed built from the accumulated context.
        """
        # TODO:
        # 1. Handle disconnect

        created_time = created_time or int(time.time())

        # Monotonic counter shared by every event yielded from this generator.
        sequence_number = 0

        def _increment_sequence_number_and_return(
            event: StreamingResponsesResponse,
        ) -> StreamingResponsesResponse:
            # Stamp the next sequence number onto the event (events are built
            # with a placeholder of -1) and advance the counter.
            nonlocal sequence_number
            # Set sequence_number if the event has this attribute
            if hasattr(event, "sequence_number"):
                event.sequence_number = sequence_number
            sequence_number += 1
            return event

        async with AsyncExitStack() as exit_stack:
            if self.use_harmony:
                # TODO: in streaming, we noticed this bug:
                # https://github.com/vllm-project/vllm/issues/25697
                # Tool sessions are tied to the exit stack so they are torn
                # down when this generator finishes.
                await self._initialize_tool_sessions(request, context, exit_stack)
                processer = self._process_harmony_streaming_events
            else:
                processer = self._process_simple_streaming_events
                # TODO Hanchen make sampling params to include the structural tag

            # Announce the response before any content events.
            initial_response = ResponsesResponse.from_request(
                request,
                sampling_params,
                model_name=model_name,
                created_time=created_time,
                output=[],
                status="in_progress",
                usage=None,
            ).model_dump()
            yield _increment_sequence_number_and_return(
                ResponseCreatedEvent(
                    type="response.created",
                    sequence_number=-1,
                    response=initial_response,
                )
            )
            yield _increment_sequence_number_and_return(
                ResponseInProgressEvent(
                    type="response.in_progress",
                    sequence_number=-1,
                    response=initial_response,
                )
            )

            try:
                async for event_data in processer(
                    request,
                    sampling_params,
                    result_generator,
                    context,
                    model_name,
                    tokenizer,
                    request_metadata,
                    created_time,
                    _increment_sequence_number_and_return,
                ):
                    yield event_data
            except GenerationError as e:
                # Surface the failure as a streaming error event and stop; no
                # response.completed is emitted on this path.
                error_json = self._convert_generation_error_to_streaming_response(e)
                yield _increment_sequence_number_and_return(
                    TypeAdapter(StreamingResponsesResponse).validate_json(error_json)
                )
                return

            async def empty_async_generator():
                # A hack to trick Python to think this is a generator but
                # in fact it immediately returns.
                if False:
                    yield

            # Reuse the non-streaming path (with an empty generator, since the
            # context already holds the full result) to build the final
            # response payload.
            final_response = await self.responses_full_generator(
                request,
                sampling_params,
                empty_async_generator(),
                context,
                model_name,
                tokenizer,
                request_metadata,
                created_time=created_time,
            )
            yield _increment_sequence_number_and_return(
                ResponseCompletedEvent(
                    type="response.completed",
                    sequence_number=-1,
                    response=final_response,
                )
            )