vllm-cpu-amxbf16 0.11.2.post2-cp310-cp310-manylinux_2_17_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (1536)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +225 -0
  3. vllm/_aiter_ops.py +983 -0
  4. vllm/_bc_linter.py +54 -0
  5. vllm/_custom_ops.py +2863 -0
  6. vllm/_ipex_ops.py +457 -0
  7. vllm/_version.py +34 -0
  8. vllm/assets/__init__.py +0 -0
  9. vllm/assets/audio.py +43 -0
  10. vllm/assets/base.py +40 -0
  11. vllm/assets/image.py +59 -0
  12. vllm/assets/video.py +149 -0
  13. vllm/attention/__init__.py +18 -0
  14. vllm/attention/backends/__init__.py +0 -0
  15. vllm/attention/backends/abstract.py +391 -0
  16. vllm/attention/backends/registry.py +195 -0
  17. vllm/attention/backends/utils.py +33 -0
  18. vllm/attention/layer.py +1052 -0
  19. vllm/attention/layers/__init__.py +0 -0
  20. vllm/attention/layers/chunked_local_attention.py +121 -0
  21. vllm/attention/layers/cross_attention.py +178 -0
  22. vllm/attention/layers/encoder_only_attention.py +103 -0
  23. vllm/attention/ops/__init__.py +0 -0
  24. vllm/attention/ops/chunked_prefill_paged_decode.py +401 -0
  25. vllm/attention/ops/common.py +414 -0
  26. vllm/attention/ops/flashmla.py +251 -0
  27. vllm/attention/ops/merge_attn_states.py +47 -0
  28. vllm/attention/ops/paged_attn.py +262 -0
  29. vllm/attention/ops/pallas_kv_cache_update.py +130 -0
  30. vllm/attention/ops/prefix_prefill.py +814 -0
  31. vllm/attention/ops/rocm_aiter_paged_attn.py +123 -0
  32. vllm/attention/ops/triton_decode_attention.py +712 -0
  33. vllm/attention/ops/triton_merge_attn_states.py +105 -0
  34. vllm/attention/ops/triton_reshape_and_cache_flash.py +184 -0
  35. vllm/attention/ops/triton_unified_attention.py +941 -0
  36. vllm/attention/ops/vit_attn_wrappers.py +178 -0
  37. vllm/attention/selector.py +231 -0
  38. vllm/attention/utils/__init__.py +0 -0
  39. vllm/attention/utils/fa_utils.py +109 -0
  40. vllm/attention/utils/kv_sharing_utils.py +33 -0
  41. vllm/attention/utils/kv_transfer_utils.py +60 -0
  42. vllm/beam_search.py +88 -0
  43. vllm/benchmarks/__init__.py +0 -0
  44. vllm/benchmarks/datasets.py +3222 -0
  45. vllm/benchmarks/latency.py +172 -0
  46. vllm/benchmarks/lib/__init__.py +3 -0
  47. vllm/benchmarks/lib/endpoint_request_func.py +777 -0
  48. vllm/benchmarks/lib/ready_checker.py +72 -0
  49. vllm/benchmarks/lib/utils.py +79 -0
  50. vllm/benchmarks/serve.py +1531 -0
  51. vllm/benchmarks/sweep/__init__.py +0 -0
  52. vllm/benchmarks/sweep/cli.py +38 -0
  53. vllm/benchmarks/sweep/param_sweep.py +91 -0
  54. vllm/benchmarks/sweep/plot.py +580 -0
  55. vllm/benchmarks/sweep/serve.py +416 -0
  56. vllm/benchmarks/sweep/serve_sla.py +492 -0
  57. vllm/benchmarks/sweep/server.py +114 -0
  58. vllm/benchmarks/sweep/sla_sweep.py +132 -0
  59. vllm/benchmarks/sweep/utils.py +4 -0
  60. vllm/benchmarks/throughput.py +799 -0
  61. vllm/collect_env.py +857 -0
  62. vllm/compilation/__init__.py +0 -0
  63. vllm/compilation/activation_quant_fusion.py +209 -0
  64. vllm/compilation/backends.py +759 -0
  65. vllm/compilation/base_static_graph.py +57 -0
  66. vllm/compilation/caching.py +178 -0
  67. vllm/compilation/collective_fusion.py +1234 -0
  68. vllm/compilation/compiler_interface.py +639 -0
  69. vllm/compilation/counter.py +48 -0
  70. vllm/compilation/cuda_graph.py +208 -0
  71. vllm/compilation/decorators.py +571 -0
  72. vllm/compilation/fix_functionalization.py +253 -0
  73. vllm/compilation/fusion.py +374 -0
  74. vllm/compilation/fusion_attn.py +359 -0
  75. vllm/compilation/fx_utils.py +91 -0
  76. vllm/compilation/inductor_pass.py +133 -0
  77. vllm/compilation/matcher_utils.py +317 -0
  78. vllm/compilation/monitor.py +62 -0
  79. vllm/compilation/noop_elimination.py +134 -0
  80. vllm/compilation/partition_rules.py +72 -0
  81. vllm/compilation/pass_manager.py +135 -0
  82. vllm/compilation/piecewise_backend.py +121 -0
  83. vllm/compilation/post_cleanup.py +21 -0
  84. vllm/compilation/qk_norm_rope_fusion.py +238 -0
  85. vllm/compilation/sequence_parallelism.py +363 -0
  86. vllm/compilation/torch25_custom_graph_pass.py +44 -0
  87. vllm/compilation/vllm_inductor_pass.py +173 -0
  88. vllm/compilation/wrapper.py +238 -0
  89. vllm/config/__init__.py +102 -0
  90. vllm/config/cache.py +207 -0
  91. vllm/config/compilation.py +975 -0
  92. vllm/config/device.py +75 -0
  93. vllm/config/ec_transfer.py +110 -0
  94. vllm/config/kv_events.py +56 -0
  95. vllm/config/kv_transfer.py +114 -0
  96. vllm/config/load.py +124 -0
  97. vllm/config/lora.py +112 -0
  98. vllm/config/model.py +2162 -0
  99. vllm/config/multimodal.py +248 -0
  100. vllm/config/observability.py +123 -0
  101. vllm/config/parallel.py +655 -0
  102. vllm/config/pooler.py +122 -0
  103. vllm/config/scheduler.py +298 -0
  104. vllm/config/speculative.py +654 -0
  105. vllm/config/speech_to_text.py +38 -0
  106. vllm/config/structured_outputs.py +92 -0
  107. vllm/config/utils.py +178 -0
  108. vllm/config/vllm.py +1166 -0
  109. vllm/connections.py +189 -0
  110. vllm/device_allocator/__init__.py +0 -0
  111. vllm/device_allocator/cumem.py +327 -0
  112. vllm/distributed/__init__.py +6 -0
  113. vllm/distributed/communication_op.py +43 -0
  114. vllm/distributed/device_communicators/__init__.py +0 -0
  115. vllm/distributed/device_communicators/all2all.py +490 -0
  116. vllm/distributed/device_communicators/all_reduce_utils.py +344 -0
  117. vllm/distributed/device_communicators/base_device_communicator.py +297 -0
  118. vllm/distributed/device_communicators/cpu_communicator.py +209 -0
  119. vllm/distributed/device_communicators/cuda_communicator.py +340 -0
  120. vllm/distributed/device_communicators/cuda_wrapper.py +216 -0
  121. vllm/distributed/device_communicators/custom_all_reduce.py +326 -0
  122. vllm/distributed/device_communicators/mnnvl_compat.py +27 -0
  123. vllm/distributed/device_communicators/pynccl.py +386 -0
  124. vllm/distributed/device_communicators/pynccl_allocator.py +191 -0
  125. vllm/distributed/device_communicators/pynccl_wrapper.py +564 -0
  126. vllm/distributed/device_communicators/quick_all_reduce.py +290 -0
  127. vllm/distributed/device_communicators/ray_communicator.py +259 -0
  128. vllm/distributed/device_communicators/shm_broadcast.py +733 -0
  129. vllm/distributed/device_communicators/shm_object_storage.py +660 -0
  130. vllm/distributed/device_communicators/symm_mem.py +156 -0
  131. vllm/distributed/device_communicators/tpu_communicator.py +107 -0
  132. vllm/distributed/device_communicators/xpu_communicator.py +95 -0
  133. vllm/distributed/ec_transfer/__init__.py +14 -0
  134. vllm/distributed/ec_transfer/ec_connector/__init__.py +0 -0
  135. vllm/distributed/ec_transfer/ec_connector/base.py +247 -0
  136. vllm/distributed/ec_transfer/ec_connector/factory.py +88 -0
  137. vllm/distributed/ec_transfer/ec_connector/shared_storage_connector.py +201 -0
  138. vllm/distributed/ec_transfer/ec_transfer_state.py +42 -0
  139. vllm/distributed/eplb/__init__.py +8 -0
  140. vllm/distributed/eplb/eplb_state.py +837 -0
  141. vllm/distributed/eplb/rebalance_algo.py +260 -0
  142. vllm/distributed/eplb/rebalance_execute.py +431 -0
  143. vllm/distributed/kv_events.py +371 -0
  144. vllm/distributed/kv_transfer/README.md +29 -0
  145. vllm/distributed/kv_transfer/__init__.py +20 -0
  146. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  147. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  148. vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
  149. vllm/distributed/kv_transfer/kv_connector/factory.py +192 -0
  150. vllm/distributed/kv_transfer/kv_connector/utils.py +268 -0
  151. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +19 -0
  152. vllm/distributed/kv_transfer/kv_connector/v1/base.py +546 -0
  153. vllm/distributed/kv_transfer/kv_connector/v1/decode_bench_connector.py +419 -0
  154. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +216 -0
  155. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/__init__.py +18 -0
  156. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/multi_process_adapter.py +379 -0
  157. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/utils.py +221 -0
  158. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/vllm_v1_adapter.py +1411 -0
  159. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_mp_connector.py +867 -0
  160. vllm/distributed/kv_transfer/kv_connector/v1/metrics.py +189 -0
  161. vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +454 -0
  162. vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +2440 -0
  163. vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py +504 -0
  164. vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
  165. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +531 -0
  166. vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +632 -0
  167. vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +273 -0
  168. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +450 -0
  169. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  170. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +179 -0
  171. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +164 -0
  172. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +242 -0
  173. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  174. vllm/distributed/kv_transfer/kv_pipe/base.py +66 -0
  175. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +295 -0
  176. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +285 -0
  177. vllm/distributed/kv_transfer/kv_transfer_state.py +78 -0
  178. vllm/distributed/parallel_state.py +1759 -0
  179. vllm/distributed/tpu_distributed_utils.py +188 -0
  180. vllm/distributed/utils.py +543 -0
  181. vllm/engine/__init__.py +0 -0
  182. vllm/engine/arg_utils.py +2144 -0
  183. vllm/engine/async_llm_engine.py +6 -0
  184. vllm/engine/llm_engine.py +6 -0
  185. vllm/engine/protocol.py +170 -0
  186. vllm/entrypoints/__init__.py +0 -0
  187. vllm/entrypoints/anthropic/__init__.py +0 -0
  188. vllm/entrypoints/anthropic/protocol.py +162 -0
  189. vllm/entrypoints/anthropic/serving_messages.py +460 -0
  190. vllm/entrypoints/api_server.py +184 -0
  191. vllm/entrypoints/chat_utils.py +1690 -0
  192. vllm/entrypoints/cli/__init__.py +13 -0
  193. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  194. vllm/entrypoints/cli/benchmark/base.py +25 -0
  195. vllm/entrypoints/cli/benchmark/latency.py +21 -0
  196. vllm/entrypoints/cli/benchmark/main.py +56 -0
  197. vllm/entrypoints/cli/benchmark/serve.py +21 -0
  198. vllm/entrypoints/cli/benchmark/sweep.py +21 -0
  199. vllm/entrypoints/cli/benchmark/throughput.py +21 -0
  200. vllm/entrypoints/cli/collect_env.py +38 -0
  201. vllm/entrypoints/cli/main.py +79 -0
  202. vllm/entrypoints/cli/openai.py +256 -0
  203. vllm/entrypoints/cli/run_batch.py +68 -0
  204. vllm/entrypoints/cli/serve.py +249 -0
  205. vllm/entrypoints/cli/types.py +29 -0
  206. vllm/entrypoints/constants.py +10 -0
  207. vllm/entrypoints/context.py +572 -0
  208. vllm/entrypoints/dynamic_lora.py +57 -0
  209. vllm/entrypoints/harmony_utils.py +535 -0
  210. vllm/entrypoints/launcher.py +175 -0
  211. vllm/entrypoints/llm.py +1768 -0
  212. vllm/entrypoints/logger.py +84 -0
  213. vllm/entrypoints/openai/__init__.py +0 -0
  214. vllm/entrypoints/openai/api_server.py +2096 -0
  215. vllm/entrypoints/openai/cli_args.py +302 -0
  216. vllm/entrypoints/openai/orca_metrics.py +120 -0
  217. vllm/entrypoints/openai/protocol.py +3299 -0
  218. vllm/entrypoints/openai/run_batch.py +547 -0
  219. vllm/entrypoints/openai/serving_chat.py +1772 -0
  220. vllm/entrypoints/openai/serving_classification.py +235 -0
  221. vllm/entrypoints/openai/serving_completion.py +715 -0
  222. vllm/entrypoints/openai/serving_embedding.py +695 -0
  223. vllm/entrypoints/openai/serving_engine.py +1433 -0
  224. vllm/entrypoints/openai/serving_models.py +304 -0
  225. vllm/entrypoints/openai/serving_pooling.py +346 -0
  226. vllm/entrypoints/openai/serving_responses.py +2021 -0
  227. vllm/entrypoints/openai/serving_score.py +503 -0
  228. vllm/entrypoints/openai/serving_tokenization.py +203 -0
  229. vllm/entrypoints/openai/serving_tokens.py +269 -0
  230. vllm/entrypoints/openai/serving_transcription.py +148 -0
  231. vllm/entrypoints/openai/speech_to_text.py +405 -0
  232. vllm/entrypoints/openai/tool_parsers/__init__.py +142 -0
  233. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +273 -0
  234. vllm/entrypoints/openai/tool_parsers/deepseekv31_tool_parser.py +390 -0
  235. vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +390 -0
  236. vllm/entrypoints/openai/tool_parsers/ernie45_tool_parser.py +210 -0
  237. vllm/entrypoints/openai/tool_parsers/glm4_moe_tool_parser.py +200 -0
  238. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +273 -0
  239. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +253 -0
  240. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +494 -0
  241. vllm/entrypoints/openai/tool_parsers/hunyuan_a13b_tool_parser.py +420 -0
  242. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +227 -0
  243. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +323 -0
  244. vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py +590 -0
  245. vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +341 -0
  246. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +290 -0
  247. vllm/entrypoints/openai/tool_parsers/longcat_tool_parser.py +37 -0
  248. vllm/entrypoints/openai/tool_parsers/minimax_m2_tool_parser.py +643 -0
  249. vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +849 -0
  250. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +390 -0
  251. vllm/entrypoints/openai/tool_parsers/olmo3_tool_parser.py +366 -0
  252. vllm/entrypoints/openai/tool_parsers/openai_tool_parser.py +97 -0
  253. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +120 -0
  254. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +332 -0
  255. vllm/entrypoints/openai/tool_parsers/qwen3coder_tool_parser.py +781 -0
  256. vllm/entrypoints/openai/tool_parsers/qwen3xml_tool_parser.py +1316 -0
  257. vllm/entrypoints/openai/tool_parsers/seed_oss_tool_parser.py +744 -0
  258. vllm/entrypoints/openai/tool_parsers/step3_tool_parser.py +303 -0
  259. vllm/entrypoints/openai/tool_parsers/utils.py +229 -0
  260. vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +556 -0
  261. vllm/entrypoints/renderer.py +409 -0
  262. vllm/entrypoints/responses_utils.py +77 -0
  263. vllm/entrypoints/sagemaker/__init__.py +4 -0
  264. vllm/entrypoints/sagemaker/routes.py +72 -0
  265. vllm/entrypoints/score_utils.py +242 -0
  266. vllm/entrypoints/ssl.py +78 -0
  267. vllm/entrypoints/tool.py +143 -0
  268. vllm/entrypoints/tool_server.py +209 -0
  269. vllm/entrypoints/utils.py +319 -0
  270. vllm/env_override.py +378 -0
  271. vllm/envs.py +1659 -0
  272. vllm/forward_context.py +356 -0
  273. vllm/inputs/__init__.py +44 -0
  274. vllm/inputs/data.py +359 -0
  275. vllm/inputs/parse.py +137 -0
  276. vllm/inputs/preprocess.py +727 -0
  277. vllm/logger.py +267 -0
  278. vllm/logging_utils/__init__.py +10 -0
  279. vllm/logging_utils/dump_input.py +83 -0
  280. vllm/logging_utils/formatter.py +77 -0
  281. vllm/logging_utils/log_time.py +34 -0
  282. vllm/logits_process.py +121 -0
  283. vllm/logprobs.py +208 -0
  284. vllm/lora/__init__.py +0 -0
  285. vllm/lora/layers/__init__.py +41 -0
  286. vllm/lora/layers/base.py +67 -0
  287. vllm/lora/layers/base_linear.py +164 -0
  288. vllm/lora/layers/column_parallel_linear.py +578 -0
  289. vllm/lora/layers/fused_moe.py +472 -0
  290. vllm/lora/layers/logits_processor.py +252 -0
  291. vllm/lora/layers/replicated_linear.py +70 -0
  292. vllm/lora/layers/row_parallel_linear.py +181 -0
  293. vllm/lora/layers/utils.py +65 -0
  294. vllm/lora/layers/vocal_parallel_embedding.py +166 -0
  295. vllm/lora/lora_weights.py +198 -0
  296. vllm/lora/models.py +890 -0
  297. vllm/lora/ops/__init__.py +0 -0
  298. vllm/lora/ops/ipex_ops/__init__.py +6 -0
  299. vllm/lora/ops/ipex_ops/lora_ops.py +57 -0
  300. vllm/lora/ops/torch_ops/__init__.py +20 -0
  301. vllm/lora/ops/torch_ops/lora_ops.py +128 -0
  302. vllm/lora/ops/triton_ops/README_TUNING.md +60 -0
  303. vllm/lora/ops/triton_ops/__init__.py +21 -0
  304. vllm/lora/ops/triton_ops/fused_moe_lora_op.py +641 -0
  305. vllm/lora/ops/triton_ops/kernel_utils.py +340 -0
  306. vllm/lora/ops/triton_ops/lora_expand_op.py +310 -0
  307. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +154 -0
  308. vllm/lora/ops/triton_ops/lora_shrink_op.py +287 -0
  309. vllm/lora/ops/triton_ops/utils.py +295 -0
  310. vllm/lora/ops/xla_ops/__init__.py +6 -0
  311. vllm/lora/ops/xla_ops/lora_ops.py +141 -0
  312. vllm/lora/peft_helper.py +128 -0
  313. vllm/lora/punica_wrapper/__init__.py +10 -0
  314. vllm/lora/punica_wrapper/punica_base.py +492 -0
  315. vllm/lora/punica_wrapper/punica_cpu.py +351 -0
  316. vllm/lora/punica_wrapper/punica_gpu.py +411 -0
  317. vllm/lora/punica_wrapper/punica_selector.py +21 -0
  318. vllm/lora/punica_wrapper/punica_tpu.py +359 -0
  319. vllm/lora/punica_wrapper/punica_xpu.py +279 -0
  320. vllm/lora/punica_wrapper/utils.py +150 -0
  321. vllm/lora/request.py +100 -0
  322. vllm/lora/resolver.py +88 -0
  323. vllm/lora/utils.py +293 -0
  324. vllm/lora/worker_manager.py +279 -0
  325. vllm/model_executor/__init__.py +11 -0
  326. vllm/model_executor/custom_op.py +194 -0
  327. vllm/model_executor/layers/__init__.py +0 -0
  328. vllm/model_executor/layers/activation.py +569 -0
  329. vllm/model_executor/layers/attention_layer_base.py +35 -0
  330. vllm/model_executor/layers/batch_invariant.py +854 -0
  331. vllm/model_executor/layers/conv.py +236 -0
  332. vllm/model_executor/layers/fla/__init__.py +8 -0
  333. vllm/model_executor/layers/fla/ops/__init__.py +17 -0
  334. vllm/model_executor/layers/fla/ops/chunk.py +240 -0
  335. vllm/model_executor/layers/fla/ops/chunk_delta_h.py +344 -0
  336. vllm/model_executor/layers/fla/ops/chunk_o.py +183 -0
  337. vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +154 -0
  338. vllm/model_executor/layers/fla/ops/cumsum.py +280 -0
  339. vllm/model_executor/layers/fla/ops/fused_recurrent.py +390 -0
  340. vllm/model_executor/layers/fla/ops/index.py +41 -0
  341. vllm/model_executor/layers/fla/ops/kda.py +1351 -0
  342. vllm/model_executor/layers/fla/ops/l2norm.py +146 -0
  343. vllm/model_executor/layers/fla/ops/layernorm_guard.py +396 -0
  344. vllm/model_executor/layers/fla/ops/op.py +60 -0
  345. vllm/model_executor/layers/fla/ops/solve_tril.py +556 -0
  346. vllm/model_executor/layers/fla/ops/utils.py +194 -0
  347. vllm/model_executor/layers/fla/ops/wy_fast.py +158 -0
  348. vllm/model_executor/layers/fused_moe/__init__.py +106 -0
  349. vllm/model_executor/layers/fused_moe/all2all_utils.py +160 -0
  350. vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +406 -0
  351. vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +180 -0
  352. vllm/model_executor/layers/fused_moe/config.py +916 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H100,dtype=fp8_w8a8.json +123 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200.json +146 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_L40S.json +147 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI308X.json +213 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_L40S.json +147 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200.json +146 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  425. vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200.json +146 -0
  426. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  427. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  428. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  429. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
  430. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  431. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
  432. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  433. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  434. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  435. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  436. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  437. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  438. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  439. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
  440. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  441. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  442. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI300X.json +201 -0
  443. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
  444. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  445. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
  446. vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
  447. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  448. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
  449. vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI355_OAM,dtype=fp8_w8a8.json +164 -0
  450. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  451. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  452. vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  453. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  454. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  455. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  456. vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  457. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  458. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  459. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  460. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  461. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  462. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  463. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  464. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  465. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  466. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  467. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  468. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  469. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  470. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  471. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  472. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  473. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  474. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  475. vllm/model_executor/layers/fused_moe/configs/E=256,N=384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  476. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  477. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  478. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  479. vllm/model_executor/layers/fused_moe/configs/E=32,N=1408,device_name=NVIDIA_B200.json +147 -0
  480. vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  481. vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  482. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  483. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  484. vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  485. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  486. vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  487. vllm/model_executor/layers/fused_moe/configs/E=40,N=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
  488. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  489. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  490. vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  491. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
  492. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
  493. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +147 -0
  494. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  495. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
  496. vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
  497. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
  498. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  499. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  500. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  501. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
  502. vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
  503. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
  504. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
  505. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  506. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
  507. vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
  508. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
  509. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
  510. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
  511. vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
  512. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  513. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  514. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  515. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  516. vllm/model_executor/layers/fused_moe/configs/E=62,N=128,device_name=AMD_Instinct_MI300X.json +200 -0
  517. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=AMD_Instinct_MI300X.json +200 -0
  518. vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  519. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=AMD_Instinct_MI300X.json +200 -0
  520. vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  521. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  522. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  523. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  524. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  525. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  526. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  527. vllm/model_executor/layers/fused_moe/configs/E=64,N=1408,device_name=NVIDIA_B200.json +147 -0
  528. vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  529. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  530. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  531. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  532. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  533. vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
  534. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  535. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  536. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  537. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  538. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  539. vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
  540. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  541. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  542. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  543. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  544. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  545. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  546. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  547. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H100_PCIe,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
  548. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
  549. vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
  550. vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
  551. vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
  552. vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
  553. vllm/model_executor/layers/fused_moe/configs/E=72,N=192,device_name=AMD_Instinct_MI300X.json +200 -0
  554. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=AMD_Instinct_MI300X.json +200 -0
  555. vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  556. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=AMD_Instinct_MI300X.json +200 -0
  557. vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  558. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  559. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  560. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  561. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  562. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  563. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  564. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  565. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  566. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  567. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  568. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  569. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  570. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  571. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  572. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  573. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  574. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  575. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  576. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  577. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  578. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  579. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  580. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  581. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  582. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  583. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  584. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  585. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
  586. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  587. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  588. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  589. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  590. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  591. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  592. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  593. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  594. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  595. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  596. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  597. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  598. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  599. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  600. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  601. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  602. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  603. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  604. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  605. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  606. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  607. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  608. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  609. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  610. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  611. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  612. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  613. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  614. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  615. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  616. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  617. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  618. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  619. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  620. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  621. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  622. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  623. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  624. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  625. vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +354 -0
  626. vllm/model_executor/layers/fused_moe/cutlass_moe.py +1052 -0
  627. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +387 -0
  628. vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +416 -0
  629. vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +420 -0
  630. vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +367 -0
  631. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +307 -0
  632. vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +362 -0
  633. vllm/model_executor/layers/fused_moe/flashinfer_trtllm_moe.py +192 -0
  634. vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1012 -0
  635. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +792 -0
  636. vllm/model_executor/layers/fused_moe/fused_moe.py +2175 -0
  637. vllm/model_executor/layers/fused_moe/fused_moe_method_base.py +112 -0
  638. vllm/model_executor/layers/fused_moe/fused_moe_modular_method.py +164 -0
  639. vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +316 -0
  640. vllm/model_executor/layers/fused_moe/layer.py +1944 -0
  641. vllm/model_executor/layers/fused_moe/modular_kernel.py +1222 -0
  642. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +174 -0
  643. vllm/model_executor/layers/fused_moe/moe_pallas.py +83 -0
  644. vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +229 -0
  645. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
  646. vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +362 -0
  647. vllm/model_executor/layers/fused_moe/prepare_finalize.py +77 -0
  648. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +265 -0
  649. vllm/model_executor/layers/fused_moe/routing_simulator.py +310 -0
  650. vllm/model_executor/layers/fused_moe/shared_fused_moe.py +97 -0
  651. vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +171 -0
  652. vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +163 -0
  653. vllm/model_executor/layers/fused_moe/trtllm_moe.py +143 -0
  654. vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py +578 -0
  655. vllm/model_executor/layers/fused_moe/utils.py +332 -0
  656. vllm/model_executor/layers/kda.py +448 -0
  657. vllm/model_executor/layers/layernorm.py +442 -0
  658. vllm/model_executor/layers/lightning_attn.py +729 -0
  659. vllm/model_executor/layers/linear.py +1424 -0
  660. vllm/model_executor/layers/logits_processor.py +106 -0
  661. vllm/model_executor/layers/mamba/__init__.py +0 -0
  662. vllm/model_executor/layers/mamba/abstract.py +71 -0
  663. vllm/model_executor/layers/mamba/linear_attn.py +402 -0
  664. vllm/model_executor/layers/mamba/mamba_mixer.py +535 -0
  665. vllm/model_executor/layers/mamba/mamba_mixer2.py +928 -0
  666. vllm/model_executor/layers/mamba/mamba_utils.py +225 -0
  667. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  668. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +1240 -0
  669. vllm/model_executor/layers/mamba/ops/layernorm_gated.py +172 -0
  670. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +478 -0
  671. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +211 -0
  672. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +456 -0
  673. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +700 -0
  674. vllm/model_executor/layers/mamba/ops/ssd_combined.py +230 -0
  675. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +157 -0
  676. vllm/model_executor/layers/mamba/short_conv.py +264 -0
  677. vllm/model_executor/layers/mla.py +168 -0
  678. vllm/model_executor/layers/pooler.py +817 -0
  679. vllm/model_executor/layers/quantization/__init__.py +174 -0
  680. vllm/model_executor/layers/quantization/auto_round.py +454 -0
  681. vllm/model_executor/layers/quantization/awq.py +277 -0
  682. vllm/model_executor/layers/quantization/awq_marlin.py +659 -0
  683. vllm/model_executor/layers/quantization/awq_triton.py +337 -0
  684. vllm/model_executor/layers/quantization/base_config.py +170 -0
  685. vllm/model_executor/layers/quantization/bitblas.py +502 -0
  686. vllm/model_executor/layers/quantization/bitsandbytes.py +658 -0
  687. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +3 -0
  688. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +914 -0
  689. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +2284 -0
  690. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +35 -0
  691. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +392 -0
  692. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
  693. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +176 -0
  694. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +124 -0
  695. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +218 -0
  696. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +183 -0
  697. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +153 -0
  698. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +138 -0
  699. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +200 -0
  700. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +125 -0
  701. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +219 -0
  702. vllm/model_executor/layers/quantization/compressed_tensors/transform/__init__.py +0 -0
  703. vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +260 -0
  704. vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +173 -0
  705. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/__init__.py +0 -0
  706. vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +64 -0
  707. vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
  708. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +224 -0
  709. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
  710. vllm/model_executor/layers/quantization/deepspeedfp.py +218 -0
  711. vllm/model_executor/layers/quantization/experts_int8.py +240 -0
  712. vllm/model_executor/layers/quantization/fbgemm_fp8.py +195 -0
  713. vllm/model_executor/layers/quantization/fp8.py +1333 -0
  714. vllm/model_executor/layers/quantization/fp_quant.py +420 -0
  715. vllm/model_executor/layers/quantization/gguf.py +643 -0
  716. vllm/model_executor/layers/quantization/gptq.py +393 -0
  717. vllm/model_executor/layers/quantization/gptq_bitblas.py +482 -0
  718. vllm/model_executor/layers/quantization/gptq_marlin.py +789 -0
  719. vllm/model_executor/layers/quantization/gptq_marlin_24.py +320 -0
  720. vllm/model_executor/layers/quantization/hqq_marlin.py +371 -0
  721. vllm/model_executor/layers/quantization/inc.py +65 -0
  722. vllm/model_executor/layers/quantization/input_quant_fp8.py +171 -0
  723. vllm/model_executor/layers/quantization/ipex_quant.py +467 -0
  724. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  725. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +94 -0
  726. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +105 -0
  727. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
  728. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +323 -0
  729. vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +98 -0
  730. vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +119 -0
  731. vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +111 -0
  732. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +161 -0
  733. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +159 -0
  734. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +166 -0
  735. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +73 -0
  736. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +97 -0
  737. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +120 -0
  738. vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +219 -0
  739. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +140 -0
  740. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +42 -0
  741. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +105 -0
  742. vllm/model_executor/layers/quantization/kv_cache.py +146 -0
  743. vllm/model_executor/layers/quantization/modelopt.py +1788 -0
  744. vllm/model_executor/layers/quantization/moe_wna16.py +541 -0
  745. vllm/model_executor/layers/quantization/mxfp4.py +1162 -0
  746. vllm/model_executor/layers/quantization/petit.py +320 -0
  747. vllm/model_executor/layers/quantization/ptpc_fp8.py +137 -0
  748. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  749. vllm/model_executor/layers/quantization/quark/quark.py +528 -0
  750. vllm/model_executor/layers/quantization/quark/quark_moe.py +683 -0
  751. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
  752. vllm/model_executor/layers/quantization/quark/schemes/quark_ocp_mx.py +306 -0
  753. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
  754. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +179 -0
  755. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +139 -0
  756. vllm/model_executor/layers/quantization/quark/utils.py +105 -0
  757. vllm/model_executor/layers/quantization/qutlass_utils.py +185 -0
  758. vllm/model_executor/layers/quantization/rtn.py +652 -0
  759. vllm/model_executor/layers/quantization/schema.py +90 -0
  760. vllm/model_executor/layers/quantization/torchao.py +380 -0
  761. vllm/model_executor/layers/quantization/tpu_int8.py +139 -0
  762. vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
  763. vllm/model_executor/layers/quantization/utils/allspark_utils.py +67 -0
  764. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +229 -0
  765. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  766. vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  767. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  768. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  769. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  770. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  771. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  772. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  773. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  774. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  775. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  776. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  777. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  778. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  779. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  780. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  781. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  782. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  783. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  784. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  785. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  786. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  787. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  788. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  789. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  790. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  791. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  792. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  793. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  794. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  795. vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  796. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  797. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  798. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  799. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  800. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  801. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  802. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  803. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  804. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  805. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  806. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  807. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  808. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  809. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  810. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  811. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  812. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  813. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  814. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  815. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  816. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  817. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  818. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  819. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  820. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  821. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  822. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  823. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  824. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  825. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  826. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  827. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  828. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  829. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  830. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  831. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  832. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  833. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  834. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  835. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  836. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  837. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  838. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  839. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  840. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  841. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  842. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  843. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  844. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  845. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  846. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  847. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  848. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  849. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  850. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  851. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  852. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  853. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  854. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  855. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  856. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  857. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  858. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  859. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  860. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  861. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  862. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  863. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  864. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  865. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  866. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  867. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  868. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  869. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  870. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  871. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  872. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  873. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  874. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  875. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  876. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  877. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  878. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  879. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  880. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  881. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  882. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  883. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  884. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  885. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  886. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  887. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  888. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  889. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  890. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  891. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  892. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  893. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  894. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  895. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  896. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  897. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  898. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  899. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  900. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  901. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  902. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  903. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  904. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  905. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  906. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  907. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  908. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  909. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  910. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  911. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  912. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  913. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  914. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  915. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  916. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  917. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  918. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  919. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  920. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  921. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  922. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  923. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  924. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  925. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  926. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  927. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  928. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  929. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  930. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  931. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  932. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  933. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  934. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  935. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  936. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  937. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  938. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  939. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  940. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  941. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  942. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  943. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  944. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  945. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  946. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  947. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  948. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  949. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  950. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  951. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  952. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  953. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  954. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  955. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  956. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  957. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  958. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  959. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  960. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  961. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  962. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  963. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  964. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  965. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  966. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  967. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  968. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  969. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  970. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  971. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  972. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  973. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  974. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  975. vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
  976. vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +89 -0
  977. vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +298 -0
  978. vllm/model_executor/layers/quantization/utils/fp8_utils.py +1203 -0
  979. vllm/model_executor/layers/quantization/utils/gptq_utils.py +158 -0
  980. vllm/model_executor/layers/quantization/utils/int8_utils.py +489 -0
  981. vllm/model_executor/layers/quantization/utils/layer_utils.py +41 -0
  982. vllm/model_executor/layers/quantization/utils/machete_utils.py +56 -0
  983. vllm/model_executor/layers/quantization/utils/marlin_utils.py +575 -0
  984. vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +397 -0
  985. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +351 -0
  986. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +161 -0
  987. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +467 -0
  988. vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +181 -0
  989. vllm/model_executor/layers/quantization/utils/mxfp6_utils.py +142 -0
  990. vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +24 -0
  991. vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +142 -0
  992. vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +63 -0
  993. vllm/model_executor/layers/quantization/utils/ocp_mx_utils.py +51 -0
  994. vllm/model_executor/layers/quantization/utils/petit_utils.py +124 -0
  995. vllm/model_executor/layers/quantization/utils/quant_utils.py +687 -0
  996. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +516 -0
  997. vllm/model_executor/layers/resampler.py +283 -0
  998. vllm/model_executor/layers/rotary_embedding/__init__.py +278 -0
  999. vllm/model_executor/layers/rotary_embedding/base.py +235 -0
  1000. vllm/model_executor/layers/rotary_embedding/common.py +188 -0
  1001. vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +165 -0
  1002. vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +215 -0
  1003. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +43 -0
  1004. vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +68 -0
  1005. vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +75 -0
  1006. vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
  1007. vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
  1008. vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +80 -0
  1009. vllm/model_executor/layers/rotary_embedding/mrope.py +397 -0
  1010. vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +47 -0
  1011. vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +159 -0
  1012. vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +81 -0
  1013. vllm/model_executor/layers/utils.py +251 -0
  1014. vllm/model_executor/layers/vocab_parallel_embedding.py +558 -0
  1015. vllm/model_executor/model_loader/__init__.py +148 -0
  1016. vllm/model_executor/model_loader/base_loader.py +57 -0
  1017. vllm/model_executor/model_loader/bitsandbytes_loader.py +822 -0
  1018. vllm/model_executor/model_loader/default_loader.py +327 -0
  1019. vllm/model_executor/model_loader/dummy_loader.py +28 -0
  1020. vllm/model_executor/model_loader/gguf_loader.py +176 -0
  1021. vllm/model_executor/model_loader/online_quantization.py +224 -0
  1022. vllm/model_executor/model_loader/runai_streamer_loader.py +116 -0
  1023. vllm/model_executor/model_loader/sharded_state_loader.py +206 -0
  1024. vllm/model_executor/model_loader/tensorizer.py +790 -0
  1025. vllm/model_executor/model_loader/tensorizer_loader.py +151 -0
  1026. vllm/model_executor/model_loader/tpu.py +118 -0
  1027. vllm/model_executor/model_loader/utils.py +288 -0
  1028. vllm/model_executor/model_loader/weight_utils.py +1084 -0
  1029. vllm/model_executor/models/__init__.py +44 -0
  1030. vllm/model_executor/models/adapters.py +543 -0
  1031. vllm/model_executor/models/afmoe.py +711 -0
  1032. vllm/model_executor/models/aimv2.py +247 -0
  1033. vllm/model_executor/models/apertus.py +587 -0
  1034. vllm/model_executor/models/arcee.py +439 -0
  1035. vllm/model_executor/models/arctic.py +635 -0
  1036. vllm/model_executor/models/aria.py +655 -0
  1037. vllm/model_executor/models/aya_vision.py +450 -0
  1038. vllm/model_executor/models/baichuan.py +496 -0
  1039. vllm/model_executor/models/bailing_moe.py +646 -0
  1040. vllm/model_executor/models/bamba.py +522 -0
  1041. vllm/model_executor/models/bee.py +157 -0
  1042. vllm/model_executor/models/bert.py +925 -0
  1043. vllm/model_executor/models/bert_with_rope.py +732 -0
  1044. vllm/model_executor/models/blip.py +349 -0
  1045. vllm/model_executor/models/blip2.py +695 -0
  1046. vllm/model_executor/models/bloom.py +390 -0
  1047. vllm/model_executor/models/chameleon.py +1120 -0
  1048. vllm/model_executor/models/chatglm.py +498 -0
  1049. vllm/model_executor/models/clip.py +965 -0
  1050. vllm/model_executor/models/cohere2_vision.py +472 -0
  1051. vllm/model_executor/models/commandr.py +473 -0
  1052. vllm/model_executor/models/config.py +503 -0
  1053. vllm/model_executor/models/dbrx.py +482 -0
  1054. vllm/model_executor/models/deepencoder.py +673 -0
  1055. vllm/model_executor/models/deepseek_eagle.py +260 -0
  1056. vllm/model_executor/models/deepseek_mtp.py +360 -0
  1057. vllm/model_executor/models/deepseek_ocr.py +593 -0
  1058. vllm/model_executor/models/deepseek_v2.py +1649 -0
  1059. vllm/model_executor/models/deepseek_vl2.py +655 -0
  1060. vllm/model_executor/models/dots1.py +574 -0
  1061. vllm/model_executor/models/dots_ocr.py +900 -0
  1062. vllm/model_executor/models/ernie45.py +53 -0
  1063. vllm/model_executor/models/ernie45_moe.py +759 -0
  1064. vllm/model_executor/models/ernie45_vl.py +1742 -0
  1065. vllm/model_executor/models/ernie45_vl_moe.py +803 -0
  1066. vllm/model_executor/models/ernie_mtp.py +279 -0
  1067. vllm/model_executor/models/exaone.py +545 -0
  1068. vllm/model_executor/models/exaone4.py +531 -0
  1069. vllm/model_executor/models/fairseq2_llama.py +154 -0
  1070. vllm/model_executor/models/falcon.py +545 -0
  1071. vllm/model_executor/models/falcon_h1.py +685 -0
  1072. vllm/model_executor/models/flex_olmo.py +155 -0
  1073. vllm/model_executor/models/fuyu.py +373 -0
  1074. vllm/model_executor/models/gemma.py +426 -0
  1075. vllm/model_executor/models/gemma2.py +439 -0
  1076. vllm/model_executor/models/gemma3.py +571 -0
  1077. vllm/model_executor/models/gemma3_mm.py +741 -0
  1078. vllm/model_executor/models/gemma3n.py +1165 -0
  1079. vllm/model_executor/models/gemma3n_mm.py +811 -0
  1080. vllm/model_executor/models/glm.py +23 -0
  1081. vllm/model_executor/models/glm4.py +305 -0
  1082. vllm/model_executor/models/glm4_1v.py +1821 -0
  1083. vllm/model_executor/models/glm4_moe.py +747 -0
  1084. vllm/model_executor/models/glm4_moe_mtp.py +359 -0
  1085. vllm/model_executor/models/glm4v.py +784 -0
  1086. vllm/model_executor/models/gpt2.py +397 -0
  1087. vllm/model_executor/models/gpt_bigcode.py +339 -0
  1088. vllm/model_executor/models/gpt_j.py +346 -0
  1089. vllm/model_executor/models/gpt_neox.py +344 -0
  1090. vllm/model_executor/models/gpt_oss.py +738 -0
  1091. vllm/model_executor/models/granite.py +516 -0
  1092. vllm/model_executor/models/granite_speech.py +913 -0
  1093. vllm/model_executor/models/granitemoe.py +569 -0
  1094. vllm/model_executor/models/granitemoehybrid.py +709 -0
  1095. vllm/model_executor/models/granitemoeshared.py +333 -0
  1096. vllm/model_executor/models/gritlm.py +245 -0
  1097. vllm/model_executor/models/grok1.py +558 -0
  1098. vllm/model_executor/models/h2ovl.py +554 -0
  1099. vllm/model_executor/models/hunyuan_v1.py +1053 -0
  1100. vllm/model_executor/models/hyperclovax_vision.py +1166 -0
  1101. vllm/model_executor/models/idefics2_vision_model.py +426 -0
  1102. vllm/model_executor/models/idefics3.py +717 -0
  1103. vllm/model_executor/models/interfaces.py +1092 -0
  1104. vllm/model_executor/models/interfaces_base.py +214 -0
  1105. vllm/model_executor/models/intern_vit.py +453 -0
  1106. vllm/model_executor/models/internlm2.py +460 -0
  1107. vllm/model_executor/models/internlm2_ve.py +142 -0
  1108. vllm/model_executor/models/interns1.py +830 -0
  1109. vllm/model_executor/models/interns1_vit.py +432 -0
  1110. vllm/model_executor/models/internvl.py +1452 -0
  1111. vllm/model_executor/models/jais.py +397 -0
  1112. vllm/model_executor/models/jamba.py +610 -0
  1113. vllm/model_executor/models/jina_vl.py +147 -0
  1114. vllm/model_executor/models/keye.py +1761 -0
  1115. vllm/model_executor/models/keye_vl1_5.py +726 -0
  1116. vllm/model_executor/models/kimi_linear.py +663 -0
  1117. vllm/model_executor/models/kimi_vl.py +578 -0
  1118. vllm/model_executor/models/lfm2.py +532 -0
  1119. vllm/model_executor/models/lfm2_moe.py +762 -0
  1120. vllm/model_executor/models/lightonocr.py +195 -0
  1121. vllm/model_executor/models/llama.py +732 -0
  1122. vllm/model_executor/models/llama4.py +859 -0
  1123. vllm/model_executor/models/llama4_eagle.py +223 -0
  1124. vllm/model_executor/models/llama_eagle.py +218 -0
  1125. vllm/model_executor/models/llama_eagle3.py +367 -0
  1126. vllm/model_executor/models/llava.py +842 -0
  1127. vllm/model_executor/models/llava_next.py +583 -0
  1128. vllm/model_executor/models/llava_next_video.py +467 -0
  1129. vllm/model_executor/models/llava_onevision.py +923 -0
  1130. vllm/model_executor/models/longcat_flash.py +749 -0
  1131. vllm/model_executor/models/longcat_flash_mtp.py +349 -0
  1132. vllm/model_executor/models/mamba.py +276 -0
  1133. vllm/model_executor/models/mamba2.py +289 -0
  1134. vllm/model_executor/models/medusa.py +179 -0
  1135. vllm/model_executor/models/midashenglm.py +827 -0
  1136. vllm/model_executor/models/mimo.py +188 -0
  1137. vllm/model_executor/models/mimo_mtp.py +294 -0
  1138. vllm/model_executor/models/minicpm.py +664 -0
  1139. vllm/model_executor/models/minicpm3.py +242 -0
  1140. vllm/model_executor/models/minicpm_eagle.py +389 -0
  1141. vllm/model_executor/models/minicpmo.py +768 -0
  1142. vllm/model_executor/models/minicpmv.py +1745 -0
  1143. vllm/model_executor/models/minimax_m2.py +552 -0
  1144. vllm/model_executor/models/minimax_text_01.py +1012 -0
  1145. vllm/model_executor/models/minimax_vl_01.py +396 -0
  1146. vllm/model_executor/models/mistral3.py +637 -0
  1147. vllm/model_executor/models/mixtral.py +621 -0
  1148. vllm/model_executor/models/mllama4.py +1147 -0
  1149. vllm/model_executor/models/mlp_speculator.py +235 -0
  1150. vllm/model_executor/models/modernbert.py +450 -0
  1151. vllm/model_executor/models/module_mapping.py +74 -0
  1152. vllm/model_executor/models/molmo.py +1555 -0
  1153. vllm/model_executor/models/moonvit.py +677 -0
  1154. vllm/model_executor/models/mpt.py +335 -0
  1155. vllm/model_executor/models/nano_nemotron_vl.py +1740 -0
  1156. vllm/model_executor/models/nemotron.py +518 -0
  1157. vllm/model_executor/models/nemotron_h.py +852 -0
  1158. vllm/model_executor/models/nemotron_nas.py +491 -0
  1159. vllm/model_executor/models/nemotron_vl.py +653 -0
  1160. vllm/model_executor/models/nvlm_d.py +216 -0
  1161. vllm/model_executor/models/olmo.py +414 -0
  1162. vllm/model_executor/models/olmo2.py +454 -0
  1163. vllm/model_executor/models/olmoe.py +498 -0
  1164. vllm/model_executor/models/openpangu.py +1062 -0
  1165. vllm/model_executor/models/openpangu_mtp.py +265 -0
  1166. vllm/model_executor/models/opt.py +426 -0
  1167. vllm/model_executor/models/orion.py +372 -0
  1168. vllm/model_executor/models/ouro.py +516 -0
  1169. vllm/model_executor/models/ovis.py +559 -0
  1170. vllm/model_executor/models/ovis2_5.py +673 -0
  1171. vllm/model_executor/models/paddleocr_vl.py +1407 -0
  1172. vllm/model_executor/models/paligemma.py +412 -0
  1173. vllm/model_executor/models/persimmon.py +377 -0
  1174. vllm/model_executor/models/phi.py +374 -0
  1175. vllm/model_executor/models/phi3.py +18 -0
  1176. vllm/model_executor/models/phi3v.py +737 -0
  1177. vllm/model_executor/models/phi4_multimodal.py +1447 -0
  1178. vllm/model_executor/models/phi4mm.py +1253 -0
  1179. vllm/model_executor/models/phi4mm_audio.py +1296 -0
  1180. vllm/model_executor/models/phi4mm_utils.py +1907 -0
  1181. vllm/model_executor/models/phimoe.py +675 -0
  1182. vllm/model_executor/models/pixtral.py +1352 -0
  1183. vllm/model_executor/models/plamo2.py +981 -0
  1184. vllm/model_executor/models/qwen.py +368 -0
  1185. vllm/model_executor/models/qwen2.py +541 -0
  1186. vllm/model_executor/models/qwen2_5_omni_thinker.py +1246 -0
  1187. vllm/model_executor/models/qwen2_5_vl.py +1613 -0
  1188. vllm/model_executor/models/qwen2_audio.py +473 -0
  1189. vllm/model_executor/models/qwen2_moe.py +596 -0
  1190. vllm/model_executor/models/qwen2_rm.py +123 -0
  1191. vllm/model_executor/models/qwen2_vl.py +1670 -0
  1192. vllm/model_executor/models/qwen3.py +336 -0
  1193. vllm/model_executor/models/qwen3_moe.py +744 -0
  1194. vllm/model_executor/models/qwen3_next.py +1395 -0
  1195. vllm/model_executor/models/qwen3_next_mtp.py +296 -0
  1196. vllm/model_executor/models/qwen3_omni_moe_thinker.py +1721 -0
  1197. vllm/model_executor/models/qwen3_vl.py +1673 -0
  1198. vllm/model_executor/models/qwen3_vl_moe.py +415 -0
  1199. vllm/model_executor/models/qwen_vl.py +802 -0
  1200. vllm/model_executor/models/radio.py +555 -0
  1201. vllm/model_executor/models/registry.py +1155 -0
  1202. vllm/model_executor/models/roberta.py +259 -0
  1203. vllm/model_executor/models/rvl.py +107 -0
  1204. vllm/model_executor/models/seed_oss.py +497 -0
  1205. vllm/model_executor/models/siglip.py +1174 -0
  1206. vllm/model_executor/models/siglip2navit.py +724 -0
  1207. vllm/model_executor/models/skyworkr1v.py +953 -0
  1208. vllm/model_executor/models/smolvlm.py +38 -0
  1209. vllm/model_executor/models/solar.py +502 -0
  1210. vllm/model_executor/models/stablelm.py +359 -0
  1211. vllm/model_executor/models/starcoder2.py +367 -0
  1212. vllm/model_executor/models/step3_text.py +559 -0
  1213. vllm/model_executor/models/step3_vl.py +1148 -0
  1214. vllm/model_executor/models/swin.py +514 -0
  1215. vllm/model_executor/models/tarsier.py +619 -0
  1216. vllm/model_executor/models/telechat2.py +153 -0
  1217. vllm/model_executor/models/teleflm.py +78 -0
  1218. vllm/model_executor/models/terratorch.py +319 -0
  1219. vllm/model_executor/models/transformers/__init__.py +127 -0
  1220. vllm/model_executor/models/transformers/base.py +464 -0
  1221. vllm/model_executor/models/transformers/causal.py +65 -0
  1222. vllm/model_executor/models/transformers/legacy.py +90 -0
  1223. vllm/model_executor/models/transformers/moe.py +318 -0
  1224. vllm/model_executor/models/transformers/multimodal.py +411 -0
  1225. vllm/model_executor/models/transformers/pooling.py +119 -0
  1226. vllm/model_executor/models/transformers/utils.py +207 -0
  1227. vllm/model_executor/models/ultravox.py +681 -0
  1228. vllm/model_executor/models/utils.py +877 -0
  1229. vllm/model_executor/models/vision.py +552 -0
  1230. vllm/model_executor/models/voxtral.py +845 -0
  1231. vllm/model_executor/models/whisper.py +959 -0
  1232. vllm/model_executor/models/zamba2.py +986 -0
  1233. vllm/model_executor/parameter.py +642 -0
  1234. vllm/model_executor/utils.py +94 -0
  1235. vllm/model_executor/warmup/__init__.py +0 -0
  1236. vllm/model_executor/warmup/deep_gemm_warmup.py +314 -0
  1237. vllm/model_executor/warmup/kernel_warmup.py +98 -0
  1238. vllm/multimodal/__init__.py +40 -0
  1239. vllm/multimodal/audio.py +118 -0
  1240. vllm/multimodal/base.py +26 -0
  1241. vllm/multimodal/cache.py +755 -0
  1242. vllm/multimodal/evs.py +294 -0
  1243. vllm/multimodal/hasher.py +106 -0
  1244. vllm/multimodal/image.py +130 -0
  1245. vllm/multimodal/inputs.py +1036 -0
  1246. vllm/multimodal/parse.py +544 -0
  1247. vllm/multimodal/processing.py +2186 -0
  1248. vllm/multimodal/profiling.py +369 -0
  1249. vllm/multimodal/registry.py +360 -0
  1250. vllm/multimodal/utils.py +512 -0
  1251. vllm/multimodal/video.py +306 -0
  1252. vllm/outputs.py +345 -0
  1253. vllm/platforms/__init__.py +277 -0
  1254. vllm/platforms/cpu.py +414 -0
  1255. vllm/platforms/cuda.py +657 -0
  1256. vllm/platforms/interface.py +639 -0
  1257. vllm/platforms/rocm.py +466 -0
  1258. vllm/platforms/tpu.py +276 -0
  1259. vllm/platforms/xpu.py +274 -0
  1260. vllm/plugins/__init__.py +78 -0
  1261. vllm/plugins/io_processors/__init__.py +68 -0
  1262. vllm/plugins/io_processors/interface.py +77 -0
  1263. vllm/plugins/lora_resolvers/__init__.py +0 -0
  1264. vllm/plugins/lora_resolvers/filesystem_resolver.py +52 -0
  1265. vllm/pooling_params.py +228 -0
  1266. vllm/profiler/__init__.py +0 -0
  1267. vllm/profiler/gpu_profiler.py +37 -0
  1268. vllm/profiler/layerwise_profile.py +392 -0
  1269. vllm/profiler/utils.py +151 -0
  1270. vllm/py.typed +2 -0
  1271. vllm/ray/__init__.py +0 -0
  1272. vllm/ray/lazy_utils.py +26 -0
  1273. vllm/ray/ray_env.py +79 -0
  1274. vllm/reasoning/__init__.py +92 -0
  1275. vllm/reasoning/abs_reasoning_parsers.py +290 -0
  1276. vllm/reasoning/basic_parsers.py +162 -0
  1277. vllm/reasoning/deepseek_r1_reasoning_parser.py +67 -0
  1278. vllm/reasoning/deepseek_v3_reasoning_parser.py +62 -0
  1279. vllm/reasoning/ernie45_reasoning_parser.py +165 -0
  1280. vllm/reasoning/glm4_moe_reasoning_parser.py +171 -0
  1281. vllm/reasoning/gptoss_reasoning_parser.py +173 -0
  1282. vllm/reasoning/granite_reasoning_parser.py +363 -0
  1283. vllm/reasoning/hunyuan_a13b_reasoning_parser.py +237 -0
  1284. vllm/reasoning/identity_reasoning_parser.py +58 -0
  1285. vllm/reasoning/minimax_m2_reasoning_parser.py +67 -0
  1286. vllm/reasoning/mistral_reasoning_parser.py +55 -0
  1287. vllm/reasoning/olmo3_reasoning_parser.py +302 -0
  1288. vllm/reasoning/qwen3_reasoning_parser.py +67 -0
  1289. vllm/reasoning/seedoss_reasoning_parser.py +27 -0
  1290. vllm/reasoning/step3_reasoning_parser.py +107 -0
  1291. vllm/sampling_params.py +669 -0
  1292. vllm/scalar_type.py +355 -0
  1293. vllm/scripts.py +17 -0
  1294. vllm/sequence.py +98 -0
  1295. vllm/tasks.py +13 -0
  1296. vllm/third_party/__init__.py +0 -0
  1297. vllm/third_party/pynvml.py +6140 -0
  1298. vllm/tracing.py +135 -0
  1299. vllm/transformers_utils/__init__.py +26 -0
  1300. vllm/transformers_utils/chat_templates/__init__.py +5 -0
  1301. vllm/transformers_utils/chat_templates/registry.py +73 -0
  1302. vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
  1303. vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
  1304. vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
  1305. vllm/transformers_utils/chat_templates/template_deepseek_ocr.jinja +14 -0
  1306. vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
  1307. vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
  1308. vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
  1309. vllm/transformers_utils/config.py +1203 -0
  1310. vllm/transformers_utils/config_parser_base.py +20 -0
  1311. vllm/transformers_utils/configs/__init__.py +70 -0
  1312. vllm/transformers_utils/configs/afmoe.py +84 -0
  1313. vllm/transformers_utils/configs/arctic.py +206 -0
  1314. vllm/transformers_utils/configs/chatglm.py +75 -0
  1315. vllm/transformers_utils/configs/deepseek_vl2.py +126 -0
  1316. vllm/transformers_utils/configs/dotsocr.py +71 -0
  1317. vllm/transformers_utils/configs/eagle.py +84 -0
  1318. vllm/transformers_utils/configs/falcon.py +89 -0
  1319. vllm/transformers_utils/configs/flex_olmo.py +77 -0
  1320. vllm/transformers_utils/configs/jais.py +243 -0
  1321. vllm/transformers_utils/configs/kimi_linear.py +144 -0
  1322. vllm/transformers_utils/configs/kimi_vl.py +38 -0
  1323. vllm/transformers_utils/configs/lfm2_moe.py +159 -0
  1324. vllm/transformers_utils/configs/medusa.py +65 -0
  1325. vllm/transformers_utils/configs/midashenglm.py +103 -0
  1326. vllm/transformers_utils/configs/mistral.py +174 -0
  1327. vllm/transformers_utils/configs/mlp_speculator.py +69 -0
  1328. vllm/transformers_utils/configs/moonvit.py +33 -0
  1329. vllm/transformers_utils/configs/nemotron.py +212 -0
  1330. vllm/transformers_utils/configs/nemotron_h.py +282 -0
  1331. vllm/transformers_utils/configs/olmo3.py +79 -0
  1332. vllm/transformers_utils/configs/ovis.py +182 -0
  1333. vllm/transformers_utils/configs/qwen3_next.py +274 -0
  1334. vllm/transformers_utils/configs/radio.py +89 -0
  1335. vllm/transformers_utils/configs/speculators/__init__.py +2 -0
  1336. vllm/transformers_utils/configs/speculators/algos.py +38 -0
  1337. vllm/transformers_utils/configs/speculators/base.py +114 -0
  1338. vllm/transformers_utils/configs/step3_vl.py +174 -0
  1339. vllm/transformers_utils/configs/ultravox.py +118 -0
  1340. vllm/transformers_utils/detokenizer_utils.py +198 -0
  1341. vllm/transformers_utils/dynamic_module.py +59 -0
  1342. vllm/transformers_utils/processor.py +402 -0
  1343. vllm/transformers_utils/processors/__init__.py +15 -0
  1344. vllm/transformers_utils/processors/deepseek_ocr.py +438 -0
  1345. vllm/transformers_utils/processors/deepseek_vl2.py +406 -0
  1346. vllm/transformers_utils/processors/ovis.py +453 -0
  1347. vllm/transformers_utils/processors/ovis2_5.py +468 -0
  1348. vllm/transformers_utils/runai_utils.py +104 -0
  1349. vllm/transformers_utils/s3_utils.py +95 -0
  1350. vllm/transformers_utils/tokenizer.py +293 -0
  1351. vllm/transformers_utils/tokenizer_base.py +155 -0
  1352. vllm/transformers_utils/tokenizers/__init__.py +16 -0
  1353. vllm/transformers_utils/tokenizers/mistral.py +502 -0
  1354. vllm/transformers_utils/utils.py +130 -0
  1355. vllm/triton_utils/__init__.py +19 -0
  1356. vllm/triton_utils/importing.py +103 -0
  1357. vllm/usage/__init__.py +0 -0
  1358. vllm/usage/usage_lib.py +294 -0
  1359. vllm/utils/__init__.py +82 -0
  1360. vllm/utils/argparse_utils.py +487 -0
  1361. vllm/utils/async_utils.py +303 -0
  1362. vllm/utils/cache.py +214 -0
  1363. vllm/utils/collection_utils.py +139 -0
  1364. vllm/utils/counter.py +45 -0
  1365. vllm/utils/deep_gemm.py +391 -0
  1366. vllm/utils/flashinfer.py +490 -0
  1367. vllm/utils/func_utils.py +236 -0
  1368. vllm/utils/gc_utils.py +147 -0
  1369. vllm/utils/hashing.py +63 -0
  1370. vllm/utils/import_utils.py +411 -0
  1371. vllm/utils/jsontree.py +165 -0
  1372. vllm/utils/math_utils.py +32 -0
  1373. vllm/utils/mem_constants.py +13 -0
  1374. vllm/utils/mem_utils.py +232 -0
  1375. vllm/utils/nccl.py +64 -0
  1376. vllm/utils/network_utils.py +331 -0
  1377. vllm/utils/platform_utils.py +59 -0
  1378. vllm/utils/profiling.py +56 -0
  1379. vllm/utils/registry.py +49 -0
  1380. vllm/utils/serial_utils.py +169 -0
  1381. vllm/utils/system_utils.py +229 -0
  1382. vllm/utils/tensor_schema.py +255 -0
  1383. vllm/utils/torch_utils.py +657 -0
  1384. vllm/v1/__init__.py +0 -0
  1385. vllm/v1/attention/__init__.py +0 -0
  1386. vllm/v1/attention/backends/__init__.py +0 -0
  1387. vllm/v1/attention/backends/cpu_attn.py +496 -0
  1388. vllm/v1/attention/backends/flash_attn.py +1028 -0
  1389. vllm/v1/attention/backends/flashinfer.py +1572 -0
  1390. vllm/v1/attention/backends/flex_attention.py +926 -0
  1391. vllm/v1/attention/backends/gdn_attn.py +387 -0
  1392. vllm/v1/attention/backends/linear_attn.py +74 -0
  1393. vllm/v1/attention/backends/mamba1_attn.py +165 -0
  1394. vllm/v1/attention/backends/mamba2_attn.py +354 -0
  1395. vllm/v1/attention/backends/mamba_attn.py +115 -0
  1396. vllm/v1/attention/backends/mla/__init__.py +0 -0
  1397. vllm/v1/attention/backends/mla/common.py +2031 -0
  1398. vllm/v1/attention/backends/mla/cutlass_mla.py +275 -0
  1399. vllm/v1/attention/backends/mla/flashattn_mla.py +337 -0
  1400. vllm/v1/attention/backends/mla/flashinfer_mla.py +171 -0
  1401. vllm/v1/attention/backends/mla/flashmla.py +314 -0
  1402. vllm/v1/attention/backends/mla/flashmla_sparse.py +548 -0
  1403. vllm/v1/attention/backends/mla/indexer.py +362 -0
  1404. vllm/v1/attention/backends/mla/rocm_aiter_mla.py +294 -0
  1405. vllm/v1/attention/backends/mla/triton_mla.py +171 -0
  1406. vllm/v1/attention/backends/pallas.py +436 -0
  1407. vllm/v1/attention/backends/rocm_aiter_fa.py +816 -0
  1408. vllm/v1/attention/backends/rocm_aiter_unified_attn.py +196 -0
  1409. vllm/v1/attention/backends/rocm_attn.py +362 -0
  1410. vllm/v1/attention/backends/short_conv_attn.py +105 -0
  1411. vllm/v1/attention/backends/tree_attn.py +425 -0
  1412. vllm/v1/attention/backends/triton_attn.py +373 -0
  1413. vllm/v1/attention/backends/utils.py +1116 -0
  1414. vllm/v1/attention/backends/xformers.py +417 -0
  1415. vllm/v1/core/__init__.py +0 -0
  1416. vllm/v1/core/block_pool.py +428 -0
  1417. vllm/v1/core/encoder_cache_manager.py +343 -0
  1418. vllm/v1/core/kv_cache_coordinator.py +480 -0
  1419. vllm/v1/core/kv_cache_manager.py +420 -0
  1420. vllm/v1/core/kv_cache_utils.py +1340 -0
  1421. vllm/v1/core/sched/__init__.py +0 -0
  1422. vllm/v1/core/sched/async_scheduler.py +62 -0
  1423. vllm/v1/core/sched/interface.py +181 -0
  1424. vllm/v1/core/sched/output.py +202 -0
  1425. vllm/v1/core/sched/request_queue.py +221 -0
  1426. vllm/v1/core/sched/scheduler.py +1617 -0
  1427. vllm/v1/core/sched/utils.py +72 -0
  1428. vllm/v1/core/single_type_kv_cache_manager.py +736 -0
  1429. vllm/v1/cudagraph_dispatcher.py +148 -0
  1430. vllm/v1/engine/__init__.py +206 -0
  1431. vllm/v1/engine/async_llm.py +797 -0
  1432. vllm/v1/engine/coordinator.py +377 -0
  1433. vllm/v1/engine/core.py +1420 -0
  1434. vllm/v1/engine/core_client.py +1400 -0
  1435. vllm/v1/engine/detokenizer.py +351 -0
  1436. vllm/v1/engine/exceptions.py +18 -0
  1437. vllm/v1/engine/llm_engine.py +408 -0
  1438. vllm/v1/engine/logprobs.py +182 -0
  1439. vllm/v1/engine/output_processor.py +642 -0
  1440. vllm/v1/engine/parallel_sampling.py +145 -0
  1441. vllm/v1/engine/processor.py +621 -0
  1442. vllm/v1/engine/utils.py +1072 -0
  1443. vllm/v1/executor/__init__.py +6 -0
  1444. vllm/v1/executor/abstract.py +352 -0
  1445. vllm/v1/executor/multiproc_executor.py +877 -0
  1446. vllm/v1/executor/ray_distributed_executor.py +8 -0
  1447. vllm/v1/executor/ray_executor.py +626 -0
  1448. vllm/v1/executor/ray_utils.py +465 -0
  1449. vllm/v1/executor/uniproc_executor.py +183 -0
  1450. vllm/v1/kv_cache_interface.py +403 -0
  1451. vllm/v1/kv_offload/__init__.py +0 -0
  1452. vllm/v1/kv_offload/abstract.py +161 -0
  1453. vllm/v1/kv_offload/arc_manager.py +237 -0
  1454. vllm/v1/kv_offload/backend.py +97 -0
  1455. vllm/v1/kv_offload/backends/__init__.py +0 -0
  1456. vllm/v1/kv_offload/backends/cpu.py +62 -0
  1457. vllm/v1/kv_offload/cpu.py +93 -0
  1458. vllm/v1/kv_offload/factory.py +56 -0
  1459. vllm/v1/kv_offload/lru_manager.py +139 -0
  1460. vllm/v1/kv_offload/mediums.py +39 -0
  1461. vllm/v1/kv_offload/spec.py +62 -0
  1462. vllm/v1/kv_offload/worker/__init__.py +0 -0
  1463. vllm/v1/kv_offload/worker/cpu_gpu.py +185 -0
  1464. vllm/v1/kv_offload/worker/worker.py +144 -0
  1465. vllm/v1/metrics/__init__.py +0 -0
  1466. vllm/v1/metrics/loggers.py +1238 -0
  1467. vllm/v1/metrics/prometheus.py +82 -0
  1468. vllm/v1/metrics/ray_wrappers.py +169 -0
  1469. vllm/v1/metrics/reader.py +257 -0
  1470. vllm/v1/metrics/stats.py +420 -0
  1471. vllm/v1/outputs.py +249 -0
  1472. vllm/v1/pool/__init__.py +0 -0
  1473. vllm/v1/pool/metadata.py +82 -0
  1474. vllm/v1/request.py +259 -0
  1475. vllm/v1/sample/__init__.py +0 -0
  1476. vllm/v1/sample/logits_processor/__init__.py +352 -0
  1477. vllm/v1/sample/logits_processor/builtin.py +274 -0
  1478. vllm/v1/sample/logits_processor/interface.py +106 -0
  1479. vllm/v1/sample/logits_processor/state.py +165 -0
  1480. vllm/v1/sample/metadata.py +44 -0
  1481. vllm/v1/sample/ops/__init__.py +0 -0
  1482. vllm/v1/sample/ops/bad_words.py +52 -0
  1483. vllm/v1/sample/ops/logprobs.py +25 -0
  1484. vllm/v1/sample/ops/penalties.py +57 -0
  1485. vllm/v1/sample/ops/topk_topp_sampler.py +290 -0
  1486. vllm/v1/sample/rejection_sampler.py +793 -0
  1487. vllm/v1/sample/sampler.py +316 -0
  1488. vllm/v1/sample/tpu/__init__.py +0 -0
  1489. vllm/v1/sample/tpu/metadata.py +120 -0
  1490. vllm/v1/sample/tpu/sampler.py +215 -0
  1491. vllm/v1/serial_utils.py +532 -0
  1492. vllm/v1/spec_decode/__init__.py +0 -0
  1493. vllm/v1/spec_decode/eagle.py +1225 -0
  1494. vllm/v1/spec_decode/medusa.py +73 -0
  1495. vllm/v1/spec_decode/metadata.py +66 -0
  1496. vllm/v1/spec_decode/metrics.py +224 -0
  1497. vllm/v1/spec_decode/ngram_proposer.py +291 -0
  1498. vllm/v1/spec_decode/suffix_decoding.py +103 -0
  1499. vllm/v1/spec_decode/utils.py +16 -0
  1500. vllm/v1/structured_output/__init__.py +338 -0
  1501. vllm/v1/structured_output/backend_guidance.py +265 -0
  1502. vllm/v1/structured_output/backend_lm_format_enforcer.py +177 -0
  1503. vllm/v1/structured_output/backend_outlines.py +324 -0
  1504. vllm/v1/structured_output/backend_types.py +136 -0
  1505. vllm/v1/structured_output/backend_xgrammar.py +362 -0
  1506. vllm/v1/structured_output/request.py +94 -0
  1507. vllm/v1/structured_output/utils.py +469 -0
  1508. vllm/v1/utils.py +414 -0
  1509. vllm/v1/worker/__init__.py +0 -0
  1510. vllm/v1/worker/block_table.py +327 -0
  1511. vllm/v1/worker/cpu_model_runner.py +122 -0
  1512. vllm/v1/worker/cpu_worker.py +206 -0
  1513. vllm/v1/worker/dp_utils.py +230 -0
  1514. vllm/v1/worker/ec_connector_model_runner_mixin.py +87 -0
  1515. vllm/v1/worker/gpu_input_batch.py +975 -0
  1516. vllm/v1/worker/gpu_model_runner.py +5102 -0
  1517. vllm/v1/worker/gpu_ubatch_wrapper.py +466 -0
  1518. vllm/v1/worker/gpu_worker.py +894 -0
  1519. vllm/v1/worker/kv_connector_model_runner_mixin.py +144 -0
  1520. vllm/v1/worker/lora_model_runner_mixin.py +213 -0
  1521. vllm/v1/worker/tpu_input_batch.py +593 -0
  1522. vllm/v1/worker/tpu_model_runner.py +2173 -0
  1523. vllm/v1/worker/tpu_worker.py +355 -0
  1524. vllm/v1/worker/ubatch_utils.py +73 -0
  1525. vllm/v1/worker/ubatching.py +231 -0
  1526. vllm/v1/worker/utils.py +366 -0
  1527. vllm/v1/worker/worker_base.py +375 -0
  1528. vllm/v1/worker/xpu_model_runner.py +55 -0
  1529. vllm/v1/worker/xpu_worker.py +189 -0
  1530. vllm/version.py +39 -0
  1531. vllm/vllm_flash_attn/.gitkeep +0 -0
  1532. vllm_cpu_amxbf16-0.11.2.post2.dist-info/METADATA +345 -0
  1533. vllm_cpu_amxbf16-0.11.2.post2.dist-info/RECORD +1536 -0
  1534. vllm_cpu_amxbf16-0.11.2.post2.dist-info/WHEEL +5 -0
  1535. vllm_cpu_amxbf16-0.11.2.post2.dist-info/entry_points.txt +5 -0
  1536. vllm_cpu_amxbf16-0.11.2.post2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1907 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ # Copyright (c) Microsoft Corporation.
4
+ # Licensed under the MIT license.
5
+ # Code copied from Microsoft/MoE by Jacob Platin (jacobplatin@microsoft.com)
6
+ # but implemented by the Phi-Speech team
7
+ #!/usr/bin/env python3
8
+ import math
9
+
10
+ import torch
11
+ import torch.nn.functional as F
12
+ from torch import Tensor, nn
13
+
14
+
15
+ class BlockBase(nn.Module):
16
+ """Block abstract module"""
17
+
18
+ def __init__(self, input_size: int, output_size: int) -> None:
19
+ super().__init__()
20
+ self.input_size = input_size
21
+ self.output_size = output_size
22
+
23
+
24
+ def get_activation(name: str = "relu") -> torch.nn.Module:
25
+ """Select an activation function by name
26
+
27
+ Args:
28
+ name: str
29
+ activation function name,
30
+ one of ["relu", "gelu", "swish", "sigmoid"],
31
+ default "relu".
32
+ """
33
+ name = name.lower()
34
+ if name == "relu":
35
+ return nn.ReLU(inplace=True)
36
+ if name == "gelu":
37
+ return nn.GELU()
38
+ if name == "swish":
39
+ return Swish()
40
+ if name == "sigmoid":
41
+ return torch.nn.Sigmoid()
42
+ return nn.Identity()
43
+
44
+
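A minimal usage sketch (illustrative only, not part of the packaged file; it assumes get_activation and the Swish class defined below are in scope):
act = get_activation("swish")                 # returns the Swish module defined below
y = act(torch.randn(2, 4))                    # elementwise x * sigmoid(x)
assert isinstance(get_activation("unknown"), nn.Identity)   # unknown names fall back silently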
45
+ def adaptive_enc_mask(
46
+ x_len: int, chunk_start_idx: list[int], left_window: int = 0, right_window: int = 0
47
+ ) -> torch.Tensor:
48
+ """
49
+ Build the chunk-based attention mask used by the Transformer-Transducer streaming mode.
50
+ Args:
51
+ x_len: sequence length
52
+ chunk_start_idx: first idx of each chunk, such as [0,18,36,48].
53
+ It also supports adaptive chunk size [0,10,15,45]
54
+ left_window: how many left chunks can be seen
55
+ right_window: how many right chunks can be seen. It is used for
56
+ chunk overlap model.
57
+ Returns:
58
+ mask (torch.Tensor): a mask tensor for streaming model
59
+ Torch 1.0.1
60
+ tensor([[1., 1., 0., 0.],
61
+ [0., 1., 1., 0.],
62
+ [0., 0., 1., 1.]])
63
+ Torch 1.4.1
64
+ tensor([[True, True, False, False],
65
+ [False, True, True, False],
66
+ [False, False, True, True]])
67
+ """
68
+ chunk_start_idx = torch.Tensor(
69
+ chunk_start_idx
70
+ ).long() # first idx of each chunk, such as [0,18,36,48].
71
+ start_pad = torch.nn.functional.pad(
72
+ chunk_start_idx, (1, 0)
73
+ ) # append 0 to the beginning, so it becomes [0, 0, 18, 36, 48]
74
+ end_pad = torch.nn.functional.pad(
75
+ chunk_start_idx, (0, 1), value=x_len
76
+ ) # append x_len to the end, so it becomes [0,18,36,48, x_len]
77
+ seq_range = torch.arange(0, x_len).unsqueeze(-1) # seq_range size: [x_len, 1]
78
+ idx = ((seq_range < end_pad) & (seq_range >= start_pad)).nonzero()[
79
+ :, 1
80
+ ] # idx size: [x_len]
81
+ # boundary = end_pad[idx] # boundary size: [x_len]
82
+ seq_range_expand = (
83
+ torch.arange(0, x_len).unsqueeze(0).expand(x_len, -1)
84
+ ) # seq_range_expand size [x_len, x_len]
85
+ idx_left = idx - left_window
86
+ idx_left[idx_left < 0] = 0
87
+ boundary_left = start_pad[idx_left]
88
+ mask_left = seq_range_expand >= boundary_left.unsqueeze(-1)
89
+ idx_right = idx + right_window
90
+ idx_right[idx_right > len(chunk_start_idx)] = len(chunk_start_idx)
91
+ boundary_right = end_pad[idx_right]
92
+ mask_right = seq_range_expand < boundary_right.unsqueeze(-1)
93
+ return mask_left & mask_right
94
+
95
+
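A small illustrative sketch of the chunk mask (not part of the packaged file; assumes adaptive_enc_mask above is in scope):
# Three chunks covering frames [0, 4), [4, 8), [8, 10).
mask = adaptive_enc_mask(10, [0, 4, 8])
# mask is a [10, 10] boolean tensor; with left_window=right_window=0 it is
# block-diagonal, so each frame attends only to frames inside its own chunk.
wide = adaptive_enc_mask(10, [0, 4, 8], left_window=1)
# left_window=1 additionally exposes the chunk immediately to the left.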
96
+ class Swish(nn.Module):
97
+ """Implement Swish activation module.
98
+ From https://arxiv.org/pdf/2005.03191.pdf
99
+
100
+ """
101
+
102
+ def __init__(self) -> None:
103
+ super().__init__()
104
+ self.act_fn = nn.Sigmoid()
105
+
106
+ def forward(self, x: Tensor) -> Tensor:
107
+ """Apply Swish function
108
+
109
+ Args:
110
+ x: torch.Tensor
111
+ Input.
112
+ """
113
+ return x * self.act_fn(x)
114
+
115
+
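For reference, Swish here is x * sigmoid(x), the same function PyTorch ships as SiLU; a quick illustrative check (assumes Swish above is in scope):
x = torch.randn(3)
assert torch.allclose(Swish()(x), torch.nn.functional.silu(x))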
116
+ class GLU(nn.Module):
117
+ """Implement Gated Linear Unit (GLU) module"""
118
+
119
+ def __init__(self, dim: int = -1, act_name: str = "sigmoid") -> None:
120
+ super().__init__()
121
+ self.dim = dim
122
+ self.act_name = act_name.lower()
123
+
124
+ if self.act_name == "relu":
125
+ self.act_fn = nn.ReLU(inplace=True)
126
+ elif self.act_name == "gelu":
127
+ self.act_fn = nn.GELU()
128
+ elif self.act_name == "swish":
129
+ self.act_fn = Swish()
130
+ elif self.act_name == "sigmoid":
131
+ self.act_fn = nn.Sigmoid()
132
+ else:
133
+ self.act_fn = nn.Identity()
134
+
135
+ def forward(self, x: Tensor) -> Tensor:
136
+ """GLU forward
137
+ Split the input in two along `self.dim` and gate the first half
138
+ with the activation applied to the second half.
139
+
140
+ Args:
141
+ x: torch.Tensor
142
+ Input.
143
+
144
+ """
145
+ half_x, gate = x.chunk(2, dim=self.dim)
146
+ return half_x * self.act_fn(gate)
147
+
148
+
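An illustrative sketch of the gating (assumes GLU above is in scope): the chosen dimension is split in half and the first half is gated by the activation of the second half, so the feature size is halved.
glu = GLU(dim=-1, act_name="sigmoid")
y = glu(torch.randn(2, 5, 8))
assert y.shape == (2, 5, 4)   # last dimension halved by the gating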
149
+ # TODO: Abdel, this can be improved using GLU module
150
+ class GLUPointWiseConv(nn.Module):
151
+ """GLUPointWiseConv module
152
+ used for conformer architecture,
153
+ for more details see:
154
+ https://arxiv.org/pdf/2005.08100v1.pdf
155
+
156
+ Args:
157
+ input_dim: int
158
+ input channel size.
159
+ output_dim: int
160
+ output channel size.
161
+ kernel_size: int
162
+ kernel size
163
+ glu_type: str, optional
164
+ activation function one of
165
+ ["sigmoid", "relu", "gelu"]
166
+ default "sigmoid".
167
+ bias_in_glu: bool, optional
168
+ use additive bias in glu
169
+ causal: bool, optional
170
+ if set to True, padding is set to (kernel_size - 1),
171
+ i.e., the convolution can't see future frames.
172
+ default False.
173
+
174
+ """
175
+
176
+ def __init__(
177
+ self,
178
+ input_dim: int,
179
+ output_dim: int,
180
+ kernel_size: int,
181
+ glu_type: str = "sigmoid",
182
+ bias_in_glu: bool = True,
183
+ causal: bool = False,
184
+ ) -> None:
185
+ super().__init__()
186
+
187
+ self.glu_type = glu_type
188
+ self.output_dim = output_dim
189
+ self.bias_in_glu = bias_in_glu
190
+ if causal:
191
+ self.ext_pw_conv_1d = nn.Conv1d(
192
+ input_dim,
193
+ output_dim * 2,
194
+ kernel_size,
195
+ 1,
196
+ padding=(kernel_size - 1),
197
+ )
198
+ else:
199
+ self.ext_pw_conv_1d = nn.Conv1d(
200
+ input_dim,
201
+ output_dim * 2,
202
+ kernel_size,
203
+ 1,
204
+ padding=(kernel_size - 1) // 2,
205
+ )
206
+
207
+ if glu_type == "sigmoid":
208
+ self.glu_act = nn.Sigmoid()
209
+ elif glu_type == "relu":
210
+ self.glu_act = nn.ReLU()
211
+ elif glu_type == "gelu":
212
+ self.glu_act = nn.GELU()
213
+ elif glu_type == "swish":
214
+ self.glu_act = Swish()
215
+ else:
216
+ raise ValueError(f"Unsupported activation type {self.glu_act}")
217
+
218
+ if bias_in_glu:
219
+ self.b1 = nn.Parameter(torch.zeros(1, output_dim, 1))
220
+ self.b2 = nn.Parameter(torch.zeros(1, output_dim, 1))
221
+
222
+ def forward(self, x: Tensor) -> Tensor:
223
+ """
224
+ Args:
225
+ x: input tensor
226
+ """
227
+ # to be consistent with GLULinear, we assume the input always has the
228
+ # #channel (#dim) in the last dimension of the tensor, so need to
229
+ # switch the dimension first for 1D-Conv case
230
+ x = x.permute([0, 2, 1])
231
+ x = self.ext_pw_conv_1d(x)
232
+ if self.glu_type == "bilinear":
233
+ if self.bias_in_glu:
234
+ x = (x[:, 0 : self.output_dim, :] + self.b1) * (
235
+ x[:, self.output_dim : self.output_dim * 2, :] + self.b2
236
+ )
237
+ else:
238
+ x = (
239
+ (x[:, 0 : self.output_dim, :])
240
+ * (x[:, self.output_dim : self.output_dim * 2, :])
241
+ )
242
+ else:
243
+ if self.bias_in_glu:
244
+ x = (x[:, 0 : self.output_dim, :] + self.b1) * self.glu_act(
245
+ x[:, self.output_dim : self.output_dim * 2, :] + self.b2
246
+ )
247
+ else:
248
+ x = (x[:, 0 : self.output_dim, :]) * self.glu_act(
249
+ x[:, self.output_dim : self.output_dim * 2, :]
250
+ )
251
+
252
+ x = x.permute([0, 2, 1])
253
+ return x
254
+
255
+
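An illustrative shape sketch (assumes GLUPointWiseConv above is in scope): input is channel-last, and with causal=False and an odd kernel the time length is preserved while the channel count becomes output_dim.
pw = GLUPointWiseConv(input_dim=16, output_dim=32, kernel_size=3)
y = pw(torch.randn(2, 50, 16))   # [batch, time, channels] in
assert y.shape == (2, 50, 32)    # [batch, time, output_dim] out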
256
+ class DepthWiseSeperableConv1d(nn.Module):
257
+ """DepthWiseSeperableConv1d module used in Convnet module
258
+ for the conformer, for more details see:
259
+ https://arxiv.org/pdf/2005.08100v1.pdf
260
+
261
+ Args:
262
+ input_dim: int
263
+ input channel size.
264
+ depthwise_seperable_out_channel: int
265
+ if non-zero, this value is used as the
266
+ output channel count of the second
267
+ (pointwise) conv1d layer.
268
+ if 0, the second conv1d layer is skipped.
269
+ kernel_size: int
270
+ kernel_size
271
+ depthwise_multiplier: int
272
+ number of input_dim channels duplication. this value
273
+ will be used to compute the hidden channels of the Conv1D.
274
+ padding: int, optional
275
+ padding for the conv1d,
276
+ default: 0.
277
+
278
+ """
279
+
280
+ def __init__(
281
+ self,
282
+ input_dim: int,
283
+ depthwise_seperable_out_channel: int,
284
+ kernel_size: int,
285
+ depthwise_multiplier: int,
286
+ padding: int = 0,
287
+ ) -> None:
288
+ super().__init__()
289
+
290
+ self.dw_conv = nn.Conv1d(
291
+ input_dim,
292
+ input_dim * depthwise_multiplier,
293
+ kernel_size,
294
+ 1,
295
+ padding=padding,
296
+ groups=input_dim,
297
+ )
298
+
299
+ if depthwise_seperable_out_channel != 0:
300
+ self.pw_conv = nn.Conv1d(
301
+ input_dim * depthwise_multiplier,
302
+ depthwise_seperable_out_channel,
303
+ 1,
304
+ 1,
305
+ 0,
306
+ )
307
+ else:
308
+ self.pw_conv = nn.Identity()
309
+ self.depthwise_seperable_out_channel = depthwise_seperable_out_channel
310
+
311
+ def forward(self, x: Tensor) -> Tensor:
312
+ """
313
+
314
+ Args:
315
+ x: input tensor
316
+ """
317
+ x = self.dw_conv(x)
318
+ if self.depthwise_seperable_out_channel != 0:
319
+ x = self.pw_conv(x)
320
+ return x
321
+
322
+
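An illustrative shape sketch (assumes the class above is in scope); note that, unlike the GLU modules, this one takes channel-first input.
conv = DepthWiseSeperableConv1d(
    input_dim=16, depthwise_seperable_out_channel=32,
    kernel_size=3, depthwise_multiplier=2, padding=1,
)
y = conv(torch.randn(2, 16, 50))   # [batch, channels, time]
assert y.shape == (2, 32, 50)      # depthwise (16 -> 32 hidden), then 1x1 pointwise to 32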
323
+ class ConvModule(nn.Module):
324
+ """ConvModule Module for the conformer block.
325
+ for more details see:
326
+ https://arxiv.org/pdf/2005.08100v1.pdf
327
+
328
+ Args:
329
+ input_dim: int
330
+ input channel size.
331
+ ext_pw_out_channel: int
332
+ if > 0, ext_pw_out_channel is a dim channel size
333
+ for the last pointwise conv after swish activation.
334
+ depthwise_seperable_out_channel: int
335
+ if non-zero, this value is used as the
336
+ output channel count
337
+ of the second (pointwise) conv1d layer.
338
+ if 0, the second conv1d layer is skipped.
339
+ ext_pw_kernel_size: int
340
+ kernel size of the conv pointwise of the conformer.
341
+ kernel_size: int
342
+ kernel size.
343
+ depthwise_multiplier: int
344
+ number of input_dim channels duplication. this value
345
+ will be used to compute the hidden channels of the Conv1D.
346
+ dropout_rate: float
347
+ dropout rate.
348
+ causal: bool, optional
349
+ if set to True, the convolution has no access
350
+ to future frames. default False.
351
+ batch_norm: bool, optional
352
+ if set to True, apply batchnorm before activation.
353
+ default False
354
+ chunk_se: int, optional
355
+ 0 for offline SE.
356
+ 1 for streaming SE, where mean is computed
357
+ from the accumulated history up to the current chunk.
358
+ 2 for streaming SE, where mean is computed
359
+ by only the current chunk.
360
+ chunk_size: int, optional
361
+ chunk size for cnn. default 18
362
+ activation: str, optional
363
+ activation function used in ConvModule,
364
+ default: "relu".
365
+ glu_type: str, optional
366
+ activation function used for the glu,
367
+ default: "sigmoid".
368
+ bias_in_glu: bool, optional
369
+ if set to True, use additive bias in the weight module
370
+ before GLU.
371
+ linear_glu_in_convm: bool, optional
372
+ if set to True, use GLULinear module,
373
+ otherwise, use the GLUPointWiseConv module.
374
+ default to False.
375
+ export: bool, optional,
376
+ if set to True, padding is equal to 0. This is for inference,
377
+ or onnx export. Typically this is set by the export program or
378
+ the decoder program, and it isn't present in your config file.
379
+ default False
380
+ """
381
+
382
+ def __init__(
383
+ self,
384
+ input_dim: int,
385
+ ext_pw_out_channel: int,
386
+ depthwise_seperable_out_channel: int,
387
+ ext_pw_kernel_size: int,
388
+ kernel_size: int,
389
+ depthwise_multiplier: int,
390
+ dropout_rate: float,
391
+ causal: bool = False,
392
+ batch_norm: bool = False,
393
+ chunk_se: int = 0,
394
+ chunk_size: int = 18,
395
+ activation: str = "relu",
396
+ glu_type: str = "sigmoid",
397
+ bias_in_glu: bool = True,
398
+ linear_glu_in_convm: bool = False,
399
+ export: bool = False,
400
+ ) -> None:
401
+ super().__init__()
402
+ self.layer_norm = nn.LayerNorm(input_dim)
403
+ self.input_dim = input_dim
404
+ self.ext_pw_out_channel = ext_pw_out_channel
405
+ self.ext_pw_kernel_size = ext_pw_kernel_size
406
+ self.depthwise_seperable_out_channel = depthwise_seperable_out_channel
407
+ self.glu_type = glu_type
408
+ self.bias_in_glu = bias_in_glu
409
+ self.linear_glu_in_convm = linear_glu_in_convm
410
+ self.causal = causal
411
+
412
+ self._add_ext_pw_layer()
413
+
414
+ self.batch_norm = batch_norm
415
+ self.kernel_size = kernel_size
416
+
417
+ if batch_norm:
418
+ self.bn_layer = nn.BatchNorm1d(input_dim)
419
+
420
+ self.act = get_activation(activation)
421
+ self.dropout = nn.Dropout(dropout_rate)
422
+ self.export = export
423
+
424
+ if causal:
425
+ padding = 0 if export else kernel_size - 1
426
+ else:
427
+ padding = (kernel_size - 1) // 2
428
+
429
+ self.dw_sep_conv_1d = DepthWiseSeperableConv1d(
430
+ input_dim,
431
+ depthwise_seperable_out_channel,
432
+ kernel_size,
433
+ depthwise_multiplier,
434
+ padding=padding,
435
+ )
436
+
437
+ if depthwise_seperable_out_channel != 0:
438
+ if input_dim != depthwise_seperable_out_channel:
439
+ self.ln2 = nn.Linear(depthwise_seperable_out_channel, input_dim)
440
+ else:
441
+ if depthwise_multiplier != 1:
442
+ self.ln2 = nn.Linear(input_dim * depthwise_multiplier, input_dim)
443
+
444
+ def _add_ext_pw_layer(self) -> None:
445
+ """
446
+ This function is an extension of __init__ function
447
+ and dedicated to the convolution module creation
448
+ of the conformer.
449
+ """
450
+ self.ln1 = self.glu = self.bn_layer = self.ext_pw_conv_1d = (
451
+ nn.Identity()
452
+ ) # jit hacks.
453
+ self.squeeze_excitation = nn.Identity() # jit.
454
+ self.apply_ln1 = self.fix_len1 = False # jit.
455
+
456
+ if self.ext_pw_out_channel != 0:
457
+ if self.causal:
458
+ self.ext_pw_conv_1d = nn.Conv1d(
459
+ self.input_dim,
460
+ self.ext_pw_out_channel,
461
+ self.ext_pw_kernel_size,
462
+ 1,
463
+ padding=(self.ext_pw_kernel_size - 1),
464
+ )
465
+ if self.ext_pw_kernel_size > 1:
466
+ self.fix_len1 = True
467
+ else:
468
+ self.fix_len1 = False
469
+ else:
470
+ self.ext_pw_conv_1d = nn.Conv1d(
471
+ self.input_dim,
472
+ self.ext_pw_out_channel,
473
+ self.ext_pw_kernel_size,
474
+ 1,
475
+ padding=(self.ext_pw_kernel_size - 1) // 2,
476
+ )
477
+ self.fix_len1 = False
478
+
479
+ if self.linear_glu_in_convm:
480
+ self.glu = GLULinear(
481
+ self.input_dim,
482
+ self.ext_pw_out_channel,
483
+ self.glu_type,
484
+ self.bias_in_glu,
485
+ )
486
+ else:
487
+ self.glu = GLUPointWiseConv(
488
+ self.input_dim,
489
+ self.ext_pw_out_channel,
490
+ self.ext_pw_kernel_size,
491
+ self.glu_type,
492
+ self.bias_in_glu,
493
+ self.causal,
494
+ )
495
+
496
+ if self.input_dim != self.ext_pw_out_channel:
497
+ self.apply_ln1 = True
498
+ self.ln1 = nn.Linear(self.ext_pw_out_channel, self.input_dim)
499
+ else:
500
+ self.apply_ln1 = False
501
+ else:
502
+ self.pw_conv_simplify_w = torch.nn.Parameter(torch.ones(3))
503
+ self.pw_conv_simplify_b = torch.nn.Parameter(torch.zeros(3))
504
+
505
+ def forward(self, x: Tensor) -> Tensor:
506
+ """ConvModule Forward.
507
+
508
+ Args:
509
+ x: input tensor.
510
+ """
511
+ x = self.layer_norm(x)
512
+
513
+ if self.ext_pw_out_channel != 0:
514
+ x = self.glu(x)
515
+ if self.causal and self.ext_pw_kernel_size > 1:
516
+ x = x[:, : -(self.ext_pw_kernel_size - 1), :]
517
+ if self.apply_ln1:
518
+ x = self.ln1(x)
519
+ else:
520
+ x_0 = x * self.pw_conv_simplify_w[0] + self.pw_conv_simplify_b[0]
521
+ x_1 = x * self.pw_conv_simplify_w[1] + self.pw_conv_simplify_b[1]
522
+ x = x_0 + x_1
523
+
524
+ x = x.permute([0, 2, 1])
525
+
526
+ x = self.dw_sep_conv_1d(x)
527
+ if self.causal and self.kernel_size > 1:
528
+ x = x[:, :, : -(self.kernel_size - 1)]
529
+ if hasattr(self, "ln2"):
530
+ x = x.permute([0, 2, 1])
531
+ x = self.ln2(x)
532
+ x = x.permute([0, 2, 1])
533
+ if self.batch_norm:
534
+ x = self.bn_layer(x)
535
+ x = self.act(x)
536
+
537
+ if self.ext_pw_out_channel != 0:
538
+ x = self.ext_pw_conv_1d(x)
539
+ if self.fix_len1:
540
+ x = x[:, :, : -(self.ext_pw_kernel_size - 1)]
541
+
542
+ if self.apply_ln1:
543
+ x = x.permute([0, 2, 1])
544
+ x = self.ln1(x)
545
+ x = x.permute([0, 2, 1])
546
+
547
+ x = x.permute([0, 2, 1])
548
+ else:
549
+ x = x.unsqueeze(1).permute([0, 1, 3, 2])
550
+ x = x * self.pw_conv_simplify_w[2] + self.pw_conv_simplify_b[2]
551
+ x = x.squeeze(1)
552
+
553
+ x = self.dropout(x)
554
+ return x
555
+
556
+
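An illustrative, shape-preserving configuration (assumes ConvModule and its helpers above are in scope); any residual add around this block is up to the surrounding conformer layer, not this module.
conv = ConvModule(
    input_dim=64, ext_pw_out_channel=64, depthwise_seperable_out_channel=64,
    ext_pw_kernel_size=1, kernel_size=3, depthwise_multiplier=1, dropout_rate=0.0,
)
y = conv(torch.randn(2, 50, 64))   # [batch, time, input_dim]
assert y.shape == (2, 50, 64)      # same shape out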
557
+ class GLULinear(nn.Module):
558
+ """Linear + GLU module
559
+
560
+ Args:
561
+ input_dim: int
562
+ input size
563
+ output_dim: int
564
+ output size.
565
+ glu_type:
566
+ activation function name used in glu module.
567
+ default "sigmoid" (swish function).
568
+ bias_in_glu: bool, optional
569
+ If True, an additive bias is added. Default True.
570
+ """
571
+
572
+ def __init__(
573
+ self,
574
+ input_dim: int,
575
+ output_dim: int,
576
+ glu_type: str = "sigmoid",
577
+ bias_in_glu: bool = True,
578
+ ) -> None:
579
+ super().__init__()
580
+ self.linear = nn.Linear(input_dim, output_dim * 2, bias_in_glu)
581
+ self.glu_act = GLU(-1, glu_type)
582
+
583
+ def forward(self, x: Tensor) -> Tensor:
584
+ """GLULinear forward
585
+
586
+ Args:
587
+ x: input tensor.
588
+ """
589
+ x = self.linear(x)
590
+ return self.glu_act(x)
591
+
592
+
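An illustrative sketch (assumes GLULinear above is in scope): the linear layer expands to 2 * output_dim and the GLU gate halves it back.
glu_lin = GLULinear(input_dim=16, output_dim=32, glu_type="swish")
y = glu_lin(torch.randn(2, 5, 16))
assert y.shape == (2, 5, 32)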
593
+ class FeedForward(nn.Module):
594
+ """FeedForward Module.
595
+ For more details see Conformer paper:
596
+ https://arxiv.org/pdf/2005.08100.pdf
597
+
598
+ Args:
599
+ d_model: int
600
+ input size.
601
+ d_inner: int
602
+ inner (hidden) size.
603
+ dropout_rate: float,
604
+ dropout rate.
605
+ activation: str,
606
+ activation function name,
607
+ one of ["relu", "swish", "sigmoid"],
608
+ sigmoid activation is only used with "glu_in_fnn=True",
609
+ default "sigmoid".
610
+ bias_in_glu: bool, optional
611
+ """
612
+
613
+ def __init__(
614
+ self,
615
+ d_model: int,
616
+ d_inner: int,
617
+ dropout_rate: float,
618
+ activation: str = "sigmoid",
619
+ bias_in_glu: bool = True,
620
+ ) -> None:
621
+ super().__init__()
622
+ self.d_model = d_model
623
+ self.d_inner = d_inner
624
+
625
+ self.layer_norm = nn.LayerNorm(d_model)
626
+ module = GLULinear(d_model, d_inner, activation, bias_in_glu)
627
+ self.net = nn.Sequential(
628
+ module,
629
+ nn.Dropout(dropout_rate),
630
+ nn.Linear(d_inner, d_model),
631
+ nn.Dropout(dropout_rate),
632
+ )
633
+
634
+ def forward(self, x: Tensor) -> Tensor:
635
+ """FeedForward forward function.
636
+
637
+ Args:
638
+ x: input tensor.
639
+ """
640
+ out = self.net(self.layer_norm(x))
641
+
642
+ return out
643
+
644
+
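An illustrative sketch (assumes FeedForward above is in scope): pre-norm, GLULinear expansion to d_inner, projection back to d_model; the residual connection is left to the caller.
ff = FeedForward(d_model=64, d_inner=256, dropout_rate=0.0, activation="swish")
y = ff(torch.randn(2, 50, 64))
assert y.shape == (2, 50, 64)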
645
+ #### positional encoding starts here
646
+ def _pre_hook(
647
+ state_dict: dict,
648
+ prefix: str,
649
+ local_metadata: dict,
650
+ strict: bool,
651
+ missing_keys: list[str],
652
+ unexpected_keys: list[str],
653
+ error_msgs: list[str],
654
+ ) -> None:
655
+ """Perform pre-hook in load_state_dict for backward compatibility.
656
+
657
+ Note:
658
+ We saved self.pe until v.0.5.2 but we have omitted it later.
659
+ Therefore, we remove the item "pe" from `state_dict` for backward
660
+ compatibility.
661
+
662
+ """
663
+ k = prefix + "pe"
664
+ if k in state_dict:
665
+ state_dict.pop(k)
666
+
667
+
668
+ class T5RelativeAttentionLogitBias(nn.Module):
669
+ """
670
+ This module implements the relative position bias described in Section
671
+ 2.1 of the T5 paper: https://arxiv.org/pdf/1910.10683.pdf
672
+
673
+ The Huggingface implementation is used as a reference
674
+ https://github.com/huggingface/transformers/blob/v4.30.0/src/
675
+ transformers/models/t5/modeling_t5.py#L435
676
+
677
+ Modifies attention as Q*K^T + B, where B is a learned scalar bias based
678
+ on relative position of the query and key. It is HxNxN, where H is the
679
+ number of heads, N is the sequence length.
680
+
681
+ I've made these modifications to the original T5 bias:
682
+ - Skipping of the bucketing step. Original T5 bias converted rel
683
+ position distances into logarithmically increasing buckets. This is
684
+ supposed to help with length generalization.
685
+ - I just directly use rel position index as bias values, as we don't
686
+ need length generalization (40s max is good enough for ASR encoder),
687
+ and it keeps ONNX export simple.
688
+ - I've also extended it so that biases can be asymmetric, the default
689
+ implementation treats L->R and R->L the same. Asymmetric was found to
690
+ yield better results in my experiments.
691
+
692
+ Args:
693
+ num_heads: int
694
+ Number of attention heads
695
+ num_buckets: int
696
+ Number of buckets to use for relative attention bias. This is the
697
+ size of the learnable bias parameter. Bucketing is not yet
698
+ supported, so this defaults to -1 which means no bucketing is
699
+ used (max_distance determines size of bias param).
700
+ max_distance: int
701
+ Maximum distance to use for relative attention bias. With
702
+ num_buckets=-1, this directly controls the max size of the bias
703
+ parameter. When num_buckets > 0 is supported, this will control
704
+ the maximum distance for logarithmic bucketing after which all
705
+ positions are in the same bucket.
706
+ symmetric: bool
707
+ Whether to use symmetric or asymmetric biases. symmetric=False uses
708
+ 2x number of bias params to distinguish L->R from R->L. This was
709
+ found to be better for the encoder.
710
+ """
711
+
712
+ def __init__(
713
+ self,
714
+ num_heads: int,
715
+ num_buckets: int = -1,
716
+ max_distance: int = 1000,
717
+ symmetric: bool = False,
718
+ ) -> None:
719
+ super().__init__()
720
+ self.num_heads = num_heads
721
+ self.num_buckets = num_buckets
722
+ self.max_distance = max_distance
723
+ self.symmetric = symmetric
724
+ self._skip_bucketing = self.num_buckets < 0
725
+ if self._skip_bucketing:
726
+ self.num_buckets = max_distance
727
+ else:
728
+ raise NotImplementedError(
729
+ "T5 attention bias with bucketed positions is not yet tested"
730
+ )
731
+ if not self.symmetric:
732
+ self.num_buckets *= 2
733
+ self.bias_values = nn.Embedding(self.num_buckets, self.num_heads)
734
+
735
+ def forward(self, x: Tensor) -> Tensor:
736
+ # instantiate bias compatible with shape of x
737
+ maxpos = x.size(1)
738
+ context_position = torch.arange(maxpos, device=x.device, dtype=torch.long)[
739
+ :, None
740
+ ]
741
+ memory_position = torch.arange(maxpos, device=x.device, dtype=torch.long)[
742
+ None, :
743
+ ]
744
+ relative_position = memory_position - context_position
745
+ # clipping to a maximum distance using ops that play well with ONNX
746
+ # export
747
+ relative_position = relative_position.masked_fill(
748
+ relative_position < -self.max_distance, -self.max_distance
749
+ )
750
+ relative_position = relative_position.masked_fill(
751
+ relative_position > self.max_distance - 1, self.max_distance - 1
752
+ )
753
+
754
+ # mapping from relative position to index in the bias parameter
755
+ if self._skip_bucketing:
756
+ bias_idx = relative_position
757
+ else:
758
+ bias_idx = self._bucket_relative_position(relative_position)
759
+ if self.symmetric:
760
+ bias_idx = bias_idx.abs()
761
+ else:
762
+ bias_idx += self.num_buckets // 2
763
+
764
+ t5_rel_att_bias = self.bias_values(bias_idx) # [L, L, H]
765
+ t5_rel_att_bias = t5_rel_att_bias.permute(2, 0, 1).unsqueeze(0) # [1, H, L, L]
766
+
767
+ return t5_rel_att_bias
768
+
769
+ def _bucket_relative_position(self, relative_position: Tensor) -> Tensor:
770
+ # This is a placeholder (untested, likely buggy) that uses the HuggingFace
771
+ # implementation as a reference; it also needs to be extended to support
772
+ # asymmetric +/- positions.
773
+ relative_buckets = 0
774
+ if not self.causal:
775
+ self.num_buckets //= 2
776
+ relative_buckets += (relative_position > 0).to(
777
+ torch.long
778
+ ) * self.num_buckets
779
+ relative_position = torch.abs(relative_position)
780
+ else:
781
+ relative_position = -torch.min(
782
+ relative_position, torch.zeros_like(relative_position)
783
+ )
784
+ # now relative_position is in the range [0, inf)
785
+
786
+ # half of the buckets are for exact increments in positions
787
+ max_exact = self.num_buckets // 2
788
+ is_small = relative_position < max_exact
789
+
790
+ # The other half of the buckets are for logarithmically bigger bins in
791
+ # positions up to max_distance
792
+ relative_position_if_large = max_exact + (
793
+ torch.log(relative_position.float() / max_exact)
794
+ / math.log(self.max_distance / max_exact)
795
+ * (self.num_buckets - max_exact)
796
+ ).to(torch.long)
797
+ relative_position_if_large = torch.min(
798
+ relative_position_if_large,
799
+ torch.full_like(relative_position_if_large, self.num_buckets - 1),
800
+ )
801
+
802
+ relative_buckets += torch.where(
803
+ is_small, relative_position, relative_position_if_large
804
+ )
805
+ return relative_buckets
806
+
807
+
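An illustrative sketch of the default (no-bucketing, asymmetric) path (assumes the class above is in scope): only the sequence length of the input is used, and the returned bias broadcasts over the batch when added to Q*K^T.
bias = T5RelativeAttentionLogitBias(num_heads=4, max_distance=100)
b = bias(torch.randn(2, 37, 64))   # only x.size(1) == 37 matters here
assert b.shape == (1, 4, 37, 37)   # [1, heads, seq, seq], added to attention logits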
808
+ class AbsolutePositionalEncoding(nn.Module):
809
+ """Absolute Positional encoding module.
810
+ This module implements absolute sinusoidal positional encoding
811
+ from: https://arxiv.org/pdf/1706.03762.pdf
812
+
813
+ Args:
814
+ d_model: int
815
+ Input embedding size.
816
+ dropout_rate: float
817
+ dropout rate
818
+ max_len: int, optional
819
+ Maximum input length sequence, Default 5000
820
+
821
+ """
822
+
823
+ def __init__(self, d_model: int, dropout_rate: float, max_len: int = 5000) -> None:
824
+ """Construct an PositionalEncoding object."""
825
+ super().__init__()
826
+ self.d_model = d_model
827
+ self.xscale = math.sqrt(self.d_model)
828
+ self.dropout = torch.nn.Dropout(p=dropout_rate)
829
+ self.pe = None
830
+ self.extend_pe(torch.tensor(0.0).expand(1, max_len))
831
+ self._register_load_state_dict_pre_hook(_pre_hook)
832
+
833
+ def extend_pe(self, x: torch.Tensor) -> None:
834
+ """Reset the positional encodings.
835
+
836
+ Args:
837
+ x: input tensor
838
+ """
839
+ if self.pe is not None and self.pe.size(1) >= x.size(1):
840
+ if self.pe.dtype != x.dtype or self.pe.device != x.device:
841
+ self.pe = self.pe.to(dtype=x.dtype, device=x.device)
842
+ return
843
+ pe = torch.zeros(x.size(1), self.d_model)
844
+ position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
845
+ div_term = torch.exp(
846
+ torch.arange(0, self.d_model, 2, dtype=torch.float32)
847
+ * -(math.log(10000.0) / self.d_model)
848
+ )
849
+ pe[:, 0::2] = torch.sin(position * div_term)
850
+ pe[:, 1::2] = torch.cos(position * div_term)
851
+ pe = pe.unsqueeze(0)
852
+ self.pe = pe.to(device=x.device, dtype=x.dtype)
853
+
854
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
855
+ """Add positional encoding.
856
+
857
+ Args:
858
+ x: Input tensor. shape is (batch, time, ...)
859
+
860
+ Returns:
861
+ Encoded tensor. Its shape is (batch, time, ...)
862
+
863
+ """
864
+ self.extend_pe(x)
865
+ x = x * self.xscale + self.pe[:, : x.size(1)]
866
+ return self.dropout(x)
867
+
868
+
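An illustrative sketch (assumes the class above is in scope): the input is scaled by sqrt(d_model) and the precomputed sinusoids for the first x.size(1) positions are added.
pe = AbsolutePositionalEncoding(d_model=64, dropout_rate=0.0)
y = pe(torch.randn(2, 50, 64))
assert y.shape == (2, 50, 64)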
869
+ #### forward embedding layers start here
870
+ class MeanVarianceNormLayer(nn.Module):
871
+ """Mean/variance normalization layer.
872
+
873
+ Will subtract mean and multiply input by inverted standard deviation.
874
+ Typically used as a very first layer in a model.
875
+
876
+ Args:
877
+ input_size: int
878
+ layer input size.
879
+ """
880
+
881
+ def __init__(self, input_size: int) -> None:
882
+ super().__init__()
883
+ self.input_size = input_size
884
+ self.global_mean = nn.Parameter(torch.zeros(input_size))
885
+ self.global_invstd = nn.Parameter(torch.ones(input_size))
886
+
887
+ def forward(self, input_: Tensor) -> Tensor:
888
+ """MeanVarianceNormLayer Forward
889
+
890
+ Args:
891
+ input_: input tensor.
892
+ """
893
+ return (input_ - self.global_mean) * self.global_invstd
894
+
895
+
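An illustrative sketch (assumes the class above is in scope): with loaded global statistics the layer standardizes features as (x - mean) * invstd.
norm = MeanVarianceNormLayer(input_size=80)
norm.global_mean.data.fill_(2.0)
norm.global_invstd.data.fill_(0.5)
y = norm(torch.full((1, 10, 80), 4.0))
assert torch.allclose(y, torch.full((1, 10, 80), 1.0))   # (4 - 2) * 0.5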
896
+ class CausalConv1D(nn.Conv1d):
897
+ """
898
+ A causal version of nn.Conv1d where each step would have limited access to
899
+ locations on its right or left
900
+ All arguments are the same as nn.Conv1d except padding.
901
+
902
+ If padding is set to None, then padding is set automatically to make it a
903
+ causal convolution where each location would not see any steps on its right.
904
+
905
+ If padding is set as a list (size of 2), then padding[0] would be used as
906
+ left padding and padding[1] as right padding.
907
+ It would make it possible to control the number of steps to be accessible
908
+ on the right and left.
909
+ This mode is not supported when stride > 1. padding[0]+padding[1] should
910
+ be equal to (kernel_size - 1).
911
+ """
912
+
913
+ def __init__(
914
+ self,
915
+ in_channels: int,
916
+ out_channels: int,
917
+ kernel_size: int,
918
+ stride: int = 1,
919
+ padding: str | int = 0,
920
+ dilation: int = 1,
921
+ groups: int = 1,
922
+ bias: bool = True,
923
+ padding_mode: str = "zeros",
924
+ device=None,
925
+ dtype=None,
926
+ ) -> None:
927
+ self.cache_drop_size = None
928
+ if padding is None:
929
+ self._left_padding = kernel_size - 1
930
+ self._right_padding = stride - 1
931
+ else:
932
+ if stride != 1 and padding != kernel_size - 1:
933
+ raise ValueError("No striding allowed for non-symmetric convolutions!")
934
+ if isinstance(padding, int):
935
+ self._left_padding = padding
936
+ self._right_padding = padding
937
+ elif (
938
+ isinstance(padding, list)
939
+ and len(padding) == 2
940
+ and padding[0] + padding[1] == kernel_size - 1
941
+ ):
942
+ self._left_padding = padding[0]
943
+ self._right_padding = padding[1]
944
+ else:
945
+ raise ValueError(f"Invalid padding param: {padding}!")
946
+
947
+ self._max_cache_len = self._left_padding
948
+
949
+ super().__init__(
950
+ in_channels=in_channels,
951
+ out_channels=out_channels,
952
+ kernel_size=kernel_size,
953
+ stride=stride,
954
+ padding=0,
955
+ dilation=dilation,
956
+ groups=groups,
957
+ bias=bias,
958
+ padding_mode=padding_mode,
959
+ device=device,
960
+ dtype=dtype,
961
+ )
962
+
963
+ def update_cache(
964
+ self, x: Tensor, cache: Tensor | None = None
965
+ ) -> tuple[Tensor, Tensor | None]:
966
+ if cache is None:
967
+ new_x = F.pad(x, pad=(self._left_padding, self._right_padding))
968
+ next_cache = cache
969
+ else:
970
+ new_x = F.pad(x, pad=(0, self._right_padding))
971
+ new_x = torch.cat([cache, new_x], dim=-1)
972
+ if self.cache_drop_size > 0:
973
+ next_cache = new_x[:, :, : -self.cache_drop_size]
974
+ else:
975
+ next_cache = new_x
976
+ next_cache = next_cache[:, :, -cache.size(-1) :]
977
+ return new_x, next_cache
978
+
979
+ def forward(
980
+ self, x: Tensor, cache: Tensor | None = None
981
+ ) -> Tensor | tuple[Tensor, Tensor | None]:
982
+ x, cache = self.update_cache(x, cache=cache)
983
+ x = super().forward(x)
984
+ if cache is None:
985
+ return x
986
+ else:
987
+ return x, cache
988
+
989
+
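An illustrative sketch of the causal behaviour (assumes CausalConv1D above is in scope): with padding=None the input is left-padded by kernel_size - 1, the length is preserved at stride 1, and future frames cannot influence earlier outputs.
conv = CausalConv1D(in_channels=8, out_channels=8, kernel_size=3, padding=None)
x = torch.randn(1, 8, 20)
y = conv(x)
assert y.shape == (1, 8, 20)
x2 = x.clone(); x2[..., 10:] += 1.0                       # perturb only future frames
assert torch.allclose(conv(x2)[..., :10], y[..., :10])    # earlier outputs unchanged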
990
+ class CausalConv2D(nn.Conv2d):
991
+ """
992
+ A causal version of nn.Conv2d where each location in the 2D matrix would
993
+ have no access to locations on its right or down
994
+ All arguments are the same as nn.Conv2d except padding which should be
995
+ set as None
996
+ """
997
+
998
+ def __init__(
999
+ self,
1000
+ in_channels: int,
1001
+ out_channels: int,
1002
+ kernel_size: int,
1003
+ stride: int = 1,
1004
+ padding: str | int = 0,
1005
+ dilation: int = 1,
1006
+ groups: int = 1,
1007
+ bias: bool = True,
1008
+ padding_mode: str = "zeros",
1009
+ device=None,
1010
+ dtype=None,
1011
+ ) -> None:
1012
+ if padding is not None:
1013
+ raise ValueError("Argument padding should be set to None for CausalConv2D.")
1014
+ self._left_padding = kernel_size - 1
1015
+ self._right_padding = stride - 1
1016
+
1017
+ padding = 0
1018
+ super().__init__(
1019
+ in_channels,
1020
+ out_channels,
1021
+ kernel_size,
1022
+ stride,
1023
+ padding,
1024
+ dilation,
1025
+ groups,
1026
+ bias,
1027
+ padding_mode,
1028
+ device,
1029
+ dtype,
1030
+ )
1031
+
1032
+ def forward(
1033
+ self,
1034
+ x: Tensor,
1035
+ ) -> Tensor:
1036
+ x = F.pad(
1037
+ x,
1038
+ pad=(self._left_padding, self._right_padding, 0, 0),
1039
+ )
1040
+ x = super().forward(x)
1041
+ return x
1042
+
1043
+
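An illustrative shape sketch (assumes CausalConv2D above is in scope): as written, only the last axis is padded (left-padded by kernel_size - 1), so that axis keeps its length at stride 1 while the other spatial axis gets a valid-only convolution.
conv = CausalConv2D(in_channels=1, out_channels=4, kernel_size=3, stride=1, padding=None)
y = conv(torch.randn(1, 1, 10, 10))
assert y.shape == (1, 4, 8, 10)   # unpadded axis shrinks by kernel_size - 1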
1044
+ class NemoConvSubsampling(torch.nn.Module):
1045
+ """Convlutional subsampling module, taken from NeMo ASR
1046
+ (https://github.com/NVIDIA/NeMo/blob/b367413645d5c72db3c2c96e46e95a
1047
+ 34501479cf/nemo/collections/asr/parts/submodules/subsampling.py)
1048
+
1049
+ Striding Subsampling: "Speech-Transformer: A No-Recurrence
1050
+ Sequence-to-Sequence Model for Speech Recognition" by Linhao Dong
1051
+ et al. (https://ieeexplore.ieee.org/document/8462506)
1052
+
1053
+
1054
+ Compared with the EncoderConv2D (`input_layer: custom`), this is a
1055
+ much simplified approach, and uses no LayerNorm and far fewer Conv2Ds.
1056
+ Moreover, depthwise convolutions are used to reduce FLOPs, but the first
1057
+ layer is kept as a regular convolution so as not to degrade accuracy.
1058
+
1059
+ `Striding` and `dw_striding` are the same except that the latter uses
1060
+ depthwise convolutions after the first layer, whereas the former does not.
1061
+
1062
+ Args:
1063
+ subsampling_factor (int): Time reduction factor
1064
+ feat_in (int): size of the input features
1065
+ feat_out (int): size of the output features
1066
+ subsampling (str): The subsampling technique, choose from
1067
+ {"striding", "dw-striding", "striding_conv1d",
1068
+ "dw_striding_conv1d"}
1069
+ conv_channels (int): Number of channels for the convolution layers,
1070
+ default is 256.
1071
+ subsampling_conv_chunking_factor (int): Input chunking factor which
1072
+ can be -1 (no chunking), 1 (auto), or a power of 2. Default is 1.
1073
+ activation (Module): activation function, default is nn.ReLU()
1074
+ is_causal (bool): whether to use causal Conv1/2D, where each step will
1075
+ have limited access to locations on its right or left
1076
+ """
1077
+
1078
+ def __init__(
1079
+ self,
1080
+ feat_in: int,
1081
+ feat_out: int,
1082
+ subsampling_factor: int = 4,
1083
+ subsampling: str = "dw_striding",
1084
+ conv_channels: int = 256,
1085
+ subsampling_conv_chunking_factor: int = 1,
1086
+ activation: torch.nn.Module = nn.ReLU(), # noqa: B008
1087
+ is_causal: bool = False,
1088
+ ) -> None:
1089
+ super().__init__()
1090
+ self._subsampling = subsampling
1091
+ self._conv_channels = conv_channels
1092
+ self._feat_in = feat_in
1093
+ self._feat_out = feat_out
1094
+
1095
+ if subsampling_factor % 2 != 0:
1096
+ raise ValueError("Sampling factor should be a multiply of 2!")
1097
+ self._sampling_num = int(math.log(subsampling_factor, 2))
1098
+ self.subsampling_factor = subsampling_factor
1099
+ self.is_causal = is_causal
1100
+ self.subsampling_causal_cond = subsampling in (
1101
+ "dw_striding",
1102
+ "striding",
1103
+ "striding_conv1d",
1104
+ )
1105
+
1106
+ if (
1107
+ subsampling_conv_chunking_factor != -1
1108
+ and subsampling_conv_chunking_factor != 1
1109
+ and subsampling_conv_chunking_factor % 2 != 0
1110
+ ):
1111
+ raise ValueError(
1112
+ "subsampling_conv_chunking_factor should be -1, 1, or a power of 2"
1113
+ )
1114
+ self.subsampling_conv_chunking_factor = subsampling_conv_chunking_factor
1115
+
1116
+ in_channels = 1
1117
+ layers = []
1118
+
1119
+ if subsampling == "dw_striding":
1120
+ self._stride = 2
1121
+ self._kernel_size = 3
1122
+ self._ceil_mode = False
1123
+
1124
+ if self.is_causal:
1125
+ self._left_padding = self._kernel_size - 1
1126
+ self._right_padding = self._stride - 1
1127
+ self._max_cache_len = subsampling_factor + 1
1128
+ else:
1129
+ self._left_padding = (self._kernel_size - 1) // 2
1130
+ self._right_padding = (self._kernel_size - 1) // 2
1131
+ self._max_cache_len = 0
1132
+
1133
+ # Layer 1
1134
+ if self.is_causal:
1135
+ layers.append(
1136
+ CausalConv2D(
1137
+ in_channels=in_channels,
1138
+ out_channels=conv_channels,
1139
+ kernel_size=self._kernel_size,
1140
+ stride=self._stride,
1141
+ padding=None,
1142
+ )
1143
+ )
1144
+ else:
1145
+ layers.append(
1146
+ torch.nn.Conv2d(
1147
+ in_channels=in_channels,
1148
+ out_channels=conv_channels,
1149
+ kernel_size=self._kernel_size,
1150
+ stride=self._stride,
1151
+ padding=self._left_padding,
1152
+ )
1153
+ )
1154
+ in_channels = conv_channels
1155
+ layers.append(activation)
1156
+
1157
+ for i in range(self._sampling_num - 1):
1158
+ if self.is_causal:
1159
+ layers.append(
1160
+ CausalConv2D(
1161
+ in_channels=in_channels,
1162
+ out_channels=in_channels,
1163
+ kernel_size=self._kernel_size,
1164
+ stride=self._stride,
1165
+ padding=None,
1166
+ groups=in_channels,
1167
+ )
1168
+ )
1169
+ else:
1170
+ layers.append(
1171
+ torch.nn.Conv2d(
1172
+ in_channels=in_channels,
1173
+ out_channels=in_channels,
1174
+ kernel_size=self._kernel_size,
1175
+ stride=self._stride,
1176
+ padding=self._left_padding,
1177
+ groups=in_channels,
1178
+ )
1179
+ )
1180
+
1181
+ layers.append(
1182
+ torch.nn.Conv2d(
1183
+ in_channels=in_channels,
1184
+ out_channels=conv_channels,
1185
+ kernel_size=1,
1186
+ stride=1,
1187
+ padding=0,
1188
+ groups=1,
1189
+ )
1190
+ )
1191
+ layers.append(activation)
1192
+ in_channels = conv_channels
1193
+
1194
+ elif subsampling == "striding":
1195
+ self._stride = 2
1196
+ self._kernel_size = 3
1197
+ self._ceil_mode = False
1198
+
1199
+ if self.is_causal:
1200
+ self._left_padding = self._kernel_size - 1
1201
+ self._right_padding = self._stride - 1
1202
+ self._max_cache_len = subsampling_factor + 1
1203
+ else:
1204
+ self._left_padding = (self._kernel_size - 1) // 2
1205
+ self._right_padding = (self._kernel_size - 1) // 2
1206
+ self._max_cache_len = 0
1207
+
1208
+ for i in range(self._sampling_num):
1209
+ if self.is_causal:
1210
+ layers.append(
1211
+ CausalConv2D(
1212
+ in_channels=in_channels,
1213
+ out_channels=conv_channels,
1214
+ kernel_size=self._kernel_size,
1215
+ stride=self._stride,
1216
+ padding=None,
1217
+ )
1218
+ )
1219
+ else:
1220
+ layers.append(
1221
+ torch.nn.Conv2d(
1222
+ in_channels=in_channels,
1223
+ out_channels=conv_channels,
1224
+ kernel_size=self._kernel_size,
1225
+ stride=self._stride,
1226
+ padding=self._left_padding,
1227
+ )
1228
+ )
1229
+ layers.append(activation)
1230
+ in_channels = conv_channels
1231
+
1232
+ elif subsampling == "striding_conv1d":
1233
+ in_channels = feat_in
1234
+
1235
+ self._stride = 2
1236
+ self._kernel_size = 5
1237
+ self._ceil_mode = False
1238
+
1239
+ if self.is_causal:
1240
+ self._left_padding = self._kernel_size - 1
1241
+ self._right_padding = self._stride - 1
1242
+ self._max_cache_len = subsampling_factor + 1
1243
+ else:
1244
+ self._left_padding = (self._kernel_size - 1) // 2
1245
+ self._right_padding = (self._kernel_size - 1) // 2
1246
+ self._max_cache_len = 0
1247
+
1248
+ for i in range(self._sampling_num):
1249
+ if self.is_causal:
1250
+ layers.append(
1251
+ CausalConv1D(
1252
+ in_channels=in_channels,
1253
+ out_channels=(
1254
+ feat_out
1255
+ if self._sampling_num == i + 1
1256
+ else conv_channels
1257
+ ),
1258
+ kernel_size=self._kernel_size,
1259
+ stride=self._stride,
1260
+ padding=None,
1261
+ )
1262
+ )
1263
+ else:
1264
+ layers.append(
1265
+ torch.nn.Conv1d(
1266
+ in_channels=in_channels,
1267
+ out_channels=(
1268
+ feat_out
1269
+ if self._sampling_num == i + 1
1270
+ else conv_channels
1271
+ ),
1272
+ kernel_size=self._kernel_size,
1273
+ stride=self._stride,
1274
+ padding=self._left_padding,
1275
+ )
1276
+ )
1277
+ layers.append(activation)
1278
+ in_channels = conv_channels
1279
+
1280
+ elif subsampling == "dw_striding_conv1d":
1281
+ in_channels = feat_in
1282
+
1283
+ self._stride = 2
1284
+ self._kernel_size = 5
1285
+ self._ceil_mode = False
1286
+
1287
+ self._left_padding = (self._kernel_size - 1) // 2
1288
+ self._right_padding = (self._kernel_size - 1) // 2
1289
+
1290
+ # Layer 1
1291
+ layers.extend(
1292
+ [
1293
+ torch.nn.Conv1d(
1294
+ in_channels=in_channels,
1295
+ out_channels=in_channels,
1296
+ kernel_size=self._kernel_size,
1297
+ stride=self._stride,
1298
+ padding=self._left_padding,
1299
+ groups=in_channels,
1300
+ ),
1301
+ torch.nn.Conv1d(
1302
+ in_channels=in_channels,
1303
+ out_channels=(
1304
+ feat_out if self._sampling_num == 1 else conv_channels
1305
+ ),
1306
+ kernel_size=1,
1307
+ stride=1,
1308
+ padding=0,
1309
+ groups=1,
1310
+ ),
1311
+ ]
1312
+ )
1313
+ in_channels = conv_channels
1314
+ layers.append(activation)
1315
+
1316
+ for i in range(self._sampling_num - 1):
1317
+ layers.extend(
1318
+ [
1319
+ torch.nn.Conv1d(
1320
+ in_channels=in_channels,
1321
+ out_channels=in_channels,
1322
+ kernel_size=self._kernel_size,
1323
+ stride=self._stride,
1324
+ padding=self._left_padding,
1325
+ groups=in_channels,
1326
+ ),
1327
+ torch.nn.Conv1d(
1328
+ in_channels=in_channels,
1329
+ out_channels=(
1330
+ feat_out
1331
+ if self._sampling_num == i + 2
1332
+ else conv_channels
1333
+ ),
1334
+ kernel_size=1,
1335
+ stride=1,
1336
+ padding=0,
1337
+ groups=1,
1338
+ ),
1339
+ ]
1340
+ )
1341
+ layers.append(activation)
1342
+ in_channels = conv_channels
1343
+
1344
+ else:
1345
+ raise ValueError(f"Not valid sub-sampling: {subsampling}!")
1346
+
1347
+ if subsampling in ["dw_striding", "striding"]:
1348
+ in_length = torch.tensor(feat_in, dtype=torch.float)
1349
+ out_length = calc_length(
1350
+ lengths=in_length,
1351
+ all_paddings=self._left_padding + self._right_padding,
1352
+ kernel_size=self._kernel_size,
1353
+ stride=self._stride,
1354
+ ceil_mode=self._ceil_mode,
1355
+ repeat_num=self._sampling_num,
1356
+ )
1357
+ self.out = torch.nn.Linear(conv_channels * int(out_length), feat_out)
1358
+ self.conv2d_subsampling = True
1359
+ elif subsampling in ["striding_conv1d", "dw_striding_conv1d"]:
1360
+ self.out = None
1361
+ self.conv2d_subsampling = False
1362
+ else:
1363
+ raise ValueError(f"Not valid sub-sampling: {subsampling}!")
1364
+
1365
+ self.conv = torch.nn.Sequential(*layers)
1366
+
1367
+ def get_sampling_frames(self) -> list[int]:
1368
+ return [1, self.subsampling_factor]
1369
+
1370
+ def get_streaming_cache_size(self) -> list[int]:
1371
+ return [0, self.subsampling_factor + 1]
1372
+
1373
+ def forward(self, x: Tensor, mask: Tensor | None) -> tuple[Tensor, Tensor | None]:
1374
+ """
1375
+ Forward method for NeMo subsampling.
1376
+
1377
+ Args:
1378
+ x: input tensor
1379
+ mask: input mask
1380
+
1381
+ Returns:
1382
+ x: Resulting tensor from subsampling (B, T //
1383
+ time_reduction_factor, feat_out)
1384
+ pad_mask: tensor of padded hidden state sequences (B, 1, T //
1385
+ time_reduction_factor)
1386
+ """
1387
+ x = x.unsqueeze(1) if self.conv2d_subsampling else x.transpose(1, 2)
1388
+
1389
+ # split inputs if chunking_factor is set
1390
+ if self.subsampling_conv_chunking_factor != -1 and self.conv2d_subsampling:
1391
+ if self.subsampling_conv_chunking_factor == 1:
1392
+ # if subsampling_conv_chunking_factor is 1, we split only
1393
+ # if needed.
1394
+ # avoiding a bug / feature limiting indexing of tensors
1395
+ # to 2**31.
1396
+ # see https://github.com/pytorch/pytorch/issues/80020
1397
+ x_ceil = 2**31 / self._conv_channels * self._stride * self._stride
1398
+ need_to_split = torch.numel(x) > x_ceil
1399
+ else:
1400
+ # if subsampling_conv_chunking_factor > 1 we always split
1401
+ need_to_split = True
1402
+
1403
+ if need_to_split:
1404
+ x, success = self.conv_split_by_batch(x)
1405
+ if not success: # if unable to split by batch, try by channel
1406
+ if self._subsampling == "dw_striding":
1407
+ x = self.conv_split_by_channel(x)
1408
+ else:
1409
+ x = self.conv(x) # try anyway
1410
+ else:
1411
+ x = self.conv(x)
1412
+ else:
1413
+ x = self.conv(x)
1414
+
1415
+ # Flatten Channel and Frequency Axes
1416
+ if self.conv2d_subsampling:
1417
+ b, c, t, f = x.size()
1418
+ x = self.out(x.transpose(1, 2).reshape(b, t, -1))
1419
+ # Transpose to Channel Last mode
1420
+ else:
1421
+ x = x.transpose(1, 2)
1422
+
1423
+ if mask is None:
1424
+ return x, None
1425
+
1426
+ max_audio_length = x.shape[1]
1427
+ feature_lens = mask.sum(1)
1428
+ padding_length = torch.ceil(feature_lens / self.subsampling_factor)
1429
+ if self.is_causal and self.subsampling_causal_cond:
1430
+ feature_lens_remainder = feature_lens % self.subsampling_factor
1431
+ padding_length[feature_lens_remainder != 1] += 1
1432
+ pad_mask = torch.arange(0, max_audio_length, device=x.device).expand(
1433
+ padding_length.size(0), -1
1434
+ ) < padding_length.unsqueeze(1)
1435
+ return x, pad_mask.unsqueeze(1)
1436
+
+     def reset_parameters(self) -> None:
+         # initialize weights
+         if self._subsampling == "dw_striding":
+             with torch.no_grad():
+                 # init conv
+                 scale = 1.0 / self._kernel_size
+                 dw_max = (self._kernel_size**2) ** -0.5
+                 pw_max = self._conv_channels**-0.5
+
+                 torch.nn.init.uniform_(self.conv[0].weight, -scale, scale)
+                 torch.nn.init.uniform_(self.conv[0].bias, -scale, scale)
+
+                 for idx in range(2, len(self.conv), 3):
+                     torch.nn.init.uniform_(self.conv[idx].weight, -dw_max, dw_max)
+                     torch.nn.init.uniform_(self.conv[idx].bias, -dw_max, dw_max)
+                     torch.nn.init.uniform_(self.conv[idx + 1].weight, -pw_max, pw_max)
+                     torch.nn.init.uniform_(self.conv[idx + 1].bias, -pw_max, pw_max)
+
+                 # init fc (80 * 64 = 5120 from https://github.com/kssteven418/
+                 # Squeezeformer/blob/13c97d6cf92f2844d2cb3142b4c5bfa9ad1a8951/
+                 # src/models/conformer_encoder.py#L487
+                 fc_scale = (self._feat_out * self._feat_in / self._sampling_num) ** -0.5
+                 torch.nn.init.uniform_(self.out.weight, -fc_scale, fc_scale)
+                 torch.nn.init.uniform_(self.out.bias, -fc_scale, fc_scale)
+
+     def conv_split_by_batch(self, x: Tensor) -> tuple[Tensor, bool]:
+         """Tries to split the input by batch, run the conv, and concat the results."""
+         b, _, _, _ = x.size()
+         if b == 1:  # can't split if batch size is 1
+             return x, False
+
+         if self.subsampling_conv_chunking_factor > 1:
+             cf = self.subsampling_conv_chunking_factor
+         else:
+             # avoiding a bug / feature limiting indexing of tensors to 2**31
+             # see https://github.com/pytorch/pytorch/issues/80020
+             x_ceil = 2**31 / self._conv_channels * self._stride * self._stride
+             p = math.ceil(math.log(torch.numel(x) / x_ceil, 2))
+             cf = 2**p
+
+         new_batch_size = b // cf
+         if new_batch_size == 0:  # input is too big
+             return x, False
+
+         return (
+             torch.cat(
+                 [self.conv(chunk) for chunk in torch.split(x, new_batch_size, 0)]
+             ),
+             True,
+         )
+
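A sketch, with assumed values, of how the chunking factor cf above is chosen when
subsampling_conv_chunking_factor is left at its default: the batch is split into
2**p pieces so that each piece stays under the 2**31-element indexing limit scaled
by conv_channels and stride.

    import math

    conv_channels, stride = 256, 2
    x_numel = 6_000_000_000                            # elements in the input tensor
    x_ceil = 2**31 / conv_channels * stride * stride   # ~3.36e7
    p = math.ceil(math.log(x_numel / x_ceil, 2))       # smallest p with x_numel / 2**p <= x_ceil
    cf = 2**p                                          # -> 256
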
+     def conv_split_by_channel(self, x: Tensor) -> Tensor:
+         """For dw convs, splits the depthwise conv by channel and the pointwise
+         conv by time, runs them in chunks, and concats the results."""
+         x = self.conv[0](x)  # full conv2D
+         x = self.conv[1](x)  # activation
+
+         for i in range(self._sampling_num - 1):
+             _, c, t, _ = x.size()
+
+             if self.subsampling_conv_chunking_factor > 1:
+                 cf = self.subsampling_conv_chunking_factor
+             else:
+                 # avoiding a bug / feature limiting indexing of tensors
+                 # to 2**31
+                 # see https://github.com/pytorch/pytorch/issues/80020
+                 p = math.ceil(math.log(torch.numel(x) / 2**31, 2))
+                 cf = 2**p
+
+             new_c = int(c // cf)
+             if new_c == 0:
+                 new_c = 1
+
+             new_t = int(t // cf)
+             if new_t == 0:
+                 new_t = 1
+
+             x = self.channel_chunked_conv(
+                 self.conv[i * 3 + 2], new_c, x
+             )  # conv2D, depthwise
+
+             # splitting pointwise convs by time
+             x = torch.cat(
+                 [self.conv[i * 3 + 3](chunk) for chunk in torch.split(x, new_t, 2)],
+                 2,
+             )  # conv2D, pointwise
+             x = self.conv[i * 3 + 4](x)  # activation
+         return x
+
+     def channel_chunked_conv(
+         self, conv: torch.nn.Module, chunk_size: int, x: Tensor
+     ) -> Tensor:
+         """Performs channel chunked convolution"""
+
+         ind = 0
+         out_chunks = []
+         for chunk in torch.split(x, chunk_size, 1):
+             step = chunk.size()[1]
+
+             if self.is_causal:
+                 chunk = nn.functional.pad(
+                     chunk,
+                     pad=(
+                         self._kernel_size - 1,
+                         self._stride - 1,
+                         self._kernel_size - 1,
+                         self._stride - 1,
+                     ),
+                 )
+                 ch_out = nn.functional.conv2d(
+                     chunk,
+                     conv.weight[ind : ind + step, :, :, :],
+                     bias=conv.bias[ind : ind + step],
+                     stride=self._stride,
+                     padding=0,
+                     groups=step,
+                 )
+             else:
+                 ch_out = nn.functional.conv2d(
+                     chunk,
+                     conv.weight[ind : ind + step, :, :, :],
+                     bias=conv.bias[ind : ind + step],
+                     stride=self._stride,
+                     padding=self._left_padding,
+                     groups=step,
+                 )
+             out_chunks.append(ch_out)
+             ind += step
+
+         return torch.cat(out_chunks, 1)
+
+     def change_subsampling_conv_chunking_factor(
+         self, subsampling_conv_chunking_factor: int
+     ) -> None:
+         if (
+             subsampling_conv_chunking_factor != -1
+             and subsampling_conv_chunking_factor != 1
+             and subsampling_conv_chunking_factor % 2 != 0
+         ):
+             raise ValueError(
+                 "subsampling_conv_chunking_factor should be -1, 1, or a power of 2"
+             )
+         self.subsampling_conv_chunking_factor = subsampling_conv_chunking_factor
+
+
+ def calc_length(
+     lengths: Tensor,
+     all_paddings: int,
+     kernel_size: int,
+     stride: int,
+     ceil_mode: bool,
+     repeat_num: int = 1,
+ ) -> Tensor:
+     """Calculates the output length of a Tensor passed through a convolution or
+     max pooling layer"""
+     add_pad: float = all_paddings - kernel_size
+     one: float = 1.0
+     for i in range(repeat_num):
+         lengths = torch.div(lengths.to(dtype=torch.float) + add_pad, stride) + one
+         lengths = torch.ceil(lengths) if ceil_mode else torch.floor(lengths)
+     return lengths.to(dtype=torch.int)
+
+
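The loop above applies length = floor((length + all_paddings - kernel_size) / stride) + 1
(or ceil, if ceil_mode) once per repeat_num. A small worked example with assumed values:

    import torch

    # two stride-2 convs with kernel 3 and total padding 2 (assumed values)
    out = calc_length(
        torch.tensor([1000.0]),
        all_paddings=2,
        kernel_size=3,
        stride=2,
        ceil_mode=False,
        repeat_num=2,
    )
    # pass 1: floor((1000 + 2 - 3) / 2) + 1 = 500
    # pass 2: floor((500 + 2 - 3) / 2) + 1 = 250
    # out -> tensor([250], dtype=torch.int32)
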
+ #### multihead attention starts here
+ class AttModule(nn.Module):
+     """Attention abstraction module"""
+
+     def __init__(self) -> None:
+         super().__init__()
+         self.export_mode = False
+
+     def set_export(self, mode: bool = True) -> None:
+         """set the export mode"""
+         self.export_mode = mode
+
+     def forward(
+         self,
+         x: Tensor,
+         memory: Tensor | None = None,
+         pos_emb: Tensor | None = None,
+         att_mask: Tensor | None = None,
+     ) -> tuple[Tensor, Tensor, Tensor | None, Tensor | None]:
+         """AttModule forward
+
+         Args:
+             x: input tensor.
+             memory: memory tensor.
+             pos_emb: positional encoder embedding.
+             att_mask: attention mask tensor.
+         """
+         return x, memory, pos_emb, att_mask
+
+
+ class AttBlock(BlockBase, AttModule):
+     """Attention Block module to support both Attention and Block module."""
+
+     def memory_dims(self, max_len: bool = False) -> tuple[int, int]:
+         """memory dimensions"""
+         return (1, self.input_size)
+
+
+ def masked_softmax(
+     scores: Tensor,
+     mask: Tensor | None,
+ ) -> Tensor:
+     if mask is not None:
+         mask = mask.unsqueeze(1).eq(0)  # (batch, 1, time1, time2)
+         scores = scores.masked_fill(mask, -torch.inf)
+         attn = torch.softmax(scores, dim=-1).masked_fill(
+             mask, 0.0
+         )  # (batch, head, time1, time2)
+     else:
+         attn = torch.softmax(scores, dim=-1)  # (batch, head, time1, time2)
+     return attn
+
+
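A short sketch (assumed shapes) of masked_softmax above: key positions where the
mask is 0 receive exactly zero attention weight, and the remaining weights in each
row still sum to 1.

    import torch

    scores = torch.randn(2, 4, 5, 5)              # (batch, head, time1, time2)
    mask = torch.ones(2, 5, 5, dtype=torch.bool)  # (batch, time1, time2)
    mask[1, :, 3:] = False                        # pad out the last two key frames of item 1
    attn = masked_softmax(scores, mask)
    # attn[1, :, :, 3:] is all zeros; attn.sum(-1) is all ones
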
+ class MultiHeadedAttention(nn.Module):
+     """Multi-Head Attention layer with optional relative position embedding
+     and GLU.
+
+     Args:
+         n_head: int
+             the number of heads.
+         n_feat: int
+             input size features.
+         dropout_rate: float
+             dropout rate.
+         attention_inner_dim: int, optional
+             the attention dimension used in the class,
+             it can be different from the input dimension n_feat.
+             default: -1 (equal to n_feat).
+         use_pt_scaled_dot_product_attention: bool, optional
+             if set to True, use PyTorch scaled dot product attention in
+             training.
+             NOTE: this will NOT be used in ONNX decoding due to a lack of
+             support. In that case, we use the original attention
+             implementation, which shows no regression.
+             default: False.
+         n_value: int, optional
+             if set to a value other than -1, use a different dimension for
+             value. With the default value (i.e. -1), it is backward compatible.
+         group_size: int, optional. must divide `n_head`
+             if group_size > 1: GQA
+             if group_size = 1: MHA
+             if group_size = n_head: MQA
+     """
+
+     inv_sqrt_d_k: torch.jit.Final[float]
+     h: torch.jit.Final[int]
+     h_k: torch.jit.Final[int]
+     g: torch.jit.Final[int]
+
+     def __init__(
+         self,
+         n_head: int,
+         n_feat: int,
+         dropout_rate: float,
+         attention_inner_dim: int = -1,
+         glu_type: str = "swish",
+         bias_in_glu: bool = True,
+         use_pt_scaled_dot_product_attention: bool = False,
+         n_value: int = -1,
+         group_size: int = 1,
+     ) -> None:
+         super().__init__()
+         if n_value == -1:
+             n_value = n_feat
+         if attention_inner_dim == -1:
+             attention_inner_dim = n_feat
+         assert attention_inner_dim % n_head == 0
+
+         # We assume d_v always equals d_k
+         self.d_k = attention_inner_dim // n_head
+         self.inv_sqrt_d_k = 1.0 / math.sqrt(self.d_k)
+         self.h = n_head
+         assert n_head % group_size == 0, "group_size must divide n_head"
+         self.g = group_size
+         self.h_k = n_head // group_size
+
+         self.linear_q = nn.Linear(n_feat, attention_inner_dim)
+         self.linear_k = nn.Linear(n_feat, attention_inner_dim // group_size)
+         self.linear_v = nn.Linear(n_value, attention_inner_dim // group_size)
+         self.linear_out = nn.Linear(attention_inner_dim // group_size, n_value)
+
+         self.attn = torch.jit.Attribute(None, Tensor | None)
+         self.dropout = nn.Dropout(p=dropout_rate)
+         self.dropout_rate = dropout_rate
+         self.use_pt_scaled_dot_product_attention = use_pt_scaled_dot_product_attention
+
+         if use_pt_scaled_dot_product_attention and group_size > 1:
+             raise ValueError("Cannot use PT Scaled Attention with GQA")
+
+         # Torchscript eager quantization. Note that these functions below are
+         # NOOPs and have very little impact on performance unless quantization
+         # is enabled.
+         self.quant_q = torch.ao.quantization.QuantStub()
+         self.quant_x = torch.ao.quantization.QuantStub()
+         self.dequant = torch.ao.quantization.DeQuantStub()
+         self.ffunc = torch.ao.nn.quantized.FloatFunctional()
+
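A quick sketch (assumed sizes) of how group_size shapes the projections built in
__init__ above: with n_head=8 and group_size=2, queries keep 8 heads while keys and
values share 4 heads, so the k/v/out projections are half the width of the q projection.

    n_feat, n_head, group_size = 512, 8, 2
    attention_inner_dim = n_feat                 # default: -1 resolves to n_feat
    d_k = attention_inner_dim // n_head          # 64 per head
    h_k = n_head // group_size                   # 4 key/value heads (GQA)
    # linear_q: 512 -> 512, linear_k / linear_v: 512 -> 256, linear_out: 256 -> 512
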
+     def forward(
+         self,
+         query: Tensor,
+         key: Tensor,
+         value: Tensor,
+         pos_k: Tensor | None,
+         pos_v: Tensor | None,
+         mask: Tensor | None,
+         relative_attention_bias: Tensor | None = None,
+     ) -> Tensor:
+         """Compute 'Scaled Dot Product Attention'.
+
+         Args:
+             query: query tensor (batch, time1, size)
+             key: key tensor (batch, time2, size)
+             value: value tensor (batch, time2, size)
+             pos_k: key tensor used for relative positional embedding.
+             pos_v: value tensor used for relative positional embedding.
+             mask: mask tensor (batch, time1, time2)
+             relative_attention_bias: bias added to attention logits w.r.t.
+                 relative positions
+                 (1, n_head, time1, time2)
+         """
+         n_batch = query.size(0)
+
+         q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)  # (b, t, h, d_k)
+         k = self.linear_k(key).view(n_batch, -1, self.h_k, self.d_k)  # (b, t, h_k, d_k)
+         v = self.linear_v(value).view(n_batch, -1, self.h_k, self.d_k)
+         q = (
+             q.transpose(1, 2)
+             if self.use_pt_scaled_dot_product_attention and not torch.jit.is_scripting()
+             else q.transpose(1, 2) * self.inv_sqrt_d_k
+         )
+         k = k.transpose(1, 2)  # (batch, head_k, time2, d_k)
+         v = v.transpose(1, 2)  # (batch, head_k, time2, d_k)
+
+         if self.use_pt_scaled_dot_product_attention and not torch.jit.is_scripting():
+             attn_mask = None
+             if mask is not None:
+                 mask = mask.unsqueeze(1)
+                 if relative_attention_bias is not None:
+                     attn_mask = mask + relative_attention_bias
+                 else:
+                     attn_mask = mask
+                 if mask.dtype != q.dtype:
+                     attn_mask = attn_mask.to(q.dtype)
+
+             with torch.nn.attention.sdpa_kernel(
+                 [
+                     torch.nn.attention.SDPBackend.FLASH_ATTENTION,
+                     torch.nn.attention.SDPBackend.EFFICIENT_ATTENTION,
+                     torch.nn.attention.SDPBackend.MATH,
+                     torch.nn.attention.SDPBackend.CUDNN_ATTENTION,
+                 ]
+             ):
+                 x = torch.nn.functional.scaled_dot_product_attention(
+                     q,
+                     k,
+                     v,
+                     attn_mask=attn_mask,
+                     dropout_p=self.dropout_rate,
+                 )
+         else:
+             if self.h != self.h_k:
+                 q = q.reshape(n_batch, self.g, self.h_k, -1, self.d_k)
+                 A = torch.einsum("b g h t d, b h s d -> b h t s", q, k)
+             else:
+                 A = torch.matmul(q, k.transpose(-2, -1))
+             if pos_k is not None:
+                 if self.h != self.h_k:
+                     B = torch.einsum("b g h t d, t s d -> b h t s", q, pos_k)
+                 else:
+                     reshape_q = (
+                         q.contiguous()
+                         .view(n_batch * self.h, -1, self.d_k)
+                         .transpose(0, 1)
+                     )  # (t1, nh, dk)
+                     B = torch.matmul(
+                         reshape_q, pos_k.transpose(-2, -1)
+                     )  # pos_k: (t1, dk, t2)
+                     B = B.transpose(0, 1).view(
+                         n_batch, self.h, pos_k.size(0), pos_k.size(1)
+                     )
+                 scores = A + B
+             else:
+                 scores = A
+
+             if relative_attention_bias is not None:
+                 scores = scores + relative_attention_bias
+
+             attn = masked_softmax(scores, mask)  # (batch, head, time1, time2)
+
+             self.attn = attn
+
+             p_attn = self.dropout(attn)
+             x = torch.matmul(p_attn.to(v.dtype), v)  # (batch, head, time1, d_k)
+             if pos_v is not None:
+                 reshape_attn = (
+                     p_attn.contiguous()
+                     .view(n_batch * self.h, pos_v.size(0), pos_v.size(1))
+                     .transpose(0, 1)
+                 )  # (t1, bh, t2)
+
+                 attn_v = (
+                     torch.matmul(reshape_attn, pos_v)
+                     .transpose(0, 1)
+                     .contiguous()
+                     .view(n_batch, self.h, pos_v.size(0), self.d_k)
+                 )
+                 x = x + attn_v
+             x = (
+                 x.transpose(1, 2).contiguous().view(n_batch, -1, self.h_k * self.d_k)
+             )  # (batch, time1, d_model)
+
+         return self.linear_out(x)  # (batch, time1, d_model)
+
+
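A usage sketch (assumed sizes; plain MHA path with no relative positions, taking the
default non-SDPA branch) for the forward() above:

    import torch

    mha = MultiHeadedAttention(n_head=8, n_feat=512, dropout_rate=0.0)
    query = torch.randn(2, 50, 512)           # (batch, time1, n_feat)
    key = value = torch.randn(2, 60, 512)     # (batch, time2, n_feat)
    mask = torch.ones(2, 50, 60)              # 1 = attend, 0 = masked
    out = mha(query, key, value, pos_k=None, pos_v=None, mask=mask)
    # out: (2, 50, 512)
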
+ class MultiSequential(torch.nn.Sequential):
+     """Multi-input multi-output torch.nn.Sequential"""
+
+     @torch.jit.ignore
+     def forward(self, *args) -> tuple:
+         """Forward method implementation."""
+         for m in self:
+             args = m(*args)
+         return args
+
+
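A tiny sketch of what MultiSequential adds over nn.Sequential: every module in the
chain receives and returns the full argument tuple. AddOne is a hypothetical toy
module used only for illustration.

    import torch
    from torch import nn

    class AddOne(nn.Module):
        def forward(self, x, count):
            # pass both arguments through, incrementing each
            return x + 1, count + 1

    chain = MultiSequential(AddOne(), AddOne(), AddOne())
    x, count = chain(torch.zeros(3), 0)
    # x -> tensor([3., 3., 3.]), count -> 3
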
+ def get_offset(input_layer: str, time_reduction: int) -> int:
+     """Get the offset used for determining the number of frames of a
+     subsampled feature.
+
+     Args:
+         input_layer: Type of the input layer
+         time_reduction: time reduction factor for downsampling a feature
+     Returns:
+         int: offset
+     """
+     if input_layer in ("conv2d", "nemo_conv") and time_reduction == 4:
+         return 3
+     if input_layer in ("conv2d",) and time_reduction == 6:
+         return 1
+     if input_layer in ("conv2d", "nemo_conv") and time_reduction == 8:
+         return 7
+     return 0
+
+
+ def unfold_tensor(xs_pad: Tensor, max_seq_len: int) -> Tensor:
+     """
+     For a given tensor with shape (N, T, D), if the sequence length T is
+     longer than max_seq_len, this function unfolds it to
+     (N * T', max_seq_len, D), where T' is T // max_seq_len.
+     Args:
+         xs_pad: input tensor with shape (N, T, D)
+         max_seq_len: maximum sequence length
+     """
+     _, _, D = xs_pad.shape
+     xs_pad = xs_pad.transpose(-1, -2)  # convert to N, D, T
+     # N x D x 1 x T => N x (D x max_seq_len) x T'
+     xs_pad = F.unfold(
+         xs_pad[..., None, :],
+         kernel_size=(1, max_seq_len),
+         stride=(1, max_seq_len),
+     )
+     new_bsz, _, slen = xs_pad.shape
+     # N x D x max_seq_len x T'
+     xs_pad = xs_pad.view(new_bsz, -1, max_seq_len, slen)
+     # N x T' x max_seq_len x D
+     xs_pad = xs_pad.permute(0, 3, 2, 1).contiguous()
+     # NT' x max_seq_len x D
+     xs_pad = xs_pad.view(-1, max_seq_len, D)
+     return xs_pad
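A shape-only sketch (assumed sizes) of unfold_tensor: a batch of 2 sequences of
100 frames with 80 features, split into windows of 25 frames.

    import torch

    xs_pad = torch.randn(2, 100, 80)            # (N, T, D)
    chunks = unfold_tensor(xs_pad, max_seq_len=25)
    # chunks: (8, 25, 80) -- N * (T // max_seq_len) windows of max_seq_len frames;
    # chunks[0] equals xs_pad[0, :25]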