vllm_cpu-0.8.5.post2-cp310-cp310-manylinux_2_17_x86_64.whl

This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of vllm-cpu might be problematic.

Files changed (1103)
  1. vllm/_C.abi3.so +0 -0
  2. vllm/__init__.py +170 -0
  3. vllm/_custom_ops.py +1536 -0
  4. vllm/_ipex_ops.py +241 -0
  5. vllm/_version.py +34 -0
  6. vllm/adapter_commons/__init__.py +0 -0
  7. vllm/adapter_commons/layers.py +16 -0
  8. vllm/adapter_commons/models.py +105 -0
  9. vllm/adapter_commons/request.py +25 -0
  10. vllm/adapter_commons/utils.py +92 -0
  11. vllm/adapter_commons/worker_manager.py +38 -0
  12. vllm/assets/__init__.py +0 -0
  13. vllm/assets/audio.py +38 -0
  14. vllm/assets/base.py +40 -0
  15. vllm/assets/image.py +31 -0
  16. vllm/assets/video.py +103 -0
  17. vllm/attention/__init__.py +19 -0
  18. vllm/attention/backends/__init__.py +0 -0
  19. vllm/attention/backends/abstract.py +306 -0
  20. vllm/attention/backends/blocksparse_attn.py +457 -0
  21. vllm/attention/backends/cpu_mla.py +303 -0
  22. vllm/attention/backends/flash_attn.py +999 -0
  23. vllm/attention/backends/flashinfer.py +1092 -0
  24. vllm/attention/backends/flashmla.py +242 -0
  25. vllm/attention/backends/hpu_attn.py +301 -0
  26. vllm/attention/backends/ipex_attn.py +396 -0
  27. vllm/attention/backends/mla/__init__.py +0 -0
  28. vllm/attention/backends/mla/common.py +1444 -0
  29. vllm/attention/backends/pallas.py +346 -0
  30. vllm/attention/backends/placeholder_attn.py +399 -0
  31. vllm/attention/backends/rocm_aiter_mla.py +412 -0
  32. vllm/attention/backends/rocm_flash_attn.py +969 -0
  33. vllm/attention/backends/torch_sdpa.py +691 -0
  34. vllm/attention/backends/triton_mla.py +113 -0
  35. vllm/attention/backends/utils.py +609 -0
  36. vllm/attention/backends/xformers.py +798 -0
  37. vllm/attention/layer.py +443 -0
  38. vllm/attention/ops/__init__.py +0 -0
  39. vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
  40. vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +432 -0
  41. vllm/attention/ops/blocksparse_attention/interface.py +238 -0
  42. vllm/attention/ops/blocksparse_attention/utils.py +244 -0
  43. vllm/attention/ops/chunked_prefill_paged_decode.py +366 -0
  44. vllm/attention/ops/flashmla.py +115 -0
  45. vllm/attention/ops/hpu_paged_attn.py +105 -0
  46. vllm/attention/ops/ipex_attn.py +193 -0
  47. vllm/attention/ops/merge_attn_states.py +42 -0
  48. vllm/attention/ops/nki_flash_attn.py +905 -0
  49. vllm/attention/ops/paged_attn.py +255 -0
  50. vllm/attention/ops/prefix_prefill.py +902 -0
  51. vllm/attention/ops/rocm_aiter_mla.py +42 -0
  52. vllm/attention/ops/rocm_aiter_paged_attn.py +101 -0
  53. vllm/attention/ops/triton_decode_attention.py +675 -0
  54. vllm/attention/ops/triton_flash_attention.py +1375 -0
  55. vllm/attention/ops/triton_merge_attn_states.py +96 -0
  56. vllm/attention/selector.py +186 -0
  57. vllm/attention/utils/fa_utils.py +54 -0
  58. vllm/beam_search.py +82 -0
  59. vllm/benchmarks/__init__.py +0 -0
  60. vllm/benchmarks/datasets.py +831 -0
  61. vllm/benchmarks/endpoint_request_func.py +160 -0
  62. vllm/benchmarks/latency.py +181 -0
  63. vllm/benchmarks/serve.py +925 -0
  64. vllm/benchmarks/throughput.py +608 -0
  65. vllm/benchmarks/utils.py +69 -0
  66. vllm/collect_env.py +795 -0
  67. vllm/compilation/__init__.py +0 -0
  68. vllm/compilation/backends.py +715 -0
  69. vllm/compilation/compiler_interface.py +437 -0
  70. vllm/compilation/counter.py +33 -0
  71. vllm/compilation/decorators.py +249 -0
  72. vllm/compilation/fix_functionalization.py +182 -0
  73. vllm/compilation/fusion.py +617 -0
  74. vllm/compilation/fx_utils.py +60 -0
  75. vllm/compilation/inductor_pass.py +114 -0
  76. vllm/compilation/monitor.py +38 -0
  77. vllm/compilation/multi_output_match.py +108 -0
  78. vllm/compilation/noop_elimination.py +135 -0
  79. vllm/compilation/pass_manager.py +74 -0
  80. vllm/compilation/sequence_parallelism.py +266 -0
  81. vllm/compilation/torch25_custom_graph_pass.py +41 -0
  82. vllm/compilation/vllm_inductor_pass.py +68 -0
  83. vllm/compilation/wrapper.py +129 -0
  84. vllm/config.py +4179 -0
  85. vllm/connections.py +170 -0
  86. vllm/core/__init__.py +0 -0
  87. vllm/core/block/__init__.py +0 -0
  88. vllm/core/block/block_table.py +398 -0
  89. vllm/core/block/common.py +370 -0
  90. vllm/core/block/cpu_gpu_block_allocator.py +440 -0
  91. vllm/core/block/interfaces.py +318 -0
  92. vllm/core/block/naive_block.py +465 -0
  93. vllm/core/block/prefix_caching_block.py +1134 -0
  94. vllm/core/block/utils.py +27 -0
  95. vllm/core/block_manager.py +520 -0
  96. vllm/core/evictor.py +156 -0
  97. vllm/core/interfaces.py +134 -0
  98. vllm/core/placeholder_block_space_manager.py +99 -0
  99. vllm/core/scheduler.py +2060 -0
  100. vllm/device_allocator/__init__.py +0 -0
  101. vllm/device_allocator/cumem.py +280 -0
  102. vllm/distributed/__init__.py +5 -0
  103. vllm/distributed/communication_op.py +40 -0
  104. vllm/distributed/device_communicators/__init__.py +0 -0
  105. vllm/distributed/device_communicators/base_device_communicator.py +151 -0
  106. vllm/distributed/device_communicators/cpu_communicator.py +139 -0
  107. vllm/distributed/device_communicators/cuda_communicator.py +131 -0
  108. vllm/distributed/device_communicators/cuda_wrapper.py +179 -0
  109. vllm/distributed/device_communicators/custom_all_reduce.py +301 -0
  110. vllm/distributed/device_communicators/custom_all_reduce_utils.py +257 -0
  111. vllm/distributed/device_communicators/hpu_communicator.py +45 -0
  112. vllm/distributed/device_communicators/neuron_communicator.py +19 -0
  113. vllm/distributed/device_communicators/pynccl.py +217 -0
  114. vllm/distributed/device_communicators/pynccl_wrapper.py +340 -0
  115. vllm/distributed/device_communicators/shm_broadcast.py +557 -0
  116. vllm/distributed/device_communicators/tpu_communicator.py +93 -0
  117. vllm/distributed/device_communicators/xpu_communicator.py +54 -0
  118. vllm/distributed/kv_transfer/README.md +29 -0
  119. vllm/distributed/kv_transfer/__init__.py +11 -0
  120. vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
  121. vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
  122. vllm/distributed/kv_transfer/kv_connector/base.py +127 -0
  123. vllm/distributed/kv_transfer/kv_connector/factory.py +107 -0
  124. vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +98 -0
  125. vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +201 -0
  126. vllm/distributed/kv_transfer/kv_connector/simple_connector.py +328 -0
  127. vllm/distributed/kv_transfer/kv_connector/utils.py +90 -0
  128. vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +8 -0
  129. vllm/distributed/kv_transfer/kv_connector/v1/base.py +209 -0
  130. vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +131 -0
  131. vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +383 -0
  132. vllm/distributed/kv_transfer/kv_connector_agent.py +76 -0
  133. vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
  134. vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +174 -0
  135. vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +160 -0
  136. vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +236 -0
  137. vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
  138. vllm/distributed/kv_transfer/kv_pipe/base.py +66 -0
  139. vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +279 -0
  140. vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +279 -0
  141. vllm/distributed/kv_transfer/kv_transfer_state.py +70 -0
  142. vllm/distributed/parallel_state.py +1209 -0
  143. vllm/distributed/utils.py +366 -0
  144. vllm/engine/__init__.py +0 -0
  145. vllm/engine/arg_utils.py +1724 -0
  146. vllm/engine/async_llm_engine.py +1261 -0
  147. vllm/engine/async_timeout.py +191 -0
  148. vllm/engine/llm_engine.py +2150 -0
  149. vllm/engine/metrics.py +717 -0
  150. vllm/engine/metrics_types.py +96 -0
  151. vllm/engine/multiprocessing/__init__.py +183 -0
  152. vllm/engine/multiprocessing/client.py +745 -0
  153. vllm/engine/multiprocessing/engine.py +450 -0
  154. vllm/engine/output_processor/__init__.py +0 -0
  155. vllm/engine/output_processor/interfaces.py +74 -0
  156. vllm/engine/output_processor/multi_step.py +210 -0
  157. vllm/engine/output_processor/single_step.py +136 -0
  158. vllm/engine/output_processor/stop_checker.py +130 -0
  159. vllm/engine/output_processor/util.py +27 -0
  160. vllm/engine/protocol.py +302 -0
  161. vllm/entrypoints/__init__.py +0 -0
  162. vllm/entrypoints/api_server.py +177 -0
  163. vllm/entrypoints/chat_utils.py +1259 -0
  164. vllm/entrypoints/cli/__init__.py +0 -0
  165. vllm/entrypoints/cli/benchmark/__init__.py +0 -0
  166. vllm/entrypoints/cli/benchmark/base.py +38 -0
  167. vllm/entrypoints/cli/benchmark/latency.py +29 -0
  168. vllm/entrypoints/cli/benchmark/main.py +53 -0
  169. vllm/entrypoints/cli/benchmark/serve.py +29 -0
  170. vllm/entrypoints/cli/benchmark/throughput.py +29 -0
  171. vllm/entrypoints/cli/collect_env.py +35 -0
  172. vllm/entrypoints/cli/main.py +59 -0
  173. vllm/entrypoints/cli/openai.py +175 -0
  174. vllm/entrypoints/cli/serve.py +59 -0
  175. vllm/entrypoints/cli/types.py +24 -0
  176. vllm/entrypoints/launcher.py +146 -0
  177. vllm/entrypoints/llm.py +1450 -0
  178. vllm/entrypoints/logger.py +44 -0
  179. vllm/entrypoints/openai/__init__.py +0 -0
  180. vllm/entrypoints/openai/api_server.py +1130 -0
  181. vllm/entrypoints/openai/cli_args.py +296 -0
  182. vllm/entrypoints/openai/logits_processors.py +89 -0
  183. vllm/entrypoints/openai/protocol.py +1806 -0
  184. vllm/entrypoints/openai/run_batch.py +439 -0
  185. vllm/entrypoints/openai/serving_chat.py +1210 -0
  186. vllm/entrypoints/openai/serving_completion.py +557 -0
  187. vllm/entrypoints/openai/serving_embedding.py +245 -0
  188. vllm/entrypoints/openai/serving_engine.py +569 -0
  189. vllm/entrypoints/openai/serving_models.py +314 -0
  190. vllm/entrypoints/openai/serving_pooling.py +237 -0
  191. vllm/entrypoints/openai/serving_score.py +439 -0
  192. vllm/entrypoints/openai/serving_tokenization.py +147 -0
  193. vllm/entrypoints/openai/serving_transcription.py +421 -0
  194. vllm/entrypoints/openai/tool_parsers/__init__.py +19 -0
  195. vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +163 -0
  196. vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +254 -0
  197. vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +232 -0
  198. vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +370 -0
  199. vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +211 -0
  200. vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +303 -0
  201. vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +262 -0
  202. vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +342 -0
  203. vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +110 -0
  204. vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +292 -0
  205. vllm/entrypoints/openai/tool_parsers/utils.py +123 -0
  206. vllm/entrypoints/score_utils.py +49 -0
  207. vllm/entrypoints/ssl.py +74 -0
  208. vllm/entrypoints/utils.py +136 -0
  209. vllm/env_override.py +34 -0
  210. vllm/envs.py +800 -0
  211. vllm/executor/__init__.py +0 -0
  212. vllm/executor/executor_base.py +400 -0
  213. vllm/executor/mp_distributed_executor.py +243 -0
  214. vllm/executor/msgspec_utils.py +29 -0
  215. vllm/executor/multiproc_worker_utils.py +312 -0
  216. vllm/executor/ray_distributed_executor.py +700 -0
  217. vllm/executor/ray_utils.py +400 -0
  218. vllm/executor/uniproc_executor.py +141 -0
  219. vllm/forward_context.py +159 -0
  220. vllm/inputs/__init__.py +37 -0
  221. vllm/inputs/data.py +248 -0
  222. vllm/inputs/parse.py +121 -0
  223. vllm/inputs/preprocess.py +745 -0
  224. vllm/inputs/registry.py +212 -0
  225. vllm/jsontree.py +79 -0
  226. vllm/logger.py +210 -0
  227. vllm/logging_utils/__init__.py +7 -0
  228. vllm/logging_utils/formatter.py +17 -0
  229. vllm/logits_process.py +121 -0
  230. vllm/lora/__init__.py +0 -0
  231. vllm/lora/fully_sharded_layers.py +335 -0
  232. vllm/lora/layers.py +1263 -0
  233. vllm/lora/lora.py +198 -0
  234. vllm/lora/models.py +802 -0
  235. vllm/lora/ops/__init__.py +0 -0
  236. vllm/lora/ops/torch_ops/__init__.py +15 -0
  237. vllm/lora/ops/torch_ops/lora_ops.py +115 -0
  238. vllm/lora/ops/triton_ops/__init__.py +11 -0
  239. vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
  240. vllm/lora/ops/triton_ops/lora_expand.py +293 -0
  241. vllm/lora/ops/triton_ops/lora_kernel_metadata.py +147 -0
  242. vllm/lora/ops/triton_ops/lora_shrink.py +247 -0
  243. vllm/lora/ops/triton_ops/utils.py +121 -0
  244. vllm/lora/peft_helper.py +115 -0
  245. vllm/lora/punica_wrapper/__init__.py +9 -0
  246. vllm/lora/punica_wrapper/punica_base.py +483 -0
  247. vllm/lora/punica_wrapper/punica_cpu.py +348 -0
  248. vllm/lora/punica_wrapper/punica_gpu.py +289 -0
  249. vllm/lora/punica_wrapper/punica_hpu.py +144 -0
  250. vllm/lora/punica_wrapper/punica_selector.py +20 -0
  251. vllm/lora/punica_wrapper/utils.py +161 -0
  252. vllm/lora/request.py +97 -0
  253. vllm/lora/resolver.py +83 -0
  254. vllm/lora/utils.py +237 -0
  255. vllm/lora/worker_manager.py +251 -0
  256. vllm/model_executor/__init__.py +15 -0
  257. vllm/model_executor/custom_op.py +153 -0
  258. vllm/model_executor/guided_decoding/__init__.py +180 -0
  259. vllm/model_executor/guided_decoding/guidance_decoding.py +63 -0
  260. vllm/model_executor/guided_decoding/guidance_logits_processors.py +85 -0
  261. vllm/model_executor/guided_decoding/guided_fields.py +42 -0
  262. vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +66 -0
  263. vllm/model_executor/guided_decoding/outlines_decoding.py +154 -0
  264. vllm/model_executor/guided_decoding/outlines_logits_processors.py +271 -0
  265. vllm/model_executor/guided_decoding/reasoner/__init__.py +35 -0
  266. vllm/model_executor/guided_decoding/utils.py +241 -0
  267. vllm/model_executor/guided_decoding/xgrammar_decoding.py +425 -0
  268. vllm/model_executor/layers/__init__.py +0 -0
  269. vllm/model_executor/layers/activation.py +368 -0
  270. vllm/model_executor/layers/fused_moe/__init__.py +51 -0
  271. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  272. vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  273. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  274. vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  275. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  276. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
  277. vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
  278. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  279. vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  280. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  281. vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  282. vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  283. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  284. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
  285. vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
  286. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  287. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
  288. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  289. vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
  290. vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  291. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  292. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
  293. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  294. vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
  295. vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
  296. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
  297. vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
  298. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  299. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  300. vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  301. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  302. vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  303. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
  304. vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  305. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  306. vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  307. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  308. vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  309. vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  310. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  311. vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
  312. vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  313. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
  314. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  315. vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
  316. vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
  317. vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  318. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
  319. vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  320. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  321. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  322. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  323. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
  324. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  325. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  326. vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  327. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  328. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  329. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  330. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  331. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  332. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  333. vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  334. vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
  335. vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  336. vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
  337. vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
  338. vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
  339. vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
  340. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  341. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  342. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  343. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  344. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  345. vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
  346. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  347. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  348. vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
  349. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  350. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  351. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  352. vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
  353. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  354. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
  355. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  356. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  357. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  358. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  359. vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
  360. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  361. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
  362. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  363. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
  364. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
  365. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  366. vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
  367. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  368. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
  369. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  370. vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
  371. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  372. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
  373. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  374. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
  375. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  376. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  377. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  378. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  379. vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
  380. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  381. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
  382. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  383. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
  384. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  385. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  386. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  387. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  388. vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
  389. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  390. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
  391. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  392. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
  393. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
  394. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  395. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
  396. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  397. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  398. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  399. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
  400. vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
  401. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  402. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
  403. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  404. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
  405. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  406. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  407. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  408. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  409. vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
  410. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  411. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
  412. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  413. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
  414. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
  415. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  416. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  417. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  418. vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
  419. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
  420. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
  421. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
  422. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
  423. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
  424. vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  425. vllm/model_executor/layers/fused_moe/configs/README +12 -0
  426. vllm/model_executor/layers/fused_moe/cutlass_moe.py +180 -0
  427. vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +294 -0
  428. vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +374 -0
  429. vllm/model_executor/layers/fused_moe/fused_moe.py +1539 -0
  430. vllm/model_executor/layers/fused_moe/layer.py +949 -0
  431. vllm/model_executor/layers/fused_moe/moe_align_block_size.py +243 -0
  432. vllm/model_executor/layers/fused_moe/moe_pallas.py +64 -0
  433. vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +59 -0
  434. vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +416 -0
  435. vllm/model_executor/layers/fused_moe/utils.py +48 -0
  436. vllm/model_executor/layers/layernorm.py +277 -0
  437. vllm/model_executor/layers/lightning_attn.py +651 -0
  438. vllm/model_executor/layers/linear.py +1518 -0
  439. vllm/model_executor/layers/logits_processor.py +196 -0
  440. vllm/model_executor/layers/mamba/__init__.py +0 -0
  441. vllm/model_executor/layers/mamba/mamba2_metadata.py +109 -0
  442. vllm/model_executor/layers/mamba/mamba_mixer.py +244 -0
  443. vllm/model_executor/layers/mamba/mamba_mixer2.py +538 -0
  444. vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
  445. vllm/model_executor/layers/mamba/ops/causal_conv1d.py +104 -0
  446. vllm/model_executor/layers/mamba/ops/mamba_ssm.py +415 -0
  447. vllm/model_executor/layers/mamba/ops/ssd_bmm.py +261 -0
  448. vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +588 -0
  449. vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +750 -0
  450. vllm/model_executor/layers/mamba/ops/ssd_combined.py +231 -0
  451. vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +205 -0
  452. vllm/model_executor/layers/pooler.py +336 -0
  453. vllm/model_executor/layers/quantization/__init__.py +153 -0
  454. vllm/model_executor/layers/quantization/aqlm.py +374 -0
  455. vllm/model_executor/layers/quantization/awq.py +184 -0
  456. vllm/model_executor/layers/quantization/awq_marlin.py +518 -0
  457. vllm/model_executor/layers/quantization/awq_triton.py +319 -0
  458. vllm/model_executor/layers/quantization/base_config.py +145 -0
  459. vllm/model_executor/layers/quantization/bitblas.py +459 -0
  460. vllm/model_executor/layers/quantization/bitsandbytes.py +396 -0
  461. vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
  462. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +624 -0
  463. vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1100 -0
  464. vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +20 -0
  465. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +357 -0
  466. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +54 -0
  467. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +159 -0
  468. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +119 -0
  469. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +149 -0
  470. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +110 -0
  471. vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +200 -0
  472. vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +205 -0
  473. vllm/model_executor/layers/quantization/compressed_tensors/utils.py +213 -0
  474. vllm/model_executor/layers/quantization/deepspeedfp.py +193 -0
  475. vllm/model_executor/layers/quantization/experts_int8.py +194 -0
  476. vllm/model_executor/layers/quantization/fbgemm_fp8.py +168 -0
  477. vllm/model_executor/layers/quantization/fp8.py +832 -0
  478. vllm/model_executor/layers/quantization/gguf.py +408 -0
  479. vllm/model_executor/layers/quantization/gptq.py +276 -0
  480. vllm/model_executor/layers/quantization/gptq_bitblas.py +438 -0
  481. vllm/model_executor/layers/quantization/gptq_marlin.py +643 -0
  482. vllm/model_executor/layers/quantization/gptq_marlin_24.py +295 -0
  483. vllm/model_executor/layers/quantization/hqq_marlin.py +328 -0
  484. vllm/model_executor/layers/quantization/ipex_quant.py +250 -0
  485. vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
  486. vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +89 -0
  487. vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +82 -0
  488. vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
  489. vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +299 -0
  490. vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +142 -0
  491. vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +119 -0
  492. vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +132 -0
  493. vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +66 -0
  494. vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +86 -0
  495. vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +119 -0
  496. vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +136 -0
  497. vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +40 -0
  498. vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +104 -0
  499. vllm/model_executor/layers/quantization/kv_cache.py +137 -0
  500. vllm/model_executor/layers/quantization/marlin.py +259 -0
  501. vllm/model_executor/layers/quantization/modelopt.py +410 -0
  502. vllm/model_executor/layers/quantization/moe_wna16.py +447 -0
  503. vllm/model_executor/layers/quantization/neuron_quant.py +67 -0
  504. vllm/model_executor/layers/quantization/ptpc_fp8.py +125 -0
  505. vllm/model_executor/layers/quantization/qqq.py +273 -0
  506. vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
  507. vllm/model_executor/layers/quantization/quark/quark.py +385 -0
  508. vllm/model_executor/layers/quantization/quark/quark_moe.py +236 -0
  509. vllm/model_executor/layers/quantization/quark/schemes/__init__.py +7 -0
  510. vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +54 -0
  511. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +142 -0
  512. vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +121 -0
  513. vllm/model_executor/layers/quantization/quark/utils.py +102 -0
  514. vllm/model_executor/layers/quantization/schema.py +85 -0
  515. vllm/model_executor/layers/quantization/torchao.py +127 -0
  516. vllm/model_executor/layers/quantization/tpu_int8.py +119 -0
  517. vllm/model_executor/layers/quantization/utils/__init__.py +5 -0
  518. vllm/model_executor/layers/quantization/utils/allspark_utils.py +51 -0
  519. vllm/model_executor/layers/quantization/utils/bitblas_utils.py +198 -0
  520. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  521. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  522. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  523. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  524. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  525. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  526. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  527. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  528. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  529. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  530. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  531. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  532. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  533. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  534. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  535. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  536. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  537. vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  538. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  539. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  540. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  541. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  542. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  543. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  544. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  545. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  546. vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  547. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  548. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  549. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  550. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  551. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  552. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  553. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  554. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  555. vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  556. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  557. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  558. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  559. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  560. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  561. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  562. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  563. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  564. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  565. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  566. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  567. vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  568. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  569. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  570. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  571. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  572. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  573. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  574. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  575. vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  576. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  577. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  578. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  579. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  580. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  581. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  582. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  583. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  584. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  585. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  586. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  587. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  588. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  589. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  590. vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  591. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  592. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  593. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  594. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  595. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  596. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  597. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  598. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  599. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  600. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  601. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  602. vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  603. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  604. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  605. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  606. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  607. vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  608. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  609. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  610. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  611. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  612. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  613. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  614. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  615. vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  616. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  617. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  618. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  619. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  620. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  621. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  622. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  623. vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  624. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  625. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  626. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  627. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  628. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  629. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  630. vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  631. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  632. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  633. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  634. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  635. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  636. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  637. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  638. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  639. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  640. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  641. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
  642. vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  643. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  644. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  645. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  646. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  647. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  648. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  649. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  650. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  651. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  652. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  653. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  654. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  655. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  656. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  657. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  658. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  659. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  660. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  661. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  662. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  663. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  664. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  665. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  666. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  667. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  668. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  669. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  670. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  671. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  672. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  673. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  674. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  675. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  676. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  677. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  678. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  679. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  680. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  681. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  682. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  683. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  684. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  685. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  686. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  687. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  688. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  689. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  690. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  691. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  692. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  693. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  694. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  695. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  696. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  697. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  698. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  699. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  700. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  701. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  702. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  703. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  704. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  705. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  706. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  707. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  708. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  709. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  710. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  711. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  712. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  713. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
  714. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
  715. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
  716. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  717. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  718. vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  719. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  720. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  721. vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
  722. vllm/model_executor/layers/quantization/utils/fp8_utils.py +523 -0
  723. vllm/model_executor/layers/quantization/utils/gptq_utils.py +94 -0
  724. vllm/model_executor/layers/quantization/utils/int8_utils.py +459 -0
  725. vllm/model_executor/layers/quantization/utils/layer_utils.py +39 -0
  726. vllm/model_executor/layers/quantization/utils/machete_utils.py +32 -0
  727. vllm/model_executor/layers/quantization/utils/marlin_utils.py +413 -0
  728. vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +110 -0
  729. vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +164 -0
  730. vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
  731. vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +127 -0
  732. vllm/model_executor/layers/quantization/utils/quant_utils.py +571 -0
  733. vllm/model_executor/layers/quantization/utils/w8a8_utils.py +404 -0
  734. vllm/model_executor/layers/rejection_sampler.py +400 -0
  735. vllm/model_executor/layers/resampler.py +269 -0
  736. vllm/model_executor/layers/rotary_embedding.py +1598 -0
  737. vllm/model_executor/layers/sampler.py +1221 -0
  738. vllm/model_executor/layers/spec_decode_base_sampler.py +258 -0
  739. vllm/model_executor/layers/typical_acceptance_sampler.py +172 -0
  740. vllm/model_executor/layers/utils.py +99 -0
  741. vllm/model_executor/layers/vocab_parallel_embedding.py +485 -0
  742. vllm/model_executor/model_loader/__init__.py +20 -0
  743. vllm/model_executor/model_loader/loader.py +1542 -0
  744. vllm/model_executor/model_loader/neuron.py +243 -0
  745. vllm/model_executor/model_loader/tensorizer.py +468 -0
  746. vllm/model_executor/model_loader/utils.py +171 -0
  747. vllm/model_executor/model_loader/weight_utils.py +749 -0
  748. vllm/model_executor/models/__init__.py +27 -0
  749. vllm/model_executor/models/adapters.py +247 -0
  750. vllm/model_executor/models/arctic.py +559 -0
  751. vllm/model_executor/models/aria.py +656 -0
  752. vllm/model_executor/models/aya_vision.py +461 -0
  753. vllm/model_executor/models/baichuan.py +469 -0
  754. vllm/model_executor/models/bamba.py +542 -0
  755. vllm/model_executor/models/bart.py +936 -0
  756. vllm/model_executor/models/bert.py +725 -0
  757. vllm/model_executor/models/blip.py +337 -0
  758. vllm/model_executor/models/blip2.py +717 -0
  759. vllm/model_executor/models/bloom.py +358 -0
  760. vllm/model_executor/models/chameleon.py +1135 -0
  761. vllm/model_executor/models/chatglm.py +476 -0
  762. vllm/model_executor/models/clip.py +410 -0
  763. vllm/model_executor/models/commandr.py +466 -0
  764. vllm/model_executor/models/constant_size_cache.py +136 -0
  765. vllm/model_executor/models/dbrx.py +469 -0
  766. vllm/model_executor/models/deepseek.py +484 -0
  767. vllm/model_executor/models/deepseek_mtp.py +266 -0
  768. vllm/model_executor/models/deepseek_v2.py +830 -0
  769. vllm/model_executor/models/deepseek_vl2.py +647 -0
  770. vllm/model_executor/models/eagle.py +247 -0
  771. vllm/model_executor/models/exaone.py +548 -0
  772. vllm/model_executor/models/fairseq2_llama.py +153 -0
  773. vllm/model_executor/models/falcon.py +508 -0
  774. vllm/model_executor/models/florence2.py +1102 -0
  775. vllm/model_executor/models/fuyu.py +388 -0
  776. vllm/model_executor/models/gemma.py +423 -0
  777. vllm/model_executor/models/gemma2.py +423 -0
  778. vllm/model_executor/models/gemma3.py +531 -0
  779. vllm/model_executor/models/gemma3_mm.py +716 -0
  780. vllm/model_executor/models/glm.py +22 -0
  781. vllm/model_executor/models/glm4.py +303 -0
  782. vllm/model_executor/models/glm4v.py +647 -0
  783. vllm/model_executor/models/gpt2.py +313 -0
  784. vllm/model_executor/models/gpt_bigcode.py +336 -0
  785. vllm/model_executor/models/gpt_j.py +337 -0
  786. vllm/model_executor/models/gpt_neox.py +330 -0
  787. vllm/model_executor/models/granite.py +494 -0
  788. vllm/model_executor/models/granite_speech.py +777 -0
  789. vllm/model_executor/models/granitemoe.py +435 -0
  790. vllm/model_executor/models/granitemoeshared.py +339 -0
  791. vllm/model_executor/models/gritlm.py +245 -0
  792. vllm/model_executor/models/grok1.py +560 -0
  793. vllm/model_executor/models/h2ovl.py +542 -0
  794. vllm/model_executor/models/idefics2_vision_model.py +387 -0
  795. vllm/model_executor/models/idefics3.py +767 -0
  796. vllm/model_executor/models/interfaces.py +569 -0
  797. vllm/model_executor/models/interfaces_base.py +163 -0
  798. vllm/model_executor/models/intern_vit.py +476 -0
  799. vllm/model_executor/models/internlm2.py +453 -0
  800. vllm/model_executor/models/internlm2_ve.py +146 -0
  801. vllm/model_executor/models/internvl.py +945 -0
  802. vllm/model_executor/models/jais.py +371 -0
  803. vllm/model_executor/models/jamba.py +590 -0
  804. vllm/model_executor/models/kimi_vl.py +577 -0
  805. vllm/model_executor/models/llama.py +619 -0
  806. vllm/model_executor/models/llama4.py +530 -0
  807. vllm/model_executor/models/llama_eagle.py +152 -0
  808. vllm/model_executor/models/llama_eagle3.py +232 -0
  809. vllm/model_executor/models/llava.py +869 -0
  810. vllm/model_executor/models/llava_next.py +582 -0
  811. vllm/model_executor/models/llava_next_video.py +470 -0
  812. vllm/model_executor/models/llava_onevision.py +954 -0
  813. vllm/model_executor/models/mamba.py +271 -0
  814. vllm/model_executor/models/mamba2.py +302 -0
  815. vllm/model_executor/models/mamba_cache.py +76 -0
  816. vllm/model_executor/models/medusa.py +210 -0
  817. vllm/model_executor/models/minicpm.py +592 -0
  818. vllm/model_executor/models/minicpm3.py +229 -0
  819. vllm/model_executor/models/minicpmo.py +725 -0
  820. vllm/model_executor/models/minicpmv.py +1287 -0
  821. vllm/model_executor/models/minimax_cache.py +35 -0
  822. vllm/model_executor/models/minimax_text_01.py +1261 -0
  823. vllm/model_executor/models/mistral3.py +598 -0
  824. vllm/model_executor/models/mixtral.py +485 -0
  825. vllm/model_executor/models/mixtral_quant.py +447 -0
  826. vllm/model_executor/models/mllama.py +1623 -0
  827. vllm/model_executor/models/mllama4.py +838 -0
  828. vllm/model_executor/models/mlp_speculator.py +205 -0
  829. vllm/model_executor/models/modernbert.py +325 -0
  830. vllm/model_executor/models/module_mapping.py +71 -0
  831. vllm/model_executor/models/molmo.py +1567 -0
  832. vllm/model_executor/models/moonvit.py +628 -0
  833. vllm/model_executor/models/mpt.py +329 -0
  834. vllm/model_executor/models/nemotron.py +506 -0
  835. vllm/model_executor/models/nemotron_nas.py +446 -0
  836. vllm/model_executor/models/nvlm_d.py +212 -0
  837. vllm/model_executor/models/olmo.py +390 -0
  838. vllm/model_executor/models/olmo2.py +412 -0
  839. vllm/model_executor/models/olmoe.py +449 -0
  840. vllm/model_executor/models/opt.py +410 -0
  841. vllm/model_executor/models/orion.py +356 -0
  842. vllm/model_executor/models/paligemma.py +397 -0
  843. vllm/model_executor/models/persimmon.py +342 -0
  844. vllm/model_executor/models/phi.py +354 -0
  845. vllm/model_executor/models/phi3.py +18 -0
  846. vllm/model_executor/models/phi3_small.py +463 -0
  847. vllm/model_executor/models/phi3v.py +722 -0
  848. vllm/model_executor/models/phi4mm.py +1263 -0
  849. vllm/model_executor/models/phi4mm_audio.py +1232 -0
  850. vllm/model_executor/models/phi4mm_utils.py +1883 -0
  851. vllm/model_executor/models/phimoe.py +666 -0
  852. vllm/model_executor/models/pixtral.py +1281 -0
  853. vllm/model_executor/models/plamo2.py +736 -0
  854. vllm/model_executor/models/prithvi_geospatial_mae.py +231 -0
  855. vllm/model_executor/models/qwen.py +360 -0
  856. vllm/model_executor/models/qwen2.py +552 -0
  857. vllm/model_executor/models/qwen2_5_omni_thinker.py +901 -0
  858. vllm/model_executor/models/qwen2_5_vl.py +1136 -0
  859. vllm/model_executor/models/qwen2_audio.py +402 -0
  860. vllm/model_executor/models/qwen2_moe.py +531 -0
  861. vllm/model_executor/models/qwen2_rm.py +130 -0
  862. vllm/model_executor/models/qwen2_vl.py +1409 -0
  863. vllm/model_executor/models/qwen3.py +319 -0
  864. vllm/model_executor/models/qwen3_moe.py +528 -0
  865. vllm/model_executor/models/qwen_vl.py +784 -0
  866. vllm/model_executor/models/registry.py +611 -0
  867. vllm/model_executor/models/roberta.py +332 -0
  868. vllm/model_executor/models/siglip.py +522 -0
  869. vllm/model_executor/models/skyworkr1v.py +949 -0
  870. vllm/model_executor/models/smolvlm.py +51 -0
  871. vllm/model_executor/models/solar.py +504 -0
  872. vllm/model_executor/models/stablelm.py +349 -0
  873. vllm/model_executor/models/starcoder2.py +355 -0
  874. vllm/model_executor/models/telechat2.py +139 -0
  875. vllm/model_executor/models/teleflm.py +78 -0
  876. vllm/model_executor/models/transformers.py +442 -0
  877. vllm/model_executor/models/ultravox.py +655 -0
  878. vllm/model_executor/models/utils.py +714 -0
  879. vllm/model_executor/models/vision.py +149 -0
  880. vllm/model_executor/models/whisper.py +746 -0
  881. vllm/model_executor/models/zamba2.py +1008 -0
  882. vllm/model_executor/parameter.py +458 -0
  883. vllm/model_executor/pooling_metadata.py +71 -0
  884. vllm/model_executor/sampling_metadata.py +596 -0
  885. vllm/model_executor/utils.py +53 -0
  886. vllm/multimodal/__init__.py +31 -0
  887. vllm/multimodal/audio.py +105 -0
  888. vllm/multimodal/base.py +218 -0
  889. vllm/multimodal/hasher.py +103 -0
  890. vllm/multimodal/image.py +77 -0
  891. vllm/multimodal/inputs.py +843 -0
  892. vllm/multimodal/parse.py +454 -0
  893. vllm/multimodal/processing.py +1760 -0
  894. vllm/multimodal/profiling.py +274 -0
  895. vllm/multimodal/registry.py +321 -0
  896. vllm/multimodal/utils.py +386 -0
  897. vllm/multimodal/video.py +166 -0
  898. vllm/outputs.py +521 -0
  899. vllm/platforms/__init__.py +286 -0
  900. vllm/platforms/cpu.py +182 -0
  901. vllm/platforms/cuda.py +463 -0
  902. vllm/platforms/hpu.py +94 -0
  903. vllm/platforms/interface.py +427 -0
  904. vllm/platforms/neuron.py +69 -0
  905. vllm/platforms/rocm.py +346 -0
  906. vllm/platforms/tpu.py +174 -0
  907. vllm/platforms/xpu.py +142 -0
  908. vllm/plugins/__init__.py +82 -0
  909. vllm/pooling_params.py +53 -0
  910. vllm/profiler/__init__.py +7 -0
  911. vllm/profiler/layerwise_profile.py +374 -0
  912. vllm/profiler/utils.py +147 -0
  913. vllm/prompt_adapter/__init__.py +0 -0
  914. vllm/prompt_adapter/layers.py +82 -0
  915. vllm/prompt_adapter/models.py +357 -0
  916. vllm/prompt_adapter/request.py +36 -0
  917. vllm/prompt_adapter/utils.py +97 -0
  918. vllm/prompt_adapter/worker_manager.py +178 -0
  919. vllm/py.typed +2 -0
  920. vllm/reasoning/__init__.py +12 -0
  921. vllm/reasoning/abs_reasoning_parsers.py +189 -0
  922. vllm/reasoning/deepseek_r1_reasoning_parser.py +172 -0
  923. vllm/reasoning/granite_reasoning_parser.py +362 -0
  924. vllm/sampling_params.py +598 -0
  925. vllm/scalar_type.py +335 -0
  926. vllm/scripts.py +14 -0
  927. vllm/sequence.py +1486 -0
  928. vllm/spec_decode/__init__.py +0 -0
  929. vllm/spec_decode/batch_expansion.py +505 -0
  930. vllm/spec_decode/draft_model_runner.py +335 -0
  931. vllm/spec_decode/interfaces.py +98 -0
  932. vllm/spec_decode/medusa_worker.py +137 -0
  933. vllm/spec_decode/metrics.py +212 -0
  934. vllm/spec_decode/mlp_speculator_worker.py +93 -0
  935. vllm/spec_decode/mqa_scorer.py +159 -0
  936. vllm/spec_decode/multi_step_worker.py +416 -0
  937. vllm/spec_decode/ngram_worker.py +195 -0
  938. vllm/spec_decode/proposer_worker_base.py +58 -0
  939. vllm/spec_decode/smaller_tp_proposer_worker.py +194 -0
  940. vllm/spec_decode/spec_decode_worker.py +1324 -0
  941. vllm/spec_decode/target_model_runner.py +44 -0
  942. vllm/spec_decode/top1_proposer.py +274 -0
  943. vllm/spec_decode/util.py +276 -0
  944. vllm/test_utils.py +129 -0
  945. vllm/third_party/__init__.py +0 -0
  946. vllm/third_party/pynvml.py +6139 -0
  947. vllm/tracing.py +130 -0
  948. vllm/transformers_utils/__init__.py +19 -0
  949. vllm/transformers_utils/config.py +813 -0
  950. vllm/transformers_utils/configs/__init__.py +52 -0
  951. vllm/transformers_utils/configs/arctic.py +206 -0
  952. vllm/transformers_utils/configs/chatglm.py +71 -0
  953. vllm/transformers_utils/configs/cohere2.py +194 -0
  954. vllm/transformers_utils/configs/dbrx.py +280 -0
  955. vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
  956. vllm/transformers_utils/configs/eagle.py +65 -0
  957. vllm/transformers_utils/configs/exaone.py +191 -0
  958. vllm/transformers_utils/configs/falcon.py +89 -0
  959. vllm/transformers_utils/configs/h2ovl.py +15 -0
  960. vllm/transformers_utils/configs/internvl.py +53 -0
  961. vllm/transformers_utils/configs/jais.py +237 -0
  962. vllm/transformers_utils/configs/kimi_vl.py +36 -0
  963. vllm/transformers_utils/configs/medusa.py +62 -0
  964. vllm/transformers_utils/configs/mllama.py +30 -0
  965. vllm/transformers_utils/configs/mlp_speculator.py +67 -0
  966. vllm/transformers_utils/configs/moonvit.py +32 -0
  967. vllm/transformers_utils/configs/mpt.py +179 -0
  968. vllm/transformers_utils/configs/nemotron.py +204 -0
  969. vllm/transformers_utils/configs/nvlm_d.py +14 -0
  970. vllm/transformers_utils/configs/skyworkr1v.py +53 -0
  971. vllm/transformers_utils/configs/solar.py +246 -0
  972. vllm/transformers_utils/configs/telechat2.py +63 -0
  973. vllm/transformers_utils/configs/ultravox.py +107 -0
  974. vllm/transformers_utils/detokenizer.py +167 -0
  975. vllm/transformers_utils/detokenizer_utils.py +188 -0
  976. vllm/transformers_utils/processor.py +210 -0
  977. vllm/transformers_utils/processors/__init__.py +6 -0
  978. vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
  979. vllm/transformers_utils/s3_utils.py +161 -0
  980. vllm/transformers_utils/tokenizer.py +291 -0
  981. vllm/transformers_utils/tokenizer_base.py +146 -0
  982. vllm/transformers_utils/tokenizer_group.py +110 -0
  983. vllm/transformers_utils/tokenizers/__init__.py +9 -0
  984. vllm/transformers_utils/tokenizers/mistral.py +483 -0
  985. vllm/transformers_utils/utils.py +98 -0
  986. vllm/triton_utils/__init__.py +5 -0
  987. vllm/triton_utils/importing.py +53 -0
  988. vllm/usage/__init__.py +0 -0
  989. vllm/usage/usage_lib.py +255 -0
  990. vllm/utils.py +2692 -0
  991. vllm/v1/__init__.py +0 -0
  992. vllm/v1/attention/__init__.py +0 -0
  993. vllm/v1/attention/backends/__init__.py +0 -0
  994. vllm/v1/attention/backends/flash_attn.py +783 -0
  995. vllm/v1/attention/backends/flashinfer.py +638 -0
  996. vllm/v1/attention/backends/mla/__init__.py +0 -0
  997. vllm/v1/attention/backends/mla/common.py +974 -0
  998. vllm/v1/attention/backends/mla/flashmla.py +149 -0
  999. vllm/v1/attention/backends/mla/triton_mla.py +118 -0
  1000. vllm/v1/attention/backends/pallas.py +221 -0
  1001. vllm/v1/attention/backends/triton_attn.py +198 -0
  1002. vllm/v1/core/__init__.py +0 -0
  1003. vllm/v1/core/block_pool.py +281 -0
  1004. vllm/v1/core/encoder_cache_manager.py +149 -0
  1005. vllm/v1/core/kv_cache_manager.py +385 -0
  1006. vllm/v1/core/kv_cache_utils.py +744 -0
  1007. vllm/v1/core/sched/__init__.py +0 -0
  1008. vllm/v1/core/sched/interface.py +134 -0
  1009. vllm/v1/core/sched/output.py +126 -0
  1010. vllm/v1/core/sched/scheduler.py +838 -0
  1011. vllm/v1/core/sched/utils.py +22 -0
  1012. vllm/v1/core/specialized_manager.py +161 -0
  1013. vllm/v1/engine/__init__.py +166 -0
  1014. vllm/v1/engine/async_llm.py +532 -0
  1015. vllm/v1/engine/core.py +701 -0
  1016. vllm/v1/engine/core_client.py +942 -0
  1017. vllm/v1/engine/detokenizer.py +260 -0
  1018. vllm/v1/engine/exceptions.py +16 -0
  1019. vllm/v1/engine/llm_engine.py +285 -0
  1020. vllm/v1/engine/logprobs.py +198 -0
  1021. vllm/v1/engine/mm_input_cache.py +82 -0
  1022. vllm/v1/engine/output_processor.py +420 -0
  1023. vllm/v1/engine/parallel_sampling.py +132 -0
  1024. vllm/v1/engine/processor.py +387 -0
  1025. vllm/v1/executor/__init__.py +0 -0
  1026. vllm/v1/executor/abstract.py +112 -0
  1027. vllm/v1/executor/multiproc_executor.py +480 -0
  1028. vllm/v1/executor/ray_distributed_executor.py +61 -0
  1029. vllm/v1/kv_cache_interface.py +166 -0
  1030. vllm/v1/metrics/__init__.py +0 -0
  1031. vllm/v1/metrics/loggers.py +498 -0
  1032. vllm/v1/metrics/stats.py +238 -0
  1033. vllm/v1/outputs.py +111 -0
  1034. vllm/v1/request.py +178 -0
  1035. vllm/v1/sample/__init__.py +0 -0
  1036. vllm/v1/sample/metadata.py +43 -0
  1037. vllm/v1/sample/ops/__init__.py +0 -0
  1038. vllm/v1/sample/ops/bad_words.py +38 -0
  1039. vllm/v1/sample/ops/penalties.py +58 -0
  1040. vllm/v1/sample/ops/topk_topp_sampler.py +315 -0
  1041. vllm/v1/sample/rejection_sampler.py +631 -0
  1042. vllm/v1/sample/sampler.py +270 -0
  1043. vllm/v1/sample/tpu/__init__.py +0 -0
  1044. vllm/v1/sample/tpu/metadata.py +118 -0
  1045. vllm/v1/sample/tpu/sampler.py +154 -0
  1046. vllm/v1/serial_utils.py +274 -0
  1047. vllm/v1/spec_decode/__init__.py +0 -0
  1048. vllm/v1/spec_decode/eagle.py +318 -0
  1049. vllm/v1/spec_decode/metadata.py +61 -0
  1050. vllm/v1/spec_decode/metrics.py +164 -0
  1051. vllm/v1/spec_decode/ngram_proposer.py +131 -0
  1052. vllm/v1/spec_decode/utils.py +18 -0
  1053. vllm/v1/stats/__init__.py +0 -0
  1054. vllm/v1/stats/common.py +453 -0
  1055. vllm/v1/structured_output/__init__.py +113 -0
  1056. vllm/v1/structured_output/backend_guidance.py +215 -0
  1057. vllm/v1/structured_output/backend_types.py +96 -0
  1058. vllm/v1/structured_output/backend_xgrammar.py +299 -0
  1059. vllm/v1/structured_output/request.py +84 -0
  1060. vllm/v1/structured_output/utils.py +174 -0
  1061. vllm/v1/utils.py +249 -0
  1062. vllm/v1/worker/__init__.py +0 -0
  1063. vllm/v1/worker/block_table.py +87 -0
  1064. vllm/v1/worker/gpu_input_batch.py +677 -0
  1065. vllm/v1/worker/gpu_model_runner.py +1776 -0
  1066. vllm/v1/worker/gpu_worker.py +349 -0
  1067. vllm/v1/worker/lora_model_runner_mixin.py +145 -0
  1068. vllm/v1/worker/tpu_model_runner.py +1419 -0
  1069. vllm/v1/worker/tpu_worker.py +260 -0
  1070. vllm/v1/worker/utils.py +74 -0
  1071. vllm/v1/worker/worker_base.py +64 -0
  1072. vllm/version.py +40 -0
  1073. vllm/vllm_flash_attn/.gitkeep +0 -0
  1074. vllm/worker/__init__.py +0 -0
  1075. vllm/worker/cache_engine.py +144 -0
  1076. vllm/worker/cpu_enc_dec_model_runner.py +323 -0
  1077. vllm/worker/cpu_model_runner.py +668 -0
  1078. vllm/worker/cpu_pooling_model_runner.py +122 -0
  1079. vllm/worker/cpu_worker.py +400 -0
  1080. vllm/worker/enc_dec_model_runner.py +542 -0
  1081. vllm/worker/hpu_model_runner.py +2221 -0
  1082. vllm/worker/hpu_worker.py +483 -0
  1083. vllm/worker/model_runner.py +2056 -0
  1084. vllm/worker/model_runner_base.py +281 -0
  1085. vllm/worker/multi_step_hpu_worker.py +122 -0
  1086. vllm/worker/multi_step_model_runner.py +908 -0
  1087. vllm/worker/multi_step_tpu_worker.py +107 -0
  1088. vllm/worker/multi_step_worker.py +196 -0
  1089. vllm/worker/neuron_model_runner.py +336 -0
  1090. vllm/worker/neuron_worker.py +138 -0
  1091. vllm/worker/pooling_model_runner.py +200 -0
  1092. vllm/worker/tpu_model_runner.py +908 -0
  1093. vllm/worker/tpu_worker.py +332 -0
  1094. vllm/worker/utils.py +52 -0
  1095. vllm/worker/worker.py +570 -0
  1096. vllm/worker/worker_base.py +644 -0
  1097. vllm/worker/xpu_model_runner.py +603 -0
  1098. vllm/worker/xpu_worker.py +185 -0
  1099. vllm_cpu-0.8.5.post2.dist-info/METADATA +309 -0
  1100. vllm_cpu-0.8.5.post2.dist-info/RECORD +1103 -0
  1101. vllm_cpu-0.8.5.post2.dist-info/WHEEL +5 -0
  1102. vllm_cpu-0.8.5.post2.dist-info/entry_points.txt +2 -0
  1103. vllm_cpu-0.8.5.post2.dist-info/top_level.txt +1 -0
vllm/utils.py ADDED
@@ -0,0 +1,2692 @@
+ # SPDX-License-Identifier: Apache-2.0
+
+ from __future__ import annotations
+
+ import asyncio
+ import concurrent
+ import contextlib
+ import datetime
+ import enum
+ import gc
+ import getpass
+ import hashlib
+ import importlib
+ import importlib.metadata
+ import importlib.util
+ import inspect
+ import ipaddress
+ import multiprocessing
+ import os
+ import pickle
+ import re
+ import signal
+ import socket
+ import subprocess
+ import sys
+ import tempfile
+ import textwrap
+ import threading
+ import time
+ import traceback
+ import types
+ import uuid
+ import warnings
+ import weakref
+ from argparse import (Action, ArgumentDefaultsHelpFormatter, ArgumentParser,
+                       ArgumentTypeError)
+ from asyncio import FIRST_COMPLETED, AbstractEventLoop, Task
+ from collections import UserDict, defaultdict
+ from collections.abc import (AsyncGenerator, Awaitable, Generator, Hashable,
+                              Iterable, Iterator, KeysView, Mapping)
+ from concurrent.futures.process import ProcessPoolExecutor
+ from dataclasses import dataclass, field
+ from functools import cache, lru_cache, partial, wraps
+ from types import MappingProxyType
+ from typing import (TYPE_CHECKING, Any, Callable, Generic, Literal, NamedTuple,
+                     Optional, Sequence, Tuple, Type, TypeVar, Union, cast,
+                     overload)
+ from uuid import uuid4
+
+ import cachetools
+ import cloudpickle
+ import numpy as np
+ import numpy.typing as npt
+ import psutil
+ import torch
+ import torch.types
+ import yaml
+ import zmq
+ import zmq.asyncio
+ from packaging import version
+ from packaging.version import Version
+ from torch.library import Library
+ from typing_extensions import Never, ParamSpec, TypeIs, assert_never
+
+ import vllm.envs as envs
+ # NOTE: import triton_utils to make TritonPlaceholderModule work
+ # if triton is unavailable
+ import vllm.triton_utils  # noqa: F401
+ from vllm.logger import enable_trace_function_call, init_logger
+
+ if TYPE_CHECKING:
+     from vllm.config import ModelConfig, VllmConfig
+
+ logger = init_logger(__name__)
+
+ # Exception strings for non-implemented encoder/decoder scenarios
+
+ # Reminder: Please update docs/source/features/compatibility_matrix.md
+ # if the feature combo becomes valid
+
+ STR_NOT_IMPL_ENC_DEC_SWA = \
+     "Sliding window attention for encoder/decoder models " + \
+     "is not currently supported."
+
+ STR_NOT_IMPL_ENC_DEC_PREFIX_CACHE = \
+     "Prefix caching for encoder/decoder models " + \
+     "is not currently supported."
+
+ STR_NOT_IMPL_ENC_DEC_CHUNKED_PREFILL = \
+     "Chunked prefill for encoder/decoder models " + \
+     "is not currently supported."
+
+ STR_NOT_IMPL_ENC_DEC_LOGIT_SOFTCAP = (
+     "Models with logits_soft_cap "
+     "require FlashInfer backend, which is "
+     "currently not supported for encoder/decoder "
+     "models.")
+
+ STR_NOT_IMPL_ENC_DEC_LORA = ("LoRA is not currently "
+                              "supported with encoder/decoder "
+                              "models.")
+
+ STR_NOT_IMPL_ENC_DEC_PP = ("Pipeline parallelism is not "
+                            "currently supported with "
+                            "encoder/decoder models.")
+
+ STR_NOT_IMPL_ENC_DEC_MM = ("Multimodal is not currently "
+                            "supported with encoder/decoder "
+                            "models.")
+
+ STR_NOT_IMPL_ENC_DEC_SPEC_DEC = ("Speculative decoding is not "
+                                  "currently supported with encoder/"
+                                  "decoder models.")
+
+ STR_NOT_IMPL_ENC_DEC_BACKEND = ("XFormers and Flash-Attention are the only "
+                                 "backends currently supported with encoder/"
+                                 "decoder models.")
+
+ STR_NOT_IMPL_ENC_DEC_PROMPT_ADAPTER = ("Prompt adapters are not "
+                                        "currently supported with encoder/"
+                                        "decoder models.")
+
+ # Efficiently import all enc/dec error strings
+ # rather than having to import all of the above
+ STR_NOT_IMPL_ENC_DEC_ERR_STRS = {
+     "STR_NOT_IMPL_ENC_DEC_SWA": STR_NOT_IMPL_ENC_DEC_SWA,
+     "STR_NOT_IMPL_ENC_DEC_PREFIX_CACHE": STR_NOT_IMPL_ENC_DEC_PREFIX_CACHE,
+     "STR_NOT_IMPL_ENC_DEC_CHUNKED_PREFILL":
+     STR_NOT_IMPL_ENC_DEC_CHUNKED_PREFILL,
+     "STR_NOT_IMPL_ENC_DEC_LOGIT_SOFTCAP": STR_NOT_IMPL_ENC_DEC_LOGIT_SOFTCAP,
+     "STR_NOT_IMPL_ENC_DEC_LORA": STR_NOT_IMPL_ENC_DEC_LORA,
+     "STR_NOT_IMPL_ENC_DEC_PP": STR_NOT_IMPL_ENC_DEC_PP,
+     "STR_NOT_IMPL_ENC_DEC_MM": STR_NOT_IMPL_ENC_DEC_MM,
+     "STR_NOT_IMPL_ENC_DEC_SPEC_DEC": STR_NOT_IMPL_ENC_DEC_SPEC_DEC,
+     "STR_NOT_IMPL_ENC_DEC_BACKEND": STR_NOT_IMPL_ENC_DEC_BACKEND,
+     "STR_NOT_IMPL_ENC_DEC_PROMPT_ADAPTER": STR_NOT_IMPL_ENC_DEC_PROMPT_ADAPTER,
+ }
+
+ # Constants related to forcing the attention backend selection
+
+ # String name of the environment variable which may be set in order to
+ # force selection of the attention backend by the Attention
+ # wrapper
+ STR_BACKEND_ENV_VAR: str = "VLLM_ATTENTION_BACKEND"
+
+ # Possible string values of the STR_BACKEND_ENV_VAR
+ # environment variable, corresponding to possible backends
+ STR_FLASHINFER_ATTN_VAL: str = "FLASHINFER"
+ STR_TORCH_SDPA_ATTN_VAL: str = "TORCH_SDPA"
+ STR_ROCM_FLASH_ATTN_VAL: str = "ROCM_FLASH"
+ STR_XFORMERS_ATTN_VAL: str = "XFORMERS"
+ STR_FLASH_ATTN_VAL: str = "FLASH_ATTN"
+ STR_INVALID_VAL: str = "INVALID"
+
+ GB_bytes = 1_000_000_000
+ """The number of bytes in one gigabyte (GB)."""
+
+ GiB_bytes = 1 << 30
+ """The number of bytes in one gibibyte (GiB)."""
+
+ STR_DTYPE_TO_TORCH_DTYPE = {
+     "half": torch.half,
+     "bfloat16": torch.bfloat16,
+     "float": torch.float,
+     "fp8": torch.uint8,
+     "fp8_e4m3": torch.uint8,
+     "fp8_e5m2": torch.uint8,
+     "int8": torch.int8,
+ }
+
+ TORCH_DTYPE_TO_NUMPY_DTYPE = {
+     torch.float16: np.float16,
+     torch.float32: np.float32,
+     torch.float64: np.float64,
+     torch.uint8: np.uint8,
+     torch.int32: np.int32,
+     torch.int64: np.int64,
+ }
+
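Editor's note: a quick sanity sketch of the two dtype tables above, illustrative only and not part of the packaged file. All three fp8 spellings map to torch.uint8 because fp8 cache data is stored as raw bytes and reinterpreted inside the kernels.

import numpy as np
import torch
from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE, TORCH_DTYPE_TO_NUMPY_DTYPE

# fp8 variants share a byte-sized storage dtype.
assert STR_DTYPE_TO_TORCH_DTYPE["fp8_e4m3"] is torch.uint8
assert STR_DTYPE_TO_TORCH_DTYPE["fp8_e5m2"] is torch.uint8
# The torch -> numpy table covers the matching storage dtype.
assert TORCH_DTYPE_TO_NUMPY_DTYPE[torch.uint8] is np.uint8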
+ P = ParamSpec('P')
+ T = TypeVar("T")
+ U = TypeVar("U")
+
+ _K = TypeVar("_K", bound=Hashable)
+ _V = TypeVar("_V")
+ _T = TypeVar("_T")
+
+
+ class _Sentinel:
+     ...
+
+
+ ALL_PINNED_SENTINEL = _Sentinel()
+
+
+ class Device(enum.Enum):
+     GPU = enum.auto()
+     CPU = enum.auto()
+
+
+ class LayerBlockType(enum.Enum):
+     attention = "attention"
+     mamba = "mamba"
+
+
+ class Counter:
+
+     def __init__(self, start: int = 0) -> None:
+         self.counter = start
+
+     def __next__(self) -> int:
+         i = self.counter
+         self.counter += 1
+         return i
+
+     def reset(self) -> None:
+         self.counter = 0
+
+
+ class _MappingOrderCacheView(UserDict[_K, _V]):
+
+     def __init__(self, data: Mapping[_K, _V], ordered_keys: Mapping[_K, None]):
+         super().__init__(data)
+         self.ordered_keys = ordered_keys
+
+     def __iter__(self) -> Iterator[_K]:
+         return iter(self.ordered_keys)
+
+     def keys(self) -> KeysView[_K]:
+         return KeysView(self.ordered_keys)
+
+
+ class CacheInfo(NamedTuple):
+     hits: int
+     total: int
+
+     @property
+     def hit_ratio(self) -> float:
+         if self.total == 0:
+             return 0
+
+         return self.hits / self.total
+
+     def __sub__(self, other: CacheInfo):
+         return CacheInfo(
+             hits=self.hits - other.hits,
+             total=self.total - other.total,
+         )
+
+
+ class LRUCache(cachetools.LRUCache[_K, _V], Generic[_K, _V]):
+
+     def __init__(self,
+                  capacity: float,
+                  getsizeof: Optional[Callable[[_V], float]] = None):
+         super().__init__(capacity, getsizeof)
+
+         self.pinned_items = set[_K]()
+
+         self._hits = 0
+         self._total = 0
+         self._last_info = CacheInfo(hits=0, total=0)
+
+     def __getitem__(self, key: _K, *, update_info: bool = True) -> _V:
+         value = super().__getitem__(key)
+
+         if update_info:
+             self._hits += 1
+             self._total += 1
+
+         return value
+
+     def __delitem__(self, key: _K) -> None:
+         run_on_remove = key in self
+         value = self.__getitem__(key,
+                                  update_info=False)  # type: ignore[call-arg]
+         super().__delitem__(key)
+         if key in self.pinned_items:
+             # TODO: add a warning when a pinned item is deleted
+             self._unpin(key)
+         if run_on_remove:
+             self._on_remove(key, value)
+
+     @property
+     def cache(self) -> Mapping[_K, _V]:
+         """Return the internal cache dictionary in order (read-only)."""
+         return _MappingOrderCacheView(
+             self._Cache__data,  # type: ignore
+             self.order)
+
+     @property
+     def order(self) -> Mapping[_K, None]:
+         """Return the internal order dictionary (read-only)."""
+         return MappingProxyType(self._LRUCache__order)  # type: ignore
+
+     @property
+     def capacity(self) -> float:
+         return self.maxsize
+
+     @property
+     def usage(self) -> float:
+         if self.maxsize == 0:
+             return 0
+
+         return self.currsize / self.maxsize
+
+     def stat(self, *, delta: bool = False) -> CacheInfo:
+         """
+         Gets the cumulative number of hits and queries against this cache.
+
+         If :code:`delta=True`, instead gets these statistics
+         since the last call that also passed :code:`delta=True`.
+         """
+         info = CacheInfo(hits=self._hits, total=self._total)
+
+         if delta:
+             info_delta = info - self._last_info
+             self._last_info = info
+             info = info_delta
+
+         return info
+
+     def touch(self, key: _K) -> None:
+         try:
+             self._LRUCache__order.move_to_end(key)  # type: ignore
+         except KeyError:
+             self._LRUCache__order[key] = None  # type: ignore
+
+     @overload
+     def get(self, key: _K, /) -> Optional[_V]:
+         ...
+
+     @overload
+     def get(self, key: _K, /, default: Union[_V, _T]) -> Union[_V, _T]:
+         ...
+
+     def get(self,
+             key: _K,
+             /,
+             default: Optional[Union[_V,
+                                     _T]] = None) -> Optional[Union[_V, _T]]:
+         value: Optional[Union[_V, _T]]
+         if key in self:
+             value = self.__getitem__(
+                 key, update_info=False)  # type: ignore[call-arg]
+
+             self._hits += 1
+         else:
+             value = default
+
+         self._total += 1
+         return value
+
+     @overload
+     def pop(self, key: _K) -> _V:
+         ...
+
+     @overload
+     def pop(self, key: _K, default: Union[_V, _T]) -> Union[_V, _T]:
+         ...
+
+     def pop(self,
+             key: _K,
+             default: Optional[Union[_V,
+                                     _T]] = None) -> Optional[Union[_V, _T]]:
+         value: Optional[Union[_V, _T]]
+         if key not in self:
+             return default
+
+         value = self.__getitem__(key,
+                                  update_info=False)  # type: ignore[call-arg]
+         self.__delitem__(key)
+         return value
+
+     def put(self, key: _K, value: _V) -> None:
+         self.__setitem__(key, value)
+
+     def pin(self, key: _K) -> None:
+         """
+         Pins a key in the cache, preventing it from being
+         evicted in LRU order.
+         """
+         if key not in self:
+             raise ValueError(f"Cannot pin key: {key} not in cache.")
+         self.pinned_items.add(key)
+
+     def _unpin(self, key: _K) -> None:
+         """
+         Unpins a key in the cache, allowing it to be
+         evicted in LRU order.
+         """
+         self.pinned_items.remove(key)
+
+     def _on_remove(self, key: _K, value: Optional[_V]) -> None:
+         pass
+
+     def remove_oldest(self, *, remove_pinned: bool = False) -> None:
+         if len(self) == 0:
+             return
+
+         self.popitem(remove_pinned=remove_pinned)
+
+     def _remove_old_if_needed(self) -> None:
+         while self.currsize > self.capacity:
+             self.remove_oldest()
+
+     def popitem(self, remove_pinned: bool = False):
+         """Remove and return the `(key, value)` pair least recently used."""
+         if not remove_pinned:
+             # pop the oldest item in the cache that is not pinned
+             lru_key = next(
+                 (key for key in self.order if key not in self.pinned_items),
+                 ALL_PINNED_SENTINEL)
+             if lru_key is ALL_PINNED_SENTINEL:
+                 raise RuntimeError("All items are pinned, "
+                                    "cannot remove oldest from the cache.")
+         else:
+             lru_key = next(iter(self.order))
+         value = self.pop(cast(_K, lru_key))
+         return (lru_key, value)
+
+     def clear(self) -> None:
+         while len(self) > 0:
+             self.remove_oldest(remove_pinned=True)
+
+         self._hits = 0
+         self._total = 0
+         self._last_info = CacheInfo(hits=0, total=0)
+
+
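Editor's note: a minimal usage sketch of the pinning behavior above, illustrative only and not part of the packaged file. A pinned key is skipped by popitem() until remove_pinned=True is passed or nothing else is left to evict.

from vllm.utils import CacheInfo, LRUCache

cache = LRUCache[str, int](capacity=2)
cache.put("a", 1)
cache.put("b", 2)
cache.pin("a")            # "a" is now exempt from LRU eviction
cache.put("c", 3)         # evicts "b", the oldest *unpinned* entry
assert "a" in cache and "b" not in cache

cache.get("a")            # counted lookup: one hit
assert cache.stat() == CacheInfo(hits=1, total=1)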
+ class PyObjectCache:
+     """Used to cache python objects to avoid object allocations
+     across scheduler iterations.
+     """
+
+     def __init__(self, obj_builder):
+         self._obj_builder = obj_builder
+         self._index = 0
+
+         self._obj_cache = []
+         for _ in range(128):
+             self._obj_cache.append(self._obj_builder())
+
+     def _grow_cache(self):
+         # Double the size of the cache
+         num_objs = len(self._obj_cache)
+         for _ in range(num_objs):
+             self._obj_cache.append(self._obj_builder())
+
+     def get_object(self):
+         """Returns a pre-allocated cached object. If there are not enough
+         objects, the cache size is doubled.
+         """
+         if self._index >= len(self._obj_cache):
+             self._grow_cache()
+             assert self._index < len(self._obj_cache)
+
+         obj = self._obj_cache[self._index]
+         self._index += 1
+
+         return obj
+
+     def reset(self):
+         """Makes all cached objects available for the next scheduler
+         iteration.
+         """
+         self._index = 0
+
+
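Editor's note: an illustrative sketch of how this object pool is meant to be driven, not part of the packaged file; the dict builder is a hypothetical stand-in. Note that reset() only rewinds the index: reused objects keep whatever state the previous iteration left in them, so callers must reinitialize the objects they receive.

from vllm.utils import PyObjectCache

pool = PyObjectCache(lambda: {"token_ids": []})  # hypothetical builder
obj = pool.get_object()        # hands out a pre-allocated dict
obj["token_ids"].append(42)
pool.reset()                   # next iteration reuses the same objects
assert pool.get_object() is obj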
+ @cache
+ def get_max_shared_memory_bytes(gpu: int = 0) -> int:
+     """Returns the maximum shared memory per thread block in bytes."""
+     from vllm import _custom_ops as ops
+     max_shared_mem = (
+         ops.get_max_shared_memory_per_block_device_attribute(gpu))
+     # a value of 0 would make MAX_SEQ_LEN negative and cause
+     # test_attention.py to fail
+     assert max_shared_mem > 0, "max_shared_mem can not be zero"
+     return int(max_shared_mem)
+
+
+ def get_cpu_memory() -> int:
+     """Returns the total CPU memory of the node in bytes."""
+     return psutil.virtual_memory().total
+
+
+ def random_uuid() -> str:
+     return str(uuid.uuid4().hex)
+
+
+ def make_async(
+     func: Callable[P, T],
+     executor: Optional[concurrent.futures.Executor] = None
+ ) -> Callable[P, Awaitable[T]]:
+     """Take a blocking function, and run it in an executor thread.
+
+     This function prevents the blocking function from blocking the
+     asyncio event loop.
+     The code in this function needs to be thread safe.
+     """
+
+     def _async_wrapper(*args: P.args, **kwargs: P.kwargs) -> asyncio.Future:
+         loop = asyncio.get_event_loop()
+         p_func = partial(func, *args, **kwargs)
+         return loop.run_in_executor(executor=executor, func=p_func)
+
+     return _async_wrapper
+
+
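Editor's note: a small usage sketch for make_async, illustrative only and not part of the packaged file; slow_lookup is a hypothetical blocking function.

import asyncio
import time

from vllm.utils import make_async

def slow_lookup(x: int) -> int:
    time.sleep(0.1)  # stands in for blocking I/O
    return x * 2

async def main():
    fast_lookup = make_async(slow_lookup)
    # Both calls run in the default executor; the event loop stays free.
    print(await asyncio.gather(fast_lookup(1), fast_lookup(2)))  # [2, 4]

asyncio.run(main())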
+ def _next_task(iterator: AsyncGenerator[T, None],
+                loop: AbstractEventLoop) -> Task:
+     # Can use anext() in python >= 3.10
+     return loop.create_task(iterator.__anext__())  # type: ignore[arg-type]
+
+
+ async def merge_async_iterators(
+     *iterators: AsyncGenerator[T,
+                                None], ) -> AsyncGenerator[tuple[int, T], None]:
+     """Merge multiple asynchronous iterators into a single iterator.
+
+     This method handles the case where some iterators finish before others.
+     When it yields, it yields a tuple (i, item) where i is the index of the
+     iterator that yielded the item.
+     """
+     if len(iterators) == 1:
+         # Fast-path single iterator case.
+         async for item in iterators[0]:
+             yield 0, item
+         return
+
+     loop = asyncio.get_running_loop()
+
+     awaits = {_next_task(pair[1], loop): pair for pair in enumerate(iterators)}
+     try:
+         while awaits:
+             done, _ = await asyncio.wait(awaits.keys(),
+                                          return_when=FIRST_COMPLETED)
+             for d in done:
+                 pair = awaits.pop(d)
+                 try:
+                     item = await d
+                     i, it = pair
+                     awaits[_next_task(it, loop)] = pair
+                     yield i, item
+                 except StopAsyncIteration:
+                     pass
+     finally:
+         # Cancel any remaining iterators
+         for f, (_, it) in awaits.items():
+             with contextlib.suppress(BaseException):
+                 f.cancel()
+                 await it.aclose()
+
+
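Editor's note: an illustrative sketch of the merge above, not part of the packaged file. Items arrive tagged with the index of the producing generator; the exact interleaving depends on scheduling.

import asyncio

from vllm.utils import merge_async_iterators

async def gen(tag: str, n: int):
    for i in range(n):
        await asyncio.sleep(0)
        yield f"{tag}{i}"

async def main():
    async for idx, item in merge_async_iterators(gen("a", 2), gen("b", 3)):
        print(idx, item)  # e.g. 0 a0 / 1 b0 / 0 a1 / 1 b1 / 1 b2

asyncio.run(main())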
+ async def collect_from_async_generator(
+         iterator: AsyncGenerator[T, None]) -> list[T]:
+     """Collect all items from an async generator into a list."""
+     items = []
+     async for item in iterator:
+         items.append(item)
+     return items
+
+
+ def get_ip() -> str:
+     host_ip = envs.VLLM_HOST_IP
+     if "HOST_IP" in os.environ and "VLLM_HOST_IP" not in os.environ:
+         logger.warning(
+             "The environment variable HOST_IP is deprecated and ignored, as"
+             " it is often used by Docker and other software to"
+             " interact with the container's network stack. Please "
+             "use VLLM_HOST_IP instead to set the IP address for vLLM processes"
+             " to communicate with each other.")
+     if host_ip:
+         return host_ip
+
+     # IP is not set, try to get it from the network interface
+
+     # try ipv4
+     s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+     try:
+         s.connect(("8.8.8.8", 80))  # Doesn't need to be reachable
+         return s.getsockname()[0]
+     except Exception:
+         pass
+
+     # try ipv6
+     try:
+         s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
+         # Google's public DNS server, see
+         # https://developers.google.com/speed/public-dns/docs/using#addresses
+         s.connect(("2001:4860:4860::8888", 80))  # Doesn't need to be reachable
+         return s.getsockname()[0]
+     except Exception:
+         pass
+
+     warnings.warn(
+         "Failed to get the IP address, using 0.0.0.0 by default."
+         " The value can be set by the environment variable"
+         " VLLM_HOST_IP or HOST_IP.",
+         stacklevel=2)
+     return "0.0.0.0"
+
+
+ def is_valid_ipv6_address(address: str) -> bool:
+     try:
+         ipaddress.IPv6Address(address)
+         return True
+     except ValueError:
+         return False
+
+
+ def get_distributed_init_method(ip: str, port: int) -> str:
+     # Brackets are not permitted in ipv4 addresses,
+     # see https://github.com/python/cpython/issues/103848
+     return f"tcp://[{ip}]:{port}" if ":" in ip else f"tcp://{ip}:{port}"
+
+
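Editor's note: a tiny worked example of the bracketing rule above, illustrative only and not part of the packaged file. IPv6 addresses contain colons, so they are bracketed to keep the port separator unambiguous.

from vllm.utils import get_distributed_init_method

assert get_distributed_init_method("10.0.0.1", 29500) == "tcp://10.0.0.1:29500"
assert get_distributed_init_method("::1", 29500) == "tcp://[::1]:29500"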
+ def get_open_zmq_ipc_path() -> str:
+     base_rpc_path = envs.VLLM_RPC_BASE_PATH
+     return f"ipc://{base_rpc_path}/{uuid4()}"
+
+
+ def get_open_zmq_inproc_path() -> str:
+     return f"inproc://{uuid4()}"
+
+
+ def get_open_port() -> int:
+     """
+     Get an open port for the vLLM process to listen on.
+     One edge case to handle: when we run data parallel,
+     we need to avoid ports that are potentially used by
+     the data parallel master process.
+     Right now we reserve 10 ports for the data parallel master
+     process. Currently it uses 2 ports.
+     """
+     if "VLLM_DP_MASTER_PORT" in os.environ:
+         dp_master_port = envs.VLLM_DP_MASTER_PORT
+         reserved_port_range = range(dp_master_port, dp_master_port + 10)
+         while True:
+             candidate_port = _get_open_port()
+             if candidate_port not in reserved_port_range:
+                 return candidate_port
+     return _get_open_port()
+
+
+ def _get_open_port() -> int:
+     port = envs.VLLM_PORT
+     if port is not None:
+         while True:
+             try:
+                 with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+                     s.bind(("", port))
+                     return port
+             except OSError:
+                 port += 1  # Increment port number if already in use
+                 logger.info("Port %d is already in use, trying port %d",
+                             port - 1, port)
+     # try ipv4
+     try:
+         with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+             s.bind(("", 0))
+             return s.getsockname()[1]
+     except OSError:
+         # try ipv6
+         with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
+             s.bind(("", 0))
+             return s.getsockname()[1]
+
+
+ def find_process_using_port(port: int) -> Optional[psutil.Process]:
+     # TODO: We can not check for running processes with network
+     # port on macOS. Therefore, we can not have a full graceful shutdown
+     # of vLLM. For now, let's not look for processes in this case.
+     # Ref: https://www.florianreinhard.de/accessdenied-in-psutil/
+     if sys.platform.startswith("darwin"):
+         return None
+
+     for conn in psutil.net_connections():
+         if conn.laddr.port == port:
+             try:
+                 return psutil.Process(conn.pid)
+             except psutil.NoSuchProcess:
+                 return None
+     return None
+
+
+ def update_environment_variables(envs: dict[str, str]):
+     for k, v in envs.items():
+         if k in os.environ and os.environ[k] != v:
+             logger.warning(
+                 "Overwriting environment variable %s "
+                 "from '%s' to '%s'", k, os.environ[k], v)
+         os.environ[k] = v
+
+
+ def chunk_list(lst: list[T], chunk_size: int):
+     """Yield successive chunk_size chunks from lst."""
+     for i in range(0, len(lst), chunk_size):
+         yield lst[i:i + chunk_size]
+
+
+ def cdiv(a: int, b: int) -> int:
+     """Ceiling division."""
+     return -(a // -b)
+
+
+ def round_up(x: int, y: int) -> int:
+     return ((x + y - 1) // y) * y
+
+
+ def round_down(x: int, y: int) -> int:
+     return (x // y) * y
+
+
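Editor's note: a worked example of the three integer helpers above, illustrative only and not part of the packaged file. They show up throughout vLLM for block-count arithmetic, e.g. sizing KV-cache blocks of 16 tokens for a 100-token sequence.

from vllm.utils import cdiv, round_up, round_down

assert cdiv(100, 16) == 7         # 7 blocks of 16 cover 100 tokens
assert round_up(100, 16) == 112   # padded length: 7 * 16
assert round_down(100, 16) == 96  # largest multiple of 16 <= 100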
714
+ def _generate_random_fp8(
715
+ tensor: torch.Tensor,
716
+ low: float,
717
+ high: float,
718
+ ) -> None:
719
+ # NOTE(zhaoyang): Due to NaN and Inf representation for fp8 data type,
720
+ # it may occur Inf or NaN if we directly use torch.randint
721
+ # to generate random data for fp8 data.
722
+ # For example, s.11111.00 in fp8e5m2 format represents Inf.
723
+ # | E4M3 | E5M2
724
+ #-----|-------------|-------------------
725
+ # Inf | N/A | s.11111.00
726
+ # NaN | s.1111.111 | s.11111.{01,10,11}
727
+ from vllm import _custom_ops as ops
728
+ tensor_tmp = torch.empty_like(tensor, dtype=torch.float16)
729
+ tensor_tmp.uniform_(low, high)
730
+ ops.convert_fp8(tensor, tensor_tmp)
731
+ del tensor_tmp
732
+
+
+ def get_kv_cache_torch_dtype(
+         cache_dtype: Optional[Union[str, torch.dtype]],
+         model_dtype: Optional[Union[str, torch.dtype]] = None) -> torch.dtype:
+     if isinstance(cache_dtype, str):
+         if cache_dtype == "auto":
+             if isinstance(model_dtype, str):
+                 torch_dtype = STR_DTYPE_TO_TORCH_DTYPE[model_dtype]
+             elif isinstance(model_dtype, torch.dtype):
+                 torch_dtype = model_dtype
+             else:
+                 raise ValueError(f"Invalid model dtype: {model_dtype}")
+         elif cache_dtype in ["half", "bfloat16", "float"]:
+             torch_dtype = STR_DTYPE_TO_TORCH_DTYPE[cache_dtype]
+         elif cache_dtype == "fp8":
+             torch_dtype = torch.uint8
+         else:
+             raise ValueError(f"Invalid kv cache dtype: {cache_dtype}")
+     elif isinstance(cache_dtype, torch.dtype):
+         torch_dtype = cache_dtype
+     else:
+         raise ValueError(f"Invalid kv cache dtype: {cache_dtype}")
+     return torch_dtype
+
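As an illustration (editor's sketch, not part of the package diff), the dtype resolution above defers to the model dtype for "auto" and stores fp8 in uint8 containers:

```python
# Hypothetical usage sketch, assuming the helper is importable from vllm.utils.
import torch
from vllm.utils import get_kv_cache_torch_dtype

assert get_kv_cache_torch_dtype("auto", torch.bfloat16) == torch.bfloat16
assert get_kv_cache_torch_dtype("half") == torch.float16
assert get_kv_cache_torch_dtype("fp8") == torch.uint8  # byte-sized container
```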
+
+
+ def create_kv_caches_with_random_flash(
+     num_blocks: int,
+     block_size: int,
+     num_layers: int,
+     num_heads: int,
+     head_size: int,
+     cache_dtype: Optional[Union[str, torch.dtype]],
+     model_dtype: Optional[Union[str, torch.dtype]] = None,
+     seed: Optional[int] = None,
+     device: Optional[str] = "cuda",
+     cache_layout: Optional[str] = "NHD",
+ ) -> tuple[list[torch.Tensor], list[torch.Tensor]]:
+     from vllm.platforms import current_platform
+     current_platform.seed_everything(seed)
+
+     torch_dtype = get_kv_cache_torch_dtype(cache_dtype, model_dtype)
+     generic_kv_cache_shape = (num_blocks, 2, block_size, num_heads, head_size)
+     assert cache_layout in ("NHD", "HND")
+     stride_order = (0, 1, 2, 3, 4) if cache_layout == "NHD" else (0, 1, 3, 2, 4)
+
+     kv_cache_allocation_shape = tuple(generic_kv_cache_shape[i]
+                                       for i in stride_order)
+     scale = head_size**-0.5
+
+     key_caches: list[torch.Tensor] = []
+     value_caches: list[torch.Tensor] = []
+
+     for _ in range(num_layers):
+         key_value_cache = torch.empty(size=kv_cache_allocation_shape,
+                                       dtype=torch_dtype,
+                                       device=device).permute(*stride_order)
+         if cache_dtype in ["auto", "half", "bfloat16", "float"]:
+             key_value_cache.uniform_(-scale, scale)
+         elif cache_dtype == 'fp8':
+             _generate_random_fp8(key_value_cache, -scale, scale)
+         else:
+             raise ValueError(
+                 f"Does not support key cache of type {cache_dtype}")
+         key_caches.append(key_value_cache[:, 0])
+         value_caches.append(key_value_cache[:, 1])
+     return key_caches, value_caches
+
+
+ def create_kv_caches_with_random(
+     num_blocks: int,
+     block_size: int,
+     num_layers: int,
+     num_heads: int,
+     head_size: int,
+     cache_dtype: Optional[Union[str, torch.dtype]],
+     model_dtype: Optional[Union[str, torch.dtype]] = None,
+     seed: Optional[int] = None,
+     device: Optional[str] = "cuda",
+ ) -> tuple[list[torch.Tensor], list[torch.Tensor]]:
+
+     if cache_dtype == "fp8" and head_size % 16:
+         raise ValueError(
+             f"Does not support key cache of type fp8 with head_size {head_size}"
+         )
+     from vllm.platforms import current_platform
+     current_platform.seed_everything(seed)
+
+     torch_dtype = get_kv_cache_torch_dtype(cache_dtype, model_dtype)
+
+     scale = head_size**-0.5
+     x = 16 // torch.tensor([], dtype=torch_dtype).element_size()
+     key_cache_shape = (num_blocks, num_heads, head_size // x, block_size, x)
+     key_caches: list[torch.Tensor] = []
+     for _ in range(num_layers):
+         key_cache = torch.empty(size=key_cache_shape,
+                                 dtype=torch_dtype,
+                                 device=device)
+         if cache_dtype in ["auto", "half", "bfloat16", "float"]:
+             key_cache.uniform_(-scale, scale)
+         elif cache_dtype == 'fp8':
+             _generate_random_fp8(key_cache, -scale, scale)
+         else:
+             raise ValueError(
+                 f"Does not support key cache of type {cache_dtype}")
+         key_caches.append(key_cache)
+
+     value_cache_shape = (num_blocks, num_heads, head_size, block_size)
+     value_caches: list[torch.Tensor] = []
+     for _ in range(num_layers):
+         value_cache = torch.empty(size=value_cache_shape,
+                                   dtype=torch_dtype,
+                                   device=device)
+         if cache_dtype in ["auto", "half", "bfloat16", "float"]:
+             value_cache.uniform_(-scale, scale)
+         elif cache_dtype == 'fp8':
+             _generate_random_fp8(value_cache, -scale, scale)
+         else:
+             raise ValueError(
+                 f"Does not support value cache of type {cache_dtype}")
+         value_caches.append(value_cache)
+     return key_caches, value_caches
+
+
+ @cache
+ def is_pin_memory_available() -> bool:
+     from vllm.platforms import current_platform
+     return current_platform.is_pin_memory_available()
+
+
+ @cache
+ def is_uva_available() -> bool:
+     """Check if Unified Virtual Addressing (UVA) is available."""
+     # UVA requires pinned memory.
+     # TODO: Add more requirements for UVA if needed.
+     return is_pin_memory_available()
+
+
+ class DeviceMemoryProfiler:
+
+     def __init__(self, device: Optional[torch.types.Device] = None):
+         self.device = device
+
+     def current_memory_usage(self) -> float:
+         # Return the memory usage in bytes.
+         from vllm.platforms import current_platform
+         return current_platform.get_current_memory_usage(self.device)
+
+     def __enter__(self):
+         self.initial_memory = self.current_memory_usage()
+         # This allows us to call methods of the context manager if needed
+         return self
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         self.final_memory = self.current_memory_usage()
+         self.consumed_memory = self.final_memory - self.initial_memory
+
+         # Force garbage collection
+         gc.collect()
+
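A minimal usage sketch for the profiler above (editor's illustration, not part of the package diff; assumes a CUDA-capable `current_platform`):

```python
# Hypothetical usage sketch; requires a GPU for the memory numbers to move.
import torch
from vllm.utils import DeviceMemoryProfiler

with DeviceMemoryProfiler(torch.device("cuda:0")) as profiler:
    buf = torch.empty(1024, 1024, device="cuda:0")  # allocate ~4 MiB of fp32

print(f"consumed {profiler.consumed_memory / 1024**2:.1f} MiB")
```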
+
+ def make_ndarray_with_pad(
+     x: list[list[T]],
+     pad: T,
+     dtype: npt.DTypeLike,
+     *,
+     max_len: Optional[int] = None,
+ ) -> npt.NDArray:
+     """
+     Make a padded array from 2D inputs.
+
+     The padding is applied to the end of each inner list until it reaches
+     `max_len`.
+     """
+     if max_len is None:
+         # Unlike for most functions, map is faster than a genexpr over `len`
+         max_len = max(map(len, x), default=0)
+
+     padded_x = np.full((len(x), max_len), pad, dtype=dtype)
+     for ind, blocktb in enumerate(x):
+         assert len(blocktb) <= max_len
+         padded_x[ind, :len(blocktb)] = blocktb
+
+     return padded_x
+
+
+ def make_tensor_with_pad(
+     x: list[list[T]],
+     pad: T,
+     dtype: torch.dtype,
+     *,
+     max_len: Optional[int] = None,
+     device: Optional[Union[str, torch.device]] = None,
+     pin_memory: bool = False,
+ ) -> torch.Tensor:
+     """
+     Make a padded tensor from 2D inputs.
+
+     The padding is applied to the end of each inner list until it reaches
+     `max_len`.
+     """
+     np_dtype = TORCH_DTYPE_TO_NUMPY_DTYPE[dtype]
+     padded_x = make_ndarray_with_pad(x, pad, np_dtype, max_len=max_len)
+
+     tensor = torch.from_numpy(padded_x).to(device)
+     if pin_memory:
+         tensor = tensor.pin_memory()
+
+     return tensor
+
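For reference, a small sketch (editor's illustration, not part of the package diff) of padding ragged token lists into a dense tensor with the helper above:

```python
# Hypothetical usage sketch, assuming the helper is importable from vllm.utils.
import torch
from vllm.utils import make_tensor_with_pad

t = make_tensor_with_pad([[1, 2, 3], [4]], pad=0, dtype=torch.int64)
assert t.tolist() == [[1, 2, 3], [4, 0, 0]]  # right-padded to max_len=3
```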
+
+ def async_tensor_h2d(
+     data: list,
+     dtype: torch.dtype,
+     target_device: Union[str, torch.device],
+     pin_memory: bool,
+ ) -> torch.Tensor:
+     """Asynchronously create a tensor and copy it from host to device."""
+     t = torch.tensor(data, dtype=dtype, pin_memory=pin_memory, device="cpu")
+     return t.to(device=target_device, non_blocking=True)
+
+
+ def get_dtype_size(dtype: torch.dtype) -> int:
+     """Get the size of the data type in bytes."""
+     return torch.tensor([], dtype=dtype).element_size()
+
+
+ # `collections` helpers
+ def is_list_of(
+     value: object,
+     typ: Union[type[T], tuple[type[T], ...]],
+     *,
+     check: Literal["first", "all"] = "first",
+ ) -> TypeIs[list[T]]:
+     if not isinstance(value, list):
+         return False
+
+     if check == "first":
+         return len(value) == 0 or isinstance(value[0], typ)
+     elif check == "all":
+         return all(isinstance(v, typ) for v in value)
+
+     assert_never(check)
+
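As an illustration (editor's sketch, not part of the package diff), `check="first"` inspects only the first element, while `check="all"` validates every element:

```python
# Hypothetical usage sketch, assuming the helper is importable from vllm.utils.
from vllm.utils import is_list_of

assert is_list_of([1, 2, 3], int)
assert is_list_of([1, "x"], int, check="first")   # only element 0 is checked
assert not is_list_of([1, "x"], int, check="all")
```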
+
+ def flatten_2d_lists(lists: Iterable[Iterable[T]]) -> list[T]:
+     """Flatten a list of lists to a single list."""
+     return [item for sublist in lists for item in sublist]
+
+
+ def full_groupby(values: Iterable[_V], *, key: Callable[[_V], _K]):
+     """
+     Unlike :class:`itertools.groupby`, groups are not broken by
+     non-contiguous data.
+     """
+     groups = defaultdict[_K, list[_V]](list)
+
+     for value in values:
+         groups[key(value)].append(value)
+
+     return groups.items()
+
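A quick sketch (editor's illustration, not part of the package diff) of the non-contiguous grouping above:

```python
# Hypothetical usage sketch, assuming the helper is importable from vllm.utils.
from vllm.utils import full_groupby

pairs = full_groupby(["apple", "beet", "avocado"], key=lambda s: s[0])
assert dict(pairs) == {"a": ["apple", "avocado"], "b": ["beet"]}
```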
+
+ # TODO: This function can be removed if transformer_modules classes are
+ # serialized by value when communicating between processes
+ def init_cached_hf_modules() -> None:
+     """
+     Lazy initialization of the Hugging Face modules.
+     """
+     from transformers.dynamic_module_utils import init_hf_modules
+     init_hf_modules()
+
+
+ @cache
+ def find_library(lib_name: str) -> str:
+     """
+     Find the library file in the system.
+     `lib_name` is full filename, with both prefix and suffix.
+     This function resolves `lib_name` to the full path of the library.
+     """
+     # Adapted from https://github.com/openai/triton/blob/main/third_party/nvidia/backend/driver.py#L19  # noqa
+     # According to https://en.wikipedia.org/wiki/Filesystem_Hierarchy_Standard
+     # `/sbin/ldconfig` should exist in all Linux systems.
+     # `/sbin/ldconfig` searches the library in the system
+     libs = subprocess.check_output(["/sbin/ldconfig", "-p"]).decode()
+     # each line looks like the following:
+     # libcuda.so.1 (libc6,x86-64) => /lib/x86_64-linux-gnu/libcuda.so.1
+     locs = [line.split()[-1] for line in libs.splitlines() if lib_name in line]
+     # `LD_LIBRARY_PATH` searches the library in the user-defined paths
+     env_ld_library_path = envs.LD_LIBRARY_PATH
+     if not locs and env_ld_library_path:
+         locs = [
+             os.path.join(dir, lib_name)
+             for dir in env_ld_library_path.split(":")
+             if os.path.exists(os.path.join(dir, lib_name))
+         ]
+     if not locs:
+         raise ValueError(f"Cannot find {lib_name} in the system.")
+     return locs[0]
+
+
+ def find_nccl_library() -> str:
+     """
+     We either use the library file specified by the `VLLM_NCCL_SO_PATH`
+     environment variable, or we find the library file brought by PyTorch.
+     After importing `torch`, `libnccl.so.2` or `librccl.so.1` can be
+     found by `ctypes` automatically.
+     """
+     so_file = envs.VLLM_NCCL_SO_PATH
+
+     # manually load the nccl library
+     if so_file:
+         logger.info(
+             "Found nccl from environment variable VLLM_NCCL_SO_PATH=%s",
+             so_file)
+     else:
+         if torch.version.cuda is not None:
+             so_file = "libnccl.so.2"
+         elif torch.version.hip is not None:
+             so_file = "librccl.so.1"
+         else:
+             raise ValueError("NCCL only supports CUDA and ROCm backends.")
+         logger.info("Found nccl from library %s", so_file)
+     return so_file
+
+
+ prev_set_stream = torch.cuda.set_stream
+
+ _current_stream = None
+
+
+ def _patched_set_stream(stream: torch.cuda.Stream) -> None:
+     global _current_stream
+     _current_stream = stream
+     prev_set_stream(stream)
+
+
+ torch.cuda.set_stream = _patched_set_stream
+
+
+ def current_stream() -> torch.cuda.Stream:
+     """
+     Replace `torch.cuda.current_stream()` with `vllm.utils.current_stream()`.
+     It turns out that `torch.cuda.current_stream()` is quite expensive,
+     as it constructs a new stream object at each call.
+     Here we patch `torch.cuda.set_stream` to keep track of the current stream
+     directly, so that we can avoid calling `torch.cuda.current_stream()`.
+
+     The underlying hypothesis is that we do not call
+     `torch._C._cuda_setStream` from C/C++ code.
+     """
+     from vllm.platforms import current_platform
+     global _current_stream
+     if _current_stream is None:
+         # When this function is called before any stream is set,
+         # we return the default stream.
+         # On ROCm, using the default 0 stream in combination with RCCL
+         # hurts performance, so we create a dedicated stream per process.
+         _current_stream = torch.cuda.Stream() if current_platform.is_rocm(
+         ) else torch.cuda.current_stream()
+     return _current_stream
+
+
+ def enable_trace_function_call_for_thread(vllm_config: VllmConfig) -> None:
+     """Set up function tracing for the current thread,
+     if enabled via the VLLM_TRACE_FUNCTION environment variable.
+     """
+
+     if envs.VLLM_TRACE_FUNCTION:
+         tmp_dir = tempfile.gettempdir()
+         # add username to tmp_dir to avoid permission issues
+         tmp_dir = os.path.join(tmp_dir, getpass.getuser())
+         filename = (f"VLLM_TRACE_FUNCTION_for_process_{os.getpid()}"
+                     f"_thread_{threading.get_ident()}_"
+                     f"at_{datetime.datetime.now()}.log").replace(" ", "_")
+         log_path = os.path.join(tmp_dir, "vllm",
+                                 f"vllm-instance-{vllm_config.instance_id}",
+                                 filename)
+         os.makedirs(os.path.dirname(log_path), exist_ok=True)
+         enable_trace_function_call(log_path)
+
+
+ # `functools` helpers
+ def identity(value: T, **kwargs) -> T:
+     """Returns the first provided value."""
+     return value
+
+
+ F = TypeVar('F', bound=Callable[..., Any])
+
+
+
+ def deprecate_args(
+     start_index: int,
+     is_deprecated: Union[bool, Callable[[], bool]] = True,
+     additional_message: Optional[str] = None,
+ ) -> Callable[[F], F]:
+
+     if not callable(is_deprecated):
+         is_deprecated = partial(identity, is_deprecated)
+
+     def wrapper(fn: F) -> F:
+
+         params = inspect.signature(fn).parameters
+         pos_types = (
+             inspect.Parameter.POSITIONAL_ONLY,
+             inspect.Parameter.POSITIONAL_OR_KEYWORD,
+         )
+         pos_kws = [
+             kw for kw, param in params.items() if param.kind in pos_types
+         ]
+
+         @wraps(fn)
+         def inner(*args, **kwargs):
+             if is_deprecated():
+                 deprecated_args = pos_kws[start_index:len(args)]
+                 if deprecated_args:
+                     msg = (
+                         f"The positional arguments {deprecated_args} are "
+                         "deprecated and will be removed in a future update.")
+                     if additional_message is not None:
+                         msg += f" {additional_message}"
+
+                     warnings.warn(
+                         DeprecationWarning(msg),
+                         stacklevel=3,  # The inner function takes up one level
+                     )
+
+             return fn(*args, **kwargs)
+
+         return inner  # type: ignore
+
+     return wrapper
+
+
+ def deprecate_kwargs(
+     *kws: str,
+     is_deprecated: Union[bool, Callable[[], bool]] = True,
+     additional_message: Optional[str] = None,
+ ) -> Callable[[F], F]:
+     deprecated_kws = set(kws)
+
+     if not callable(is_deprecated):
+         is_deprecated = partial(identity, is_deprecated)
+
+     def wrapper(fn: F) -> F:
+
+         @wraps(fn)
+         def inner(*args, **kwargs):
+             if is_deprecated():
+                 deprecated_kwargs = kwargs.keys() & deprecated_kws
+                 if deprecated_kwargs:
+                     msg = (
+                         f"The keyword arguments {deprecated_kwargs} are "
+                         "deprecated and will be removed in a future update.")
+                     if additional_message is not None:
+                         msg += f" {additional_message}"
+
+                     warnings.warn(
+                         DeprecationWarning(msg),
+                         stacklevel=3,  # The inner function takes up one level
+                     )
+
+             return fn(*args, **kwargs)
+
+         return inner  # type: ignore
+
+     return wrapper
+
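A usage sketch for the decorator above (editor's illustration, not part of the package diff; `load` and `legacy_mode` are hypothetical names):

```python
# Hypothetical usage sketch, assuming the decorator is importable from
# vllm.utils.
import warnings

from vllm.utils import deprecate_kwargs

@deprecate_kwargs("legacy_mode", additional_message="Use `mode` instead.")
def load(path: str, legacy_mode: bool = False, mode: str = "auto") -> str:
    return mode

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    load("model.bin", legacy_mode=True)  # emits a DeprecationWarning
assert any(issubclass(w.category, DeprecationWarning) for w in caught)
```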
+
+ @lru_cache(maxsize=8)
+ def _cuda_device_count_stateless(
+         cuda_visible_devices: Optional[str] = None) -> int:
+     # Note: cuda_visible_devices is not used, but we keep it as an argument for
+     # LRU Cache purposes.
+
+     # Code below is based on
+     # https://github.com/pytorch/pytorch/blob/
+     # c1cd946818442aca8c7f812b16d187ce1586c3bc/
+     # torch/cuda/__init__.py#L831C1-L831C17
+     import torch.cuda
+     import torch.version
+
+     from vllm.platforms import current_platform
+     if not torch.cuda._is_compiled():
+         return 0
+     if current_platform.is_rocm():
+         # ROCm uses amdsmi instead of nvml for stateless device count.
+         # This requires a sufficiently modern version of torch (2.4.0+).
+         raw_count = torch.cuda._device_count_amdsmi() if (hasattr(
+             torch.cuda, "_device_count_amdsmi")) else -1
+     else:
+         raw_count = torch.cuda._device_count_nvml()
+     r = torch._C._cuda_getDeviceCount() if raw_count < 0 else raw_count
+     return r
+
+
+ def cuda_device_count_stateless() -> int:
+     """Get number of CUDA devices, caching based on the value of
+     CUDA_VISIBLE_DEVICES at the time of call.
+
+     This should be used instead of torch.cuda.device_count()
+     unless CUDA_VISIBLE_DEVICES has already been set to the desired
+     value."""
+
+     # This can be removed and simply replaced with torch.cuda.get_device_count
+     # after https://github.com/pytorch/pytorch/pull/122815 is released.
+     return _cuda_device_count_stateless(envs.CUDA_VISIBLE_DEVICES)
+
+
+ def cuda_is_initialized() -> bool:
+     """Check if CUDA is initialized."""
+     if not torch.cuda._is_compiled():
+         return False
+     return torch.cuda.is_initialized()
+
+
+ def cuda_get_device_properties(device,
+                                names: Sequence[str],
+                                init_cuda=False) -> tuple[Any, ...]:
+     """Get specified CUDA device property values without initializing CUDA in
+     the current process."""
+     if init_cuda or cuda_is_initialized():
+         props = torch.cuda.get_device_properties(device)
+         return tuple(getattr(props, name) for name in names)
+
+     # Run in subprocess to avoid initializing CUDA as a side effect.
+     mp_ctx = multiprocessing.get_context("fork")
+     with ProcessPoolExecutor(max_workers=1, mp_context=mp_ctx) as executor:
+         return executor.submit(cuda_get_device_properties, device, names,
+                                True).result()
+
+
+ def weak_bind(bound_method: Callable[..., Any]) -> Callable[..., None]:
+     """Make an instance method that weakly references
+     its associated instance and no-ops once that
+     instance is collected."""
+     ref = weakref.ref(bound_method.__self__)  # type: ignore[attr-defined]
+     unbound = bound_method.__func__  # type: ignore[attr-defined]
+
+     def weak_bound(*args, **kwargs) -> None:
+         if inst := ref():
+             unbound(inst, *args, **kwargs)
+
+     return weak_bound
+
+
+ # From: https://stackoverflow.com/a/4104188/2749989
+ def run_once(f: Callable[P, None]) -> Callable[P, None]:
+
+     def wrapper(*args: P.args, **kwargs: P.kwargs) -> None:
+         if not wrapper.has_run:  # type: ignore[attr-defined]
+             wrapper.has_run = True  # type: ignore[attr-defined]
+             return f(*args, **kwargs)
+
+     wrapper.has_run = False  # type: ignore[attr-defined]
+     return wrapper
+
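As an illustration (editor's sketch, not part of the package diff), the decorator above turns a function into a no-op after its first call:

```python
# Hypothetical usage sketch, assuming the decorator is importable from
# vllm.utils.
from vllm.utils import run_once

@run_once
def configure() -> None:
    print("configured")

configure()  # prints "configured"
configure()  # does nothing
```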
+
+ class StoreBoolean(Action):
+
+     def __call__(self, parser, namespace, values, option_string=None):
+         if values.lower() == "true":
+             setattr(namespace, self.dest, True)
+         elif values.lower() == "false":
+             setattr(namespace, self.dest, False)
+         else:
+             raise ValueError(f"Invalid boolean value: {values}. "
+                              "Expected 'true' or 'false'.")
+
+
+ class SortedHelpFormatter(ArgumentDefaultsHelpFormatter):
+     """SortedHelpFormatter that sorts arguments by their option strings."""
+
+     def _split_lines(self, text, width):
+         """
+         1. Sentences split across lines have their single newlines removed.
+         2. Paragraphs and explicit newlines are split into separate lines.
+         3. Each line is wrapped to the specified width (width of terminal).
+         """
+         # The patterns also include whitespace after the newline
+         single_newline = re.compile(r"(?<!\n)\n(?!\n)\s*")
+         multiple_newlines = re.compile(r"\n{2,}\s*")
+         text = single_newline.sub(' ', text)
+         lines = re.split(multiple_newlines, text)
+         return sum([textwrap.wrap(line, width) for line in lines], [])
+
+     def add_arguments(self, actions):
+         actions = sorted(actions, key=lambda x: x.option_strings)
+         super().add_arguments(actions)
+
+
+ class FlexibleArgumentParser(ArgumentParser):
+     """ArgumentParser that allows both underscore and dash in names."""
+
+     def __init__(self, *args, **kwargs):
+         # Set the default 'formatter_class' to SortedHelpFormatter
+         if 'formatter_class' not in kwargs:
+             kwargs['formatter_class'] = SortedHelpFormatter
+         super().__init__(*args, **kwargs)
+
+     def parse_args(self, args=None, namespace=None):
+         if args is None:
+             args = sys.argv[1:]
+
+         # Check for --model in command line arguments first
+         if args and args[0] == "serve":
+             model_in_cli_args = any(arg == '--model' for arg in args)
+
+             if model_in_cli_args:
+                 raise ValueError(
+                     "With `vllm serve`, you should provide the model as a "
+                     "positional argument or in a config file instead of via "
+                     "the `--model` option.")
+
+         if '--config' in args:
+             args = self._pull_args_from_config(args)
+
+         # Convert underscores to dashes and vice versa in argument names
+         processed_args = []
+         for arg in args:
+             if arg.startswith('--'):
+                 if '=' in arg:
+                     key, value = arg.split('=', 1)
+                     key = '--' + key[len('--'):].replace('_', '-')
+                     processed_args.append(f'{key}={value}')
+                 else:
+                     processed_args.append('--' +
+                                           arg[len('--'):].replace('_', '-'))
+             elif arg.startswith('-O') and arg != '-O' and len(arg) > 2:
+                 # allow the -O flag to be used without a space, e.g. -O3
+                 processed_args.append('-O')
+                 processed_args.append(arg[2:])
+             else:
+                 processed_args.append(arg)
+
+         return super().parse_args(processed_args, namespace)
+
+     def check_port(self, value):
+         try:
+             value = int(value)
+         except ValueError:
+             msg = "Port must be an integer"
+             raise ArgumentTypeError(msg) from None
+
+         if not (1024 <= value <= 65535):
+             raise ArgumentTypeError("Port must be between 1024 and 65535")
+
+         return value
+
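A quick sketch (editor's illustration, not part of the package diff) of the underscore/dash equivalence the parser above provides:

```python
# Hypothetical usage sketch, assuming the parser is importable from vllm.utils.
from vllm.utils import FlexibleArgumentParser

parser = FlexibleArgumentParser()
parser.add_argument("--tensor-parallel-size", type=int, default=1)

ns = parser.parse_args(["--tensor_parallel_size", "4"])  # underscore form
assert ns.tensor_parallel_size == 4
```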
+     def _pull_args_from_config(self, args: list[str]) -> list[str]:
+         """Method to pull arguments specified in the config file
+         into the command-line args variable.
+
+         The arguments in the config file will be inserted into
+         the argument list.
+
+         example:
+         ```yaml
+         port: 12323
+         tensor-parallel-size: 4
+         ```
+         ```python
+         $: vllm {serve,chat,complete} "facebook/opt-12B" \
+             --config config.yaml -tp 2
+         $: args = [
+             "serve,chat,complete",
+             "facebook/opt-12B",
+             '--config', 'config.yaml',
+             '-tp', '2'
+         ]
+         $: args = [
+             "serve,chat,complete",
+             "facebook/opt-12B",
+             '--port', '12323',
+             '--tensor-parallel-size', '4',
+             '-tp', '2'
+         ]
+         ```
+
+         Please note how the config args are inserted after the subcommand.
+         This way the precedence of cli > config > defaults is maintained
+         when the args are parsed by super().
+         """
+         assert args.count(
+             '--config') <= 1, "More than one config file specified!"
+
+         index = args.index('--config')
+         if index == len(args) - 1:
+             raise ValueError("No config file specified! "
+                              "Please check your command-line arguments.")
+
+         file_path = args[index + 1]
+
+         config_args = self._load_config_file(file_path)
+
+         # 0th index is for {serve,chat,complete}
+         # optionally followed by model_tag (only for serve)
+         # followed by config args
+         # followed by rest of cli args.
+         # maintaining this order will enforce the precedence
+         # of cli > config > defaults
+         if args[0] == "serve":
+             model_in_cli = len(args) > 1 and not args[1].startswith('-')
+             model_in_config = any(arg == '--model' for arg in config_args)
+
+             if not model_in_cli and not model_in_config:
+                 raise ValueError(
+                     "No model specified! Please specify model either "
+                     "as a positional argument or in a config file.")
+
+             if model_in_cli:
+                 # Model specified as positional arg, keep CLI version
+                 args = [args[0]] + [
+                     args[1]
+                 ] + config_args + args[2:index] + args[index + 2:]
+             else:
+                 # No model in CLI, use config if available
+                 args = [args[0]
+                         ] + config_args + args[1:index] + args[index + 2:]
+         else:
+             args = [args[0]] + config_args + args[1:index] + args[index + 2:]
+
+         return args
+
+     def _load_config_file(self, file_path: str) -> list[str]:
+         """Loads a yaml file and returns the key value pairs as a
+         flattened list with argparse like pattern
+         ```yaml
+         port: 12323
+         tensor-parallel-size: 4
+         ```
+         returns:
+             processed_args: list[str] = [
+                 '--port', '12323',
+                 '--tensor-parallel-size', '4'
+             ]
+         """
+         extension: str = file_path.split('.')[-1]
+         if extension not in ('yaml', 'yml'):
+             raise ValueError(
+                 "Config file must be of a yaml/yml type. "
+                 f"{extension} supplied")
+
+         # only expecting a flat dictionary of atomic types
+         processed_args: list[str] = []
+
+         config: dict[str, Union[int, str]] = {}
+         try:
+             with open(file_path) as config_file:
+                 config = yaml.safe_load(config_file)
+         except Exception as ex:
+             logger.error(
+                 "Unable to read the config file at %s. "
+                 "Make sure path is correct", file_path)
+             raise ex
+
+         store_boolean_arguments = [
+             action.dest for action in self._actions
+             if isinstance(action, StoreBoolean)
+         ]
+
+         for key, value in config.items():
+             if isinstance(value, bool) and key not in store_boolean_arguments:
+                 if value:
+                     processed_args.append('--' + key)
+             else:
+                 processed_args.append('--' + key)
+                 processed_args.append(str(value))
+
+         return processed_args
+
+
+ async def _run_task_with_lock(task: Callable, lock: asyncio.Lock, *args,
+                               **kwargs):
+     """Utility function to run an async task while holding a lock."""
+     async with lock:
+         return await task(*args, **kwargs)
+
+
+ def supports_kw(
+     callable: Callable[..., object],
+     kw_name: str,
+     *,
+     requires_kw_only: bool = False,
+     allow_var_kwargs: bool = True,
+ ) -> bool:
+     """Check if a keyword is a valid kwarg for a callable; if
+     `requires_kw_only` is set, disallow kwarg names that can also be passed
+     positionally.
+     """
+     params = inspect.signature(callable).parameters
+     if not params:
+         return False
+
+     param_val = params.get(kw_name)
+
+     # Types where it may be valid, i.e., explicitly defined & nonvariadic
+     passable_kw_types = set((inspect.Parameter.POSITIONAL_ONLY,
+                              inspect.Parameter.POSITIONAL_OR_KEYWORD,
+                              inspect.Parameter.KEYWORD_ONLY))
+
+     if param_val:
+         is_sig_param = param_val.kind in passable_kw_types
+         # We want kwargs only, but this is passable as a positional arg
+         if (requires_kw_only and is_sig_param
+                 and param_val.kind != inspect.Parameter.KEYWORD_ONLY):
+             return False
+         if ((requires_kw_only
+              and param_val.kind == inspect.Parameter.KEYWORD_ONLY)
+                 or (not requires_kw_only and is_sig_param)):
+             return True
+
+     # If we're okay with var-kwargs, it's supported as long as
+     # the kw_name isn't something like *args, **kwargs
+     if allow_var_kwargs:
+         # Get the last param; type is ignored here because params is a proxy
+         # mapping, but it wraps an ordered dict, and they appear in order.
+         # Ref: https://docs.python.org/3/library/inspect.html#inspect.Signature.parameters
+         last_param = params[next(reversed(params))]  # type: ignore
+         return (last_param.kind == inspect.Parameter.VAR_KEYWORD
+                 and last_param.name != kw_name)
+     return False
+
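For reference, a small sketch (editor's illustration, not part of the package diff) of probing a signature with the helper above:

```python
# Hypothetical usage sketch, assuming the helper is importable from vllm.utils.
from vllm.utils import supports_kw

def f(a, *, b):
    pass

assert supports_kw(f, "a")
assert not supports_kw(f, "a", requires_kw_only=True)  # `a` is positional
assert supports_kw(f, "b", requires_kw_only=True)      # `b` is keyword-only
assert not supports_kw(f, "c")                         # no such parameter
```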
+
+ def resolve_mm_processor_kwargs(
+     init_kwargs: Optional[Mapping[str, object]],
+     inference_kwargs: Optional[Mapping[str, object]],
+     callable: Callable[..., object],
+     *,
+     requires_kw_only: bool = True,
+     allow_var_kwargs: bool = False,
+ ) -> dict[str, Any]:
+     """Applies filtering to eliminate invalid mm_processor_kwargs, i.e.,
+     those that are not explicit keywords to the given callable (if one is
+     given; otherwise no filtering is done), then merges the kwarg dicts,
+     giving priority to inference_kwargs if there are any collisions.
+
+     In the case that no kwarg overrides are provided, returns an empty
+     dict so that it can still be kwarg expanded into the callable later on.
+
+     If allow_var_kwargs=True, allows for things that can be expanded into
+     kwargs as long as they aren't a naming collision with var_kwargs or
+     potential positional arguments.
+     """
+     # Filter inference time multimodal processor kwargs provided
+     runtime_mm_kwargs = get_allowed_kwarg_only_overrides(
+         callable,
+         overrides=inference_kwargs,
+         requires_kw_only=requires_kw_only,
+         allow_var_kwargs=allow_var_kwargs,
+     )
+
+     # Filter init time multimodal processor kwargs provided
+     init_mm_kwargs = get_allowed_kwarg_only_overrides(
+         callable,
+         overrides=init_kwargs,
+         requires_kw_only=requires_kw_only,
+         allow_var_kwargs=allow_var_kwargs,
+     )
+
+     # Merge the final processor kwargs, prioritizing inference
+     # time values over the initialization time values.
+     mm_processor_kwargs = {**init_mm_kwargs, **runtime_mm_kwargs}
+     return mm_processor_kwargs
+
+
+ def get_allowed_kwarg_only_overrides(
+     callable: Callable[..., object],
+     overrides: Optional[Mapping[str, object]],
+     *,
+     requires_kw_only: bool = True,
+     allow_var_kwargs: bool = False,
+ ) -> dict[str, Any]:
+     """
+     Given a callable which has one or more keyword only params and a dict
+     mapping param names to values, drop values that cannot be kwarg
+     expanded to overwrite one or more keyword-only args. This is used in a
+     few places to handle custom processor overrides for multimodal models,
+     e.g., for profiling when processor options provided by the user
+     may affect the number of mm tokens per instance.
+
+     Args:
+         callable: Callable which takes 0 or more keyword only arguments.
+                   If None is provided, all override names are allowed.
+         overrides: Potential overrides to be used when invoking the callable.
+         allow_var_kwargs: Allows overrides that are expandable for var kwargs.
+
+     Returns:
+         Dictionary containing the kwargs to be leveraged which may be used
+         to overwrite one or more keyword only arguments when invoking the
+         callable.
+     """
+     if not overrides:
+         return {}
+
+     # Drop any mm_processor_kwargs provided by the user that
+     # are not kwargs, unless they fit the var_kwargs param
+     filtered_overrides = {
+         kwarg_name: val
+         for kwarg_name, val in overrides.items()
+         if supports_kw(callable,
+                        kwarg_name,
+                        requires_kw_only=requires_kw_only,
+                        allow_var_kwargs=allow_var_kwargs)
+     }
+
+     # If anything is dropped, log a warning
+     dropped_keys = overrides.keys() - filtered_overrides.keys()
+     if dropped_keys:
+         if requires_kw_only:
+             logger.warning(
+                 "The following intended overrides are not keyword-only args "
+                 "and will be dropped: %s", dropped_keys)
+         else:
+             logger.warning(
+                 "The following intended overrides are not keyword args "
+                 "and will be dropped: %s", dropped_keys)
+
+     return filtered_overrides
+
+
+ # Using dynamo with vLLM doesn't really work well with PyTorch versions < 2.4.0.
+ # In particular, the FakeScalarType is not supported for earlier versions of
+ # PyTorch which breaks dynamo for any ops registered using ScalarType.
+ def supports_dynamo() -> bool:
+     base_torch_version = Version(Version(torch.__version__).base_version)
+     return base_torch_version >= Version("2.4.0")
+
+
+ # Some backends use pytorch version < 2.4.0 which doesn't
+ # support `torch.library.custom_op`.
+ def supports_custom_op() -> bool:
+     return hasattr(torch.library, "custom_op")
+
+
+ class AtomicCounter:
+     """An atomic, thread-safe counter"""
+
+     def __init__(self, initial=0):
+         """Initialize a new atomic counter to given initial value"""
+         self._value = initial
+         self._lock = threading.Lock()
+
+     def inc(self, num=1):
+         """Atomically increment the counter by num and return the new value"""
+         with self._lock:
+             self._value += num
+             return self._value
+
+     def dec(self, num=1):
+         """Atomically decrement the counter by num and return the new value"""
+         with self._lock:
+             self._value -= num
+             return self._value
+
+     @property
+     def value(self):
+         return self._value
+
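A small sketch (editor's illustration, not part of the package diff) showing the counter above staying consistent under concurrent increments:

```python
# Hypothetical usage sketch, assuming the class is importable from vllm.utils.
from concurrent.futures import ThreadPoolExecutor

from vllm.utils import AtomicCounter

counter = AtomicCounter()
with ThreadPoolExecutor(max_workers=8) as pool:
    for _ in range(1000):
        pool.submit(counter.inc)
assert counter.value == 1000  # no lost updates thanks to the internal lock
```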
+
+ # Adapted from: https://stackoverflow.com/a/47212782/5082708
+ class LazyDict(Mapping[str, T], Generic[T]):
+
+     def __init__(self, factory: dict[str, Callable[[], T]]):
+         self._factory = factory
+         self._dict: dict[str, T] = {}
+
+     def __getitem__(self, key: str) -> T:
+         if key not in self._dict:
+             if key not in self._factory:
+                 raise KeyError(key)
+             self._dict[key] = self._factory[key]()
+         return self._dict[key]
+
+     def __setitem__(self, key: str, value: Callable[[], T]):
+         self._factory[key] = value
+
+     def __iter__(self):
+         return iter(self._factory)
+
+     def __len__(self):
+         return len(self._factory)
+
+
+ class ClassRegistry(UserDict[Type[T], _V]):
+
+     def __getitem__(self, key: Type[T]) -> _V:
+         for cls in key.mro():
+             if cls in self.data:
+                 return self.data[cls]
+
+         raise KeyError(key)
+
+     def __contains__(self, key: object) -> bool:
+         return self.contains(key)
+
+     def contains(self, key: object, *, strict: bool = False) -> bool:
+         if not isinstance(key, type):
+             return False
+
+         if strict:
+             return key in self.data
+
+         return any(cls in self.data for cls in key.mro())
+
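As an illustration (editor's sketch, not part of the package diff), lookups in the registry above walk the MRO, so subclasses inherit their parents' entries:

```python
# Hypothetical usage sketch, assuming the class is importable from vllm.utils.
from vllm.utils import ClassRegistry

class Base:
    pass

class Child(Base):
    pass

registry = ClassRegistry()
registry[Base] = "base-handler"

assert registry[Child] == "base-handler"          # resolved via the MRO
assert registry.contains(Child)
assert not registry.contains(Child, strict=True)  # no exact entry for Child
```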
+
+ def weak_ref_tensor(tensor: Any) -> Any:
+     """
+     Create a weak reference to a tensor.
+     The new tensor will share the same data as the original tensor,
+     but will not keep the original tensor alive.
+     """
+     if isinstance(tensor, torch.Tensor):
+         return torch.ops._C.weak_ref_tensor(tensor)
+     else:
+         return tensor
+
+
+ def weak_ref_tensors(
+     tensors: Union[torch.Tensor, list[torch.Tensor], tuple[torch.Tensor]]
+ ) -> Union[torch.Tensor, list[Any], tuple[Any], Any]:
+     """
+     Convenience function to create weak references to tensors,
+     for single tensor, list of tensors or tuple of tensors.
+     """
+     if isinstance(tensors, torch.Tensor):
+         return weak_ref_tensor(tensors)
+     if isinstance(tensors, list):
+         return [weak_ref_tensor(t) for t in tensors]
+     if isinstance(tensors, tuple):
+         return tuple(weak_ref_tensor(t) for t in tensors)
+     raise ValueError("Invalid type for tensors")
+
+
+ def get_cuda_view_from_cpu_tensor(cpu_tensor: torch.Tensor) -> torch.Tensor:
+     """
+     Get a CUDA view of a CPU tensor using Unified Virtual Addressing (UVA).
+     """
+     assert cpu_tensor.is_pinned(), "CPU tensor must be pinned"
+     return torch.ops._C.get_cuda_view_from_cpu_tensor(cpu_tensor)
+
+
+ def is_in_doc_build() -> bool:
+     try:
+         from sphinx.ext.autodoc.mock import _MockModule
+         return isinstance(torch, _MockModule)
+     except ModuleNotFoundError:
+         return False
+
+
+ def import_from_path(module_name: str, file_path: Union[str, os.PathLike]):
+     """
+     Import a Python file according to its file path.
+
+     Based on the official recipe:
+     https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
+     """
+     spec = importlib.util.spec_from_file_location(module_name, file_path)
+     if spec is None:
+         raise ModuleNotFoundError(f"No module named '{module_name}'")
+
+     assert spec.loader is not None
+
+     module = importlib.util.module_from_spec(spec)
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)
+     return module
+
+
+ @cache
+ def get_vllm_optional_dependencies():
+     metadata = importlib.metadata.metadata("vllm")
+     requirements = metadata.get_all("Requires-Dist", [])
+     extras = metadata.get_all("Provides-Extra", [])
+
+     return {
+         extra: [
+             re.split(r";|>=|<=|==", req)[0] for req in requirements
+             if req.endswith(f'extra == "{extra}"')
+         ]
+         for extra in extras
+     }
+
+
+ class _PlaceholderBase:
+     """
+     Disallows downstream usage of placeholder modules.
+
+     We need to explicitly override each dunder method because
+     :meth:`__getattr__` is not called when they are accessed.
+
+     See also:
+         [Special method lookup](https://docs.python.org/3/reference/datamodel.html#special-lookup)
+     """
+
+     def __getattr__(self, key: str) -> Never:
+         """
+         The main class should implement this to throw an error
+         for attribute accesses representing downstream usage.
+         """
+         raise NotImplementedError
+
+     # [Basic customization]
+
+     def __lt__(self, other: object):
+         return self.__getattr__("__lt__")
+
+     def __le__(self, other: object):
+         return self.__getattr__("__le__")
+
+     def __eq__(self, other: object):
+         return self.__getattr__("__eq__")
+
+     def __ne__(self, other: object):
+         return self.__getattr__("__ne__")
+
+     def __gt__(self, other: object):
+         return self.__getattr__("__gt__")
+
+     def __ge__(self, other: object):
+         return self.__getattr__("__ge__")
+
+     def __hash__(self):
+         return self.__getattr__("__hash__")
+
+     def __bool__(self):
+         return self.__getattr__("__bool__")
+
+     # [Callable objects]
+
+     def __call__(self, *args: object, **kwargs: object):
+         return self.__getattr__("__call__")
+
+     # [Container types]
+
+     def __len__(self):
+         return self.__getattr__("__len__")
+
+     def __getitem__(self, key: object):
+         return self.__getattr__("__getitem__")
+
+     def __setitem__(self, key: object, value: object):
+         return self.__getattr__("__setitem__")
+
+     def __delitem__(self, key: object):
+         return self.__getattr__("__delitem__")
+
+     # __missing__ is optional according to __getitem__ specification,
+     # so it is skipped
+
+     # __iter__ and __reversed__ have a default implementation
+     # based on __len__ and __getitem__, so they are skipped.
+
+     # [Numeric Types]
+
+     def __add__(self, other: object):
+         return self.__getattr__("__add__")
+
+     def __sub__(self, other: object):
+         return self.__getattr__("__sub__")
+
+     def __mul__(self, other: object):
+         return self.__getattr__("__mul__")
+
+     def __matmul__(self, other: object):
+         return self.__getattr__("__matmul__")
+
+     def __truediv__(self, other: object):
+         return self.__getattr__("__truediv__")
+
+     def __floordiv__(self, other: object):
+         return self.__getattr__("__floordiv__")
+
+     def __mod__(self, other: object):
+         return self.__getattr__("__mod__")
+
+     def __divmod__(self, other: object):
+         return self.__getattr__("__divmod__")
+
+     def __pow__(self, other: object, modulo: object = ...):
+         return self.__getattr__("__pow__")
+
+     def __lshift__(self, other: object):
+         return self.__getattr__("__lshift__")
+
+     def __rshift__(self, other: object):
+         return self.__getattr__("__rshift__")
+
+     def __and__(self, other: object):
+         return self.__getattr__("__and__")
+
+     def __xor__(self, other: object):
+         return self.__getattr__("__xor__")
+
+     def __or__(self, other: object):
+         return self.__getattr__("__or__")
+
+     # r* and i* methods have lower priority than
+     # the methods for left operand so they are skipped
+
+     def __neg__(self):
+         return self.__getattr__("__neg__")
+
+     def __pos__(self):
+         return self.__getattr__("__pos__")
+
+     def __abs__(self):
+         return self.__getattr__("__abs__")
+
+     def __invert__(self):
+         return self.__getattr__("__invert__")
+
+     # __complex__, __int__ and __float__ have a default implementation
+     # based on __index__, so they are skipped.
+
+     def __index__(self):
+         return self.__getattr__("__index__")
+
+     def __round__(self, ndigits: object = ...):
+         return self.__getattr__("__round__")
+
+     def __trunc__(self):
+         return self.__getattr__("__trunc__")
+
+     def __floor__(self):
+         return self.__getattr__("__floor__")
+
+     def __ceil__(self):
+         return self.__getattr__("__ceil__")
+
+     # [Context managers]
+
+     def __enter__(self):
+         return self.__getattr__("__enter__")
+
+     def __exit__(self, *args: object, **kwargs: object):
+         return self.__getattr__("__exit__")
+
+
+ class PlaceholderModule(_PlaceholderBase):
+     """
+     A placeholder object to use when a module does not exist.
+
+     This enables more informative errors when trying to access attributes
+     of a module that does not exist.
+     """
+
+     def __init__(self, name: str) -> None:
+         super().__init__()
+
+         # Apply name mangling to avoid conflicting with module attributes
+         self.__name = name
+
+     def placeholder_attr(self, attr_path: str):
+         return _PlaceholderModuleAttr(self, attr_path)
+
+     def __getattr__(self, key: str):
+         name = self.__name
+
+         try:
+             importlib.import_module(name)
+         except ImportError as exc:
+             for extra, names in get_vllm_optional_dependencies().items():
+                 if name in names:
+                     msg = f"Please install vllm[{extra}] for {extra} support"
+                     raise ImportError(msg) from exc
+
+             raise exc
+
+         raise AssertionError("PlaceholderModule should not be used "
+                              "when the original module can be imported")
+
+
+ class _PlaceholderModuleAttr(_PlaceholderBase):
+
+     def __init__(self, module: PlaceholderModule, attr_path: str) -> None:
+         super().__init__()
+
+         # Apply name mangling to avoid conflicting with module attributes
+         self.__module = module
+         self.__attr_path = attr_path
+
+     def placeholder_attr(self, attr_path: str):
+         return _PlaceholderModuleAttr(self.__module,
+                                       f"{self.__attr_path}.{attr_path}")
+
+     def __getattr__(self, key: str):
+         getattr(self.__module, f"{self.__attr_path}.{key}")
+
+         raise AssertionError("PlaceholderModule should not be used "
+                              "when the original module can be imported")
+
+
+ # create a library to hold the custom op
+ vllm_lib = Library("vllm", "FRAGMENT")  # noqa
+
+
+ def direct_register_custom_op(
+     op_name: str,
+     op_func: Callable,
+     mutates_args: list[str],
+     fake_impl: Optional[Callable] = None,
+     target_lib: Optional[Library] = None,
+     dispatch_key: str = "CUDA",
+     tags: Tuple[torch.Tag, ...] = (),
+ ):
+     """
+     `torch.library.custom_op` can have significant overhead because it
+     needs to consider complicated dispatching logic. This function
+     directly registers a custom op and dispatches it to the CUDA backend.
+     See https://gist.github.com/youkaichao/ecbea9ec9fc79a45d2adce1784d7a9a5
+     for more details.
+
+     By default, the custom op is registered to the vLLM library. If you
+     want to register it to a different library, you can pass the library
+     object to the `target_lib` argument.
+
+     IMPORTANT: the lifetime of the operator is tied to the lifetime of the
+     library object. If you want to bind the operator to a different library,
+     make sure the library object is alive when the operator is used.
+     """
+     if is_in_doc_build():
+         return
+
+     if not supports_custom_op():
+         from vllm.platforms import current_platform
+         assert not current_platform.is_cuda_alike(), (
+             "cuda platform needs torch>=2.4 to support custom op, "
+             "chances are you are using an old version of pytorch "
+             "or a custom build of pytorch. It is recommended to "
+             "use vLLM in a fresh new environment and let it install "
+             "the required dependencies.")
+         return
+
+     import torch.library
+     if hasattr(torch.library, "infer_schema"):
+         schema_str = torch.library.infer_schema(op_func,
+                                                 mutates_args=mutates_args)
+     else:
+         # for pytorch 2.4
+         import torch._custom_op.impl
+         schema_str = torch._custom_op.impl.infer_schema(op_func, mutates_args)
+     my_lib = target_lib or vllm_lib
+     my_lib.define(op_name + schema_str, tags=tags)
+     my_lib.impl(op_name, op_func, dispatch_key=dispatch_key)
+     if fake_impl is not None:
+         my_lib._register_fake(op_name, fake_impl)
+
+
+ def resolve_obj_by_qualname(qualname: str) -> Any:
+     """
+     Resolve an object by its fully qualified name.
+     """
+     module_name, obj_name = qualname.rsplit(".", 1)
+     module = importlib.import_module(module_name)
+     return getattr(module, obj_name)
+
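A quick sketch (editor's illustration, not part of the package diff) of resolving a dotted path with the helper above:

```python
# Hypothetical usage sketch, assuming the helper is importable from vllm.utils.
import os.path

from vllm.utils import resolve_obj_by_qualname

path_join = resolve_obj_by_qualname("os.path.join")
assert path_join is os.path.join  # last dot splits module from attribute
```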
+
+ def kill_process_tree(pid: int):
+     """
+     Kills all descendant processes of the given pid by sending SIGKILL.
+
+     Args:
+         pid (int): Process ID of the parent process
+     """
+     try:
+         parent = psutil.Process(pid)
+     except psutil.NoSuchProcess:
+         return
+
+     # Get all children recursively
+     children = parent.children(recursive=True)
+
+     # Send SIGKILL to all children first
+     for child in children:
+         with contextlib.suppress(ProcessLookupError):
+             os.kill(child.pid, signal.SIGKILL)
+
+     # Finally kill the parent
+     with contextlib.suppress(ProcessLookupError):
+         os.kill(pid, signal.SIGKILL)
+
+
+ @dataclass
+ class MemorySnapshot:
+     """Memory snapshot."""
+     torch_peak: int = 0
+     cuda_memory: int = 0
+     torch_memory: int = 0
+     non_torch_memory: int = 0
+     timestamp: float = 0.0
+     auto_measure: bool = True
+
+     def __post_init__(self):
+         if self.auto_measure:
+             self.measure()
+
+     def measure(self):
+         # we measure the torch peak memory usage via allocated_bytes,
+         # rather than `torch.cuda.memory_reserved()`.
+         # After `torch.cuda.reset_peak_memory_stats()`,
+         # `torch.cuda.memory_reserved()` will keep growing, and only shrink
+         # when we call `torch.cuda.empty_cache()` or OOM happens.
+         self.torch_peak = torch.cuda.memory_stats().get(
+             "allocated_bytes.all.peak", 0)
+
+         self.cuda_memory = torch.cuda.mem_get_info(
+         )[1] - torch.cuda.mem_get_info()[0]
+
+         # torch.cuda.memory_reserved() is how many bytes
+         # PyTorch gets from cuda (by calling cudaMalloc, etc.)
+         # this is used to measure the non-torch memory usage
+         self.torch_memory = torch.cuda.memory_reserved()
+
+         self.non_torch_memory = self.cuda_memory - self.torch_memory
+         self.timestamp = time.time()
+
+     def __sub__(self, other: MemorySnapshot) -> MemorySnapshot:
+         return MemorySnapshot(
+             torch_peak=self.torch_peak - other.torch_peak,
+             cuda_memory=self.cuda_memory - other.cuda_memory,
+             torch_memory=self.torch_memory - other.torch_memory,
+             non_torch_memory=self.non_torch_memory - other.non_torch_memory,
+             timestamp=self.timestamp - other.timestamp,
+             auto_measure=False,
+         )
+
+
+ @dataclass
+ class MemoryProfilingResult:
+     """Memory profiling result. All numbers are in bytes."""
+     non_kv_cache_memory: int = 0
+     torch_peak_increase: int = 0
+     non_torch_increase: int = 0
+     weights_memory: float = 0
+     before_create: MemorySnapshot = field(default_factory=MemorySnapshot)
+     before_profile: MemorySnapshot = field(default_factory=MemorySnapshot)
+     after_profile: MemorySnapshot = field(default_factory=MemorySnapshot)
+     profile_time: float = 0.0
+
+
+ @contextlib.contextmanager
+ def memory_profiling(
+         baseline_snapshot: MemorySnapshot,
+         weights_memory: int) -> Generator[MemoryProfilingResult, None, None]:
+     """Memory profiling context manager.
+     baseline_snapshot: the memory snapshot before the current vLLM instance.
+     weights_memory: memory used by PyTorch when loading the model weights.
+     Note that, before loading the model weights, we also initialize the device
+     and distributed environment, which may consume some memory. This part is not
+     included in the weights_memory because PyTorch does not control it.
+
+     The memory in one GPU can be classified into 3 categories:
+     1. memory used by anything other than the current vLLM instance.
+     2. memory used by torch in the current vLLM instance.
+     3. memory used in the current vLLM instance, but not by torch.
+
+     A quantitative example:
+
+     Before creating the current vLLM instance:
+         category 1: 1 GiB
+         category 2: 0 GiB
+         category 3: 0 GiB
+
+     After creating the current vLLM instance and loading the model,
+     (i.e. before profiling):
+         category 1: 1 GiB
+         category 2: 2 GiB (model weights take 2 GiB)
+         category 3: 0.5 GiB (memory used by NCCL)
+
+     During profiling (peak):
+         category 1: 1 GiB
+         category 2: 4 GiB (peak activation tensors take 2 GiB)
+         category 3: 1 GiB (memory used by NCCL + buffers for some attention backends)
+
+     After profiling:
+         category 1: 1 GiB
+         category 2: 3 GiB (after garbage-collecting activation tensors)
+         category 3: 1 GiB (memory used by NCCL + buffers for some attention backends)
+
+     In this case, non-kv cache takes 5 GiB in total, including:
+     a. 2 GiB used by the model weights (category 2)
+     b. 2 GiB reserved for the peak activation tensors (category 2)
+     c. 1 GiB used by non-torch components (category 3)
+
+     The memory used for loading weights (a.) is directly given from the argument `weights_memory`.
+
+     The increase of `torch.cuda.memory_stats()["allocated_bytes.all.peak"]` during profiling gives (b.).
+
+     The increase of `non_torch_memory` from creating the current vLLM instance until after profiling gives (c.).
+     """  # noqa
+     gc.collect()
+     torch.cuda.empty_cache()
+     torch.cuda.reset_peak_memory_stats()
+
+     result = MemoryProfilingResult()
+
+     result.before_create = baseline_snapshot
+     # the part of memory used for holding the model weights
+     result.weights_memory = weights_memory
+
+     result.before_profile.measure()
+
+     yield result
+
+     gc.collect()
+     torch.cuda.empty_cache()
+
+     result.after_profile.measure()
+
+     diff_profile = result.after_profile - result.before_profile
+     diff_from_create = result.after_profile - result.before_create
+     result.torch_peak_increase = diff_profile.torch_peak
+     result.non_torch_increase = diff_from_create.non_torch_memory
+     result.profile_time = diff_profile.timestamp
+     result.non_kv_cache_memory = result.non_torch_increase + result.torch_peak_increase + result.weights_memory  # noqa
+
+
+ # Adapted from: https://github.com/sgl-project/sglang/blob/v0.4.1/python/sglang/srt/utils.py#L630  # noqa: E501
+ def set_ulimit(target_soft_limit=65535):
+     if sys.platform.startswith('win'):
+         logger.info("Windows detected, skipping ulimit adjustment.")
+         return
+
+     import resource
+     resource_type = resource.RLIMIT_NOFILE
+     current_soft, current_hard = resource.getrlimit(resource_type)
+
+     if current_soft < target_soft_limit:
+         try:
+             resource.setrlimit(resource_type,
+                                (target_soft_limit, current_hard))
+         except ValueError as e:
+             logger.warning(
+                 "Found ulimit of %s and failed to automatically increase "
+                 "with error %s. This can cause fd limit errors like "
+                 "`OSError: [Errno 24] Too many open files`. Consider "
+                 "increasing with ulimit -n", current_soft, e)
+
+
+ # Adapted from: https://github.com/sgl-project/sglang/blob/v0.4.1/python/sglang/utils.py#L28  # noqa: E501
+ def get_exception_traceback():
+     etype, value, tb = sys.exc_info()
+     err_str = "".join(traceback.format_exception(etype, value, tb))
+     return err_str
+
+
+ # Adapted from: https://github.com/sgl-project/sglang/blob/v0.4.1/python/sglang/srt/utils.py#L783  # noqa: E501
+ def make_zmq_socket(
+     ctx: Union[zmq.asyncio.Context, zmq.Context],  # type: ignore[name-defined]
+     path: str,
+     socket_type: Any,
+     bind: Optional[bool] = None,
+     identity: Optional[bytes] = None,
+ ) -> Union[zmq.Socket, zmq.asyncio.Socket]:  # type: ignore[name-defined]
+     """Make a ZMQ socket with the proper bind/connect semantics."""
+
+     mem = psutil.virtual_memory()
+     socket = ctx.socket(socket_type)
+
+     # Calculate buffer size based on system memory
+     total_mem = mem.total / 1024**3
+     available_mem = mem.available / 1024**3
+     # For systems with substantial memory (>32GB total, >16GB available):
+     # - Set a large 0.5GB buffer to improve throughput
+     # For systems with less memory:
+     # - Use system default (-1) to avoid excessive memory consumption
+     if total_mem > 32 and available_mem > 16:
+         buf_size = int(0.5 * 1024**3)  # 0.5GB in bytes
+     else:
+         buf_size = -1  # Use system default buffer size
+
+     if bind is None:
+         bind = socket_type != zmq.PUSH
+
+     if socket_type in (zmq.PULL, zmq.DEALER, zmq.ROUTER):
+         socket.setsockopt(zmq.RCVHWM, 0)
+         socket.setsockopt(zmq.RCVBUF, buf_size)
+
+     if socket_type in (zmq.PUSH, zmq.DEALER, zmq.ROUTER):
+         socket.setsockopt(zmq.SNDHWM, 0)
+         socket.setsockopt(zmq.SNDBUF, buf_size)
+
+     if identity is not None:
+         socket.setsockopt(zmq.IDENTITY, identity)
+
+     if bind:
+         socket.bind(path)
+     else:
+         socket.connect(path)
+
+     return socket
+
+
+ @contextlib.contextmanager
+ def zmq_socket_ctx(
+     path: str,
+     socket_type: Any,
+     bind: Optional[bool] = None,
+     linger: int = 0,
+     identity: Optional[bytes] = None,
+ ) -> Iterator[zmq.Socket]:
+     """Context manager for a ZMQ socket"""
+
+     ctx = zmq.Context()  # type: ignore[attr-defined]
+     try:
+         yield make_zmq_socket(ctx,
+                               path,
+                               socket_type,
+                               bind=bind,
+                               identity=identity)
+     except KeyboardInterrupt:
+         logger.debug("Got Keyboard Interrupt.")
+
+     finally:
+         ctx.destroy(linger=linger)
+
+
+ def is_in_ray_actor():
+     """Check if we are in a Ray actor."""
+
+     try:
+         import ray
+         return (ray.is_initialized()
+                 and ray.get_runtime_context().get_actor_id() is not None)
+     except ImportError:
+         return False
+
+
2363
+ def _maybe_force_spawn():
+     """Check if we need to force the use of the `spawn` multiprocessing
+     start method.
+     """
+     if os.environ.get("VLLM_WORKER_MULTIPROC_METHOD") == "spawn":
+         return
+
+     reason = None
+     if cuda_is_initialized():
+         reason = "CUDA is initialized"
+     elif is_in_ray_actor():
+         # Even if we choose to spawn, we need to pass the Ray address to
+         # the subprocess so that it knows how to connect to the Ray
+         # cluster. Env vars are inherited by subprocesses, even with spawn.
+         import ray
+         os.environ["RAY_ADDRESS"] = ray.get_runtime_context().gcs_address
+         reason = "In a Ray actor and can only be spawned"
+
+     if reason is not None:
+         logger.warning(
+             "We must use the `spawn` multiprocessing start method. "
+             "Overriding VLLM_WORKER_MULTIPROC_METHOD to 'spawn'. "
+             "See https://docs.vllm.ai/en/latest/getting_started/"
+             "troubleshooting.html#python-multiprocessing "
+             "for more information. Reason: %s", reason)
+         os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"
+
+
+ def get_mp_context():
+     """Get a multiprocessing context with a particular start method (spawn
+     or fork). By default we follow the value of VLLM_WORKER_MULTIPROC_METHOD
+     to determine the multiprocessing method (the default is fork). However,
+     under certain conditions, we may enforce spawn and override the value
+     of VLLM_WORKER_MULTIPROC_METHOD.
+     """
+     _maybe_force_spawn()
+     mp_method = envs.VLLM_WORKER_MULTIPROC_METHOD
+     return multiprocessing.get_context(mp_method)
+
+
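A hedged sketch of the intended call pattern (import path assumed): worker processes are created through the returned context so the spawn/fork decision above is applied uniformly. With spawn, the target must be picklable, i.e. defined at module top level as below.

    from vllm.utils import get_mp_context  # assumed import path

    def _worker(x: int) -> None:
        print(f"worker got {x}")

    if __name__ == "__main__":
        mp = get_mp_context()  # may have been forced to 'spawn'
        p = mp.Process(target=_worker, args=(42, ))
        p.start()
        p.join()
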
+ def bind_kv_cache(
+     ctx: dict[str, Any],
+     kv_cache: list[list[torch.Tensor]],  # [virtual_engine][layer_index]
+ ) -> None:
+     # Bind the kv_cache tensor to Attention modules, similar to
+     # ctx[layer_name].kv_cache[ve]=kv_cache[ve][extract_layer_index(layer_name)]
+     # Special cases handled here:
+     # 1. Some models have non-attention layers, e.g., Jamba
+     # 2. Pipeline parallelism: each rank only has a subset of layers
+     # 3. Encoder attention has no kv cache
+     # 4. In encoder-decoder models, the encoder-decoder attention and
+     #    decoder-only attention of the same layer (e.g., bart's
+     #    decoder.layers.1.self_attn and decoder.layers.1.encoder_attn)
+     #    are mapped to the same kv cache tensor
+     from vllm.attention import AttentionType
+     from vllm.model_executor.models.utils import extract_layer_index
+     layer_need_kv_cache = [
+         layer_name for layer_name in ctx
+         if (hasattr(ctx[layer_name], 'attn_type') and ctx[layer_name].attn_type
+             in (AttentionType.DECODER, AttentionType.ENCODER_DECODER))
+     ]
+     layer_index_sorted = sorted(
+         set(
+             extract_layer_index(layer_name)
+             for layer_name in layer_need_kv_cache))
+     for layer_name in layer_need_kv_cache:
+         kv_cache_idx = layer_index_sorted.index(
+             extract_layer_index(layer_name))
+         forward_ctx = ctx[layer_name]
+         assert len(forward_ctx.kv_cache) == len(kv_cache)
+         for ve, ve_kv_cache in enumerate(kv_cache):
+             forward_ctx.kv_cache[ve] = ve_kv_cache[kv_cache_idx]
+
+
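The subtle point is the re-indexing: layer indices that survive the filter are compacted via their sorted position, so a rank that only holds layers 0 and 2 still indexes a two-entry cache list. A toy illustration of just that arithmetic (the layer names are hypothetical):

    # Suppose this rank's decoder layers are "layers.0" and "layers.2".
    surviving_indices = sorted({0, 2})
    # "layers.2" binds to position 1 in the per-virtual-engine cache
    # list, not position 2.
    assert surviving_indices.index(2) == 1
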
+ def run_method(obj: Any, method: Union[str, bytes, Callable], args: tuple[Any],
+                kwargs: dict[str, Any]) -> Any:
+     """
+     Run a method of an object with the given arguments and keyword arguments.
+     If the method is a string, it will be resolved to a bound method using
+     getattr.
+     If the method is serialized bytes, it will be deserialized using
+     cloudpickle.
+     If the method is a callable, it will be called directly.
+     """
+     if isinstance(method, bytes):
+         func = partial(cloudpickle.loads(method), obj)
+     elif isinstance(method, str):
+         try:
+             func = getattr(obj, method)
+         except AttributeError:
+             raise NotImplementedError(f"Method {method!r} is not"
+                                       " implemented.") from None
+     else:
+         func = partial(method, obj)  # type: ignore
+     return func(*args, **kwargs)
+
+
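A hedged usage sketch (import path assumed; the Worker class is made up). Both dispatch forms below invoke the same bound behavior:

    from vllm.utils import run_method  # assumed import path

    class Worker:
        def ping(self, suffix: str) -> str:
            return "pong" + suffix

    w = Worker()
    # By name: resolved with getattr on the instance.
    assert run_method(w, "ping", ("!", ), {}) == "pong!"
    # By callable: the instance is bound as the first argument.
    assert run_method(w, Worker.ping, ("!", ), {}) == "pong!"
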
+ def import_pynvml():
+     """
+     Historical notes:
+
+     libnvml.so is the library behind nvidia-smi, and
+     pynvml is a Python wrapper around it. We use it to get GPU
+     status without initializing a CUDA context in the current process.
+     Historically, there have been two packages that provide pynvml:
+     - `nvidia-ml-py` (https://pypi.org/project/nvidia-ml-py/): The official
+       wrapper. It is a dependency of vLLM, and is installed when users
+       install vLLM. It provides a Python module named `pynvml`.
+     - `pynvml` (https://pypi.org/project/pynvml/): An unofficial wrapper.
+       Prior to version 12.0, it also provided a Python module `pynvml`,
+       and therefore conflicted with the official one. What's worse,
+       that module is a Python package, so it has higher priority than
+       the official one, which is a standalone Python file.
+       This causes errors when both of them are installed.
+       Starting from version 12.0, it migrated to a new module
+       named `pynvml_utils` to avoid the conflict.
+     The situation is confusing enough that many packages in the community
+     use the unofficial one by mistake, and we have to handle this case.
+     For example, `nvcr.io/nvidia/pytorch:24.12-py3` uses the unofficial
+     one, which causes errors; see the issue
+     https://github.com/vllm-project/vllm/issues/12847 for an example.
+     After all these troubles, we decided to copy the official `pynvml`
+     module into our codebase and use it directly.
+     """
+     import vllm.third_party.pynvml as pynvml
+     return pynvml
+
+
+ def warn_for_unimplemented_methods(cls: type[T]) -> type[T]:
+     """
+     A replacement for `abc.ABC`.
+     When we use `abc.ABC`, subclasses will fail to instantiate
+     if they do not implement all abstract methods.
+     Here, we only require `raise NotImplementedError` in the
+     base class, and log a warning if the method is not implemented
+     in the subclass.
+     """
+
+     original_init = cls.__init__
+
+     def find_unimplemented_methods(self: object):
+         unimplemented_methods = []
+         for attr_name in dir(self):
+             # bypass private/internal attributes
+             if attr_name.startswith('_'):
+                 continue
+
+             try:
+                 attr = getattr(self, attr_name)
+                 # skip non-callable attributes; for bound methods,
+                 # unwrap to the underlying function
+                 if not callable(attr):
+                     continue
+                 attr_func = attr.__func__
+             except AttributeError:
+                 continue
+             src = inspect.getsource(attr_func)
+             if "NotImplementedError" in src:
+                 unimplemented_methods.append(attr_name)
+         if unimplemented_methods:
+             method_names = ','.join(unimplemented_methods)
+             msg = f"Methods {method_names} not implemented in {self}"
+             logger.warning(msg)
+
+     @wraps(original_init)
+     def wrapped_init(self, *args, **kwargs) -> None:
+         original_init(self, *args, **kwargs)
+         find_unimplemented_methods(self)
+
+     type.__setattr__(cls, '__init__', wrapped_init)
+     return cls
+
+
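A hedged usage sketch (import path assumed): the decorator leaves instantiation permissive and only warns, which is the point of the `abc.ABC` replacement.

    from vllm.utils import warn_for_unimplemented_methods  # assumed path

    @warn_for_unimplemented_methods
    class Base:
        def run(self) -> None:
            raise NotImplementedError

    class Impl(Base):
        pass

    # Unlike abc.ABC this does not raise; it logs a warning that `run`
    # is unimplemented, because its source contains NotImplementedError.
    Impl()
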
+ class LazyLoader(types.ModuleType):
+     """
+     LazyLoader module borrowed from Tensorflow
+     https://github.com/tensorflow/tensorflow/blob/main/tensorflow/python/util/lazy_loader.py
+     with the addition of "module caching".
+
+     Lazily import a module, mainly to avoid pulling in large dependencies.
+     Modules such as `xgrammar` might have additional side effects, so we
+     only want to import them when needed, delaying all eager effects.
+     """
+
+     def __init__(
+         self,
+         local_name: str,
+         parent_module_globals: dict[str, Any],
+         name: str,
+     ):
+         self._local_name = local_name
+         self._parent_module_globals = parent_module_globals
+         self._module: types.ModuleType | None = None
+
+         super().__init__(str(name))
+
+     def _load(self) -> types.ModuleType:
+         # Import the target module and insert it into the parent's namespace
+         try:
+             module = importlib.import_module(self.__name__)
+             self._parent_module_globals[self._local_name] = module
+             # The additional add to sys.modules
+             # ensures the library is actually loaded.
+             sys.modules[self._local_name] = module
+         except ModuleNotFoundError as err:
+             raise err from None
+
+         # Update this object's dict so that if someone keeps a
+         # reference to the LazyLoader, lookups are efficient
+         # (__getattr__ is only called on lookups that fail).
+         self.__dict__.update(module.__dict__)
+         return module
+
+     def __getattr__(self, item: Any) -> Any:
+         if self._module is None:
+             self._module = self._load()
+         return getattr(self._module, item)
+
+     def __dir__(self) -> list[str]:
+         if self._module is None:
+             self._module = self._load()
+         return dir(self._module)
+
+
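A hedged usage sketch (import path assumed): the loader stands in for the module in the parent's globals until the first attribute access triggers the real import.

    from vllm.utils import LazyLoader  # assumed import path

    # numpy is not imported yet; this only creates the proxy module.
    np = LazyLoader("np", globals(), "numpy")

    # First attribute access imports numpy and caches it on the proxy.
    print(np.arange(3))
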
+ def swap_dict_values(obj: dict[_K, _V], key1: _K, key2: _K) -> None:
+     """
+     Helper function to swap the values of two keys; a missing key on one
+     side removes the other key rather than storing None.
+     """
+     v1 = obj.get(key1)
+     v2 = obj.get(key2)
+     if v1 is not None:
+         obj[key2] = v1
+     else:
+         obj.pop(key2, None)
+     if v2 is not None:
+         obj[key1] = v2
+     else:
+         obj.pop(key1, None)
+
+
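A tiny illustration of the missing-key behavior described above (hedged, assuming the import path):

    from vllm.utils import swap_dict_values  # assumed import path

    d = {"a": 1}
    swap_dict_values(d, "a", "b")
    # "a" is removed instead of being set to None.
    assert d == {"b": 1}
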
+ @contextlib.contextmanager
+ def cprofile_context(save_file: Optional[str] = None):
+     """Run cProfile over the enclosed block.
+
+     Args:
+         save_file: Path to save the profile result. "1" or
+             None will result in printing to stdout.
+     """
+     import cProfile
+
+     prof = cProfile.Profile()
+     prof.enable()
+
+     try:
+         yield
+     finally:
+         prof.disable()
+         if save_file and save_file != "1":
+             prof.dump_stats(save_file)
+         else:
+             prof.print_stats(sort="cumtime")
+
+
+ def cprofile(save_file: Optional[str] = None, enabled: bool = True):
+     """Decorator to profile a Python method using cProfile.
+
+     Args:
+         save_file: Path to save the profile result.
+             If "1", None, or "", results will be printed to stdout.
+         enabled: Set to False to turn this into a no-op.
+     """
+
+     def decorator(func: Callable):
+
+         @wraps(func)
+         def wrapper(*args, **kwargs):
+             if not enabled:
+                 # If profiling is disabled, just call the function directly.
+                 return func(*args, **kwargs)
+
+             with cprofile_context(save_file):
+                 return func(*args, **kwargs)
+
+         return wrapper
+
+     return decorator
+
+
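A hedged usage sketch (import path assumed): the decorator form wraps every call of the function in the profiling context above.

    from vllm.utils import cprofile  # assumed import path

    @cprofile("busy.prof")  # "1", "", or None would print to stdout instead
    def busy() -> int:
        return sum(i * i for i in range(10**6))

    busy()  # stats for this call are dumped to busy.prof
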
+ # Only relevant for models using ALiBi (e.g., MPT)
+ def check_use_alibi(model_config: ModelConfig) -> bool:
+     return (getattr(model_config.hf_text_config, "alibi", False)  # Falcon
+             or ("BloomForCausalLM" in getattr(model_config.hf_config,
+                                               "architectures", []))  # Bloom
+             or getattr(model_config.hf_text_config, "position_encoding_type",
+                        "") == "alibi"  # codellm_1b_alibi
+             or (hasattr(model_config.hf_text_config, "attn_config")  # MPT
+                 and model_config.hf_text_config.attn_config.get(
+                     "alibi", False)))
+
+
+ def sha256(input) -> int:
+     """Hash any picklable Python object using SHA-256.
+
+     The input is serialized using pickle before hashing, which allows
+     arbitrary Python objects to be used. Note that this function does
+     not use a hash seed; if you need one, prepend it explicitly to the
+     input.
+
+     Args:
+         input: Any picklable Python object.
+
+     Returns:
+         An integer representing the SHA-256 hash of the serialized input.
+     """
+     input_bytes = pickle.dumps(input, protocol=pickle.HIGHEST_PROTOCOL)
+     return int.from_bytes(hashlib.sha256(input_bytes).digest(),
+                           byteorder="big")
+
+
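Since there is no built-in seed, domain separation is the caller's job, as the docstring suggests. A hedged sketch (import path assumed):

    from vllm.utils import sha256  # assumed import path

    # Prepending distinct seeds yields independent hash values
    # for the same payload.
    h1 = sha256(("seed-a", [1, 2, 3]))
    h2 = sha256(("seed-b", [1, 2, 3]))
    assert h1 != h2
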
+ def is_torch_equal_or_newer(target: str) -> bool:
+     """Check if the installed torch version is >= the target version.
+
+     Args:
+         target: a version string, like "2.6.0".
+
+     Returns:
+         Whether the installed version meets the condition.
+     """
+     try:
+         torch_version = version.parse(str(torch.__version__))
+         return torch_version >= version.parse(target)
+     except Exception:
+         # Fall back to PKG-INFO to load the package info, needed by doc gen.
+         return Version(importlib.metadata.version('torch')) >= Version(target)
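
A hedged usage sketch (import path assumed): the helper is intended for runtime feature gating.

    from vllm.utils import is_torch_equal_or_newer  # assumed import path

    if is_torch_equal_or_newer("2.6.0"):
        # Safe to rely on behavior introduced in torch 2.6.
        pass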