vllm_cpu-0.12.0-cp313-cp313-manylinux_2_17_aarch64.whl
This diff represents the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
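The listing below was produced by the registry's diff tooling, but because a wheel is just a zip archive, a comparable file inventory can be reproduced locally with nothing beyond the Python standard library. A minimal sketch follows; the wheel path is an assumed local filename (not taken from the registry), and sorting by size is purely illustrative.

```python
from zipfile import ZipFile

# Assumed local path to a downloaded wheel; adjust to wherever you saved it.
WHEEL_PATH = "vllm_cpu-0.12.0-cp313-cp313-manylinux_2_17_aarch64.whl"


def list_wheel_contents(path: str) -> None:
    """Print every file packaged in the wheel, largest first.

    A wheel is a plain zip archive, so zipfile is enough to enumerate it.
    """
    with ZipFile(path) as wheel:
        infos = sorted(wheel.infolist(), key=lambda i: i.file_size, reverse=True)
        for info in infos:
            print(f"{info.file_size:>12}  {info.filename}")


if __name__ == "__main__":
    list_wheel_contents(WHEEL_PATH)
```

Run against a wheel fetched with, for example, `pip download vllm-cpu --no-deps` (assuming the package is available from the index you use), this prints one line per packaged file, roughly mirroring the inventory that follows.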
- vllm/_C.abi3.so +0 -0
- vllm/__init__.py +107 -0
- vllm/_aiter_ops.py +1018 -0
- vllm/_bc_linter.py +54 -0
- vllm/_custom_ops.py +2925 -0
- vllm/_ipex_ops.py +457 -0
- vllm/_version.py +34 -0
- vllm/assets/__init__.py +0 -0
- vllm/assets/audio.py +43 -0
- vllm/assets/base.py +40 -0
- vllm/assets/image.py +59 -0
- vllm/assets/video.py +149 -0
- vllm/attention/__init__.py +0 -0
- vllm/attention/backends/__init__.py +0 -0
- vllm/attention/backends/abstract.py +434 -0
- vllm/attention/backends/registry.py +286 -0
- vllm/attention/backends/utils.py +33 -0
- vllm/attention/layer.py +975 -0
- vllm/attention/layers/__init__.py +0 -0
- vllm/attention/layers/chunked_local_attention.py +120 -0
- vllm/attention/layers/cross_attention.py +178 -0
- vllm/attention/layers/encoder_only_attention.py +103 -0
- vllm/attention/ops/__init__.py +0 -0
- vllm/attention/ops/chunked_prefill_paged_decode.py +401 -0
- vllm/attention/ops/common.py +469 -0
- vllm/attention/ops/flashmla.py +251 -0
- vllm/attention/ops/merge_attn_states.py +47 -0
- vllm/attention/ops/paged_attn.py +51 -0
- vllm/attention/ops/pallas_kv_cache_update.py +130 -0
- vllm/attention/ops/prefix_prefill.py +814 -0
- vllm/attention/ops/rocm_aiter_mla_sparse.py +210 -0
- vllm/attention/ops/triton_decode_attention.py +712 -0
- vllm/attention/ops/triton_merge_attn_states.py +116 -0
- vllm/attention/ops/triton_reshape_and_cache_flash.py +184 -0
- vllm/attention/ops/triton_unified_attention.py +941 -0
- vllm/attention/ops/vit_attn_wrappers.py +136 -0
- vllm/attention/selector.py +268 -0
- vllm/attention/utils/__init__.py +0 -0
- vllm/attention/utils/fa_utils.py +117 -0
- vllm/attention/utils/kv_sharing_utils.py +33 -0
- vllm/attention/utils/kv_transfer_utils.py +60 -0
- vllm/beam_search.py +88 -0
- vllm/benchmarks/__init__.py +0 -0
- vllm/benchmarks/datasets.py +3222 -0
- vllm/benchmarks/latency.py +172 -0
- vllm/benchmarks/lib/__init__.py +3 -0
- vllm/benchmarks/lib/endpoint_request_func.py +777 -0
- vllm/benchmarks/lib/ready_checker.py +72 -0
- vllm/benchmarks/lib/utils.py +79 -0
- vllm/benchmarks/serve.py +1531 -0
- vllm/benchmarks/sweep/__init__.py +0 -0
- vllm/benchmarks/sweep/cli.py +41 -0
- vllm/benchmarks/sweep/param_sweep.py +91 -0
- vllm/benchmarks/sweep/plot.py +580 -0
- vllm/benchmarks/sweep/plot_pareto.py +393 -0
- vllm/benchmarks/sweep/serve.py +448 -0
- vllm/benchmarks/sweep/serve_sla.py +492 -0
- vllm/benchmarks/sweep/server.py +114 -0
- vllm/benchmarks/sweep/sla_sweep.py +132 -0
- vllm/benchmarks/sweep/utils.py +4 -0
- vllm/benchmarks/throughput.py +799 -0
- vllm/collect_env.py +857 -0
- vllm/compilation/__init__.py +0 -0
- vllm/compilation/activation_quant_fusion.py +209 -0
- vllm/compilation/backends.py +827 -0
- vllm/compilation/base_static_graph.py +57 -0
- vllm/compilation/caching.py +180 -0
- vllm/compilation/collective_fusion.py +1234 -0
- vllm/compilation/compiler_interface.py +639 -0
- vllm/compilation/counter.py +48 -0
- vllm/compilation/cuda_graph.py +208 -0
- vllm/compilation/decorators.py +614 -0
- vllm/compilation/fix_functionalization.py +253 -0
- vllm/compilation/fusion.py +374 -0
- vllm/compilation/fusion_attn.py +359 -0
- vllm/compilation/fx_utils.py +91 -0
- vllm/compilation/inductor_pass.py +133 -0
- vllm/compilation/matcher_utils.py +315 -0
- vllm/compilation/monitor.py +62 -0
- vllm/compilation/noop_elimination.py +134 -0
- vllm/compilation/partition_rules.py +72 -0
- vllm/compilation/pass_manager.py +136 -0
- vllm/compilation/piecewise_backend.py +121 -0
- vllm/compilation/post_cleanup.py +21 -0
- vllm/compilation/qk_norm_rope_fusion.py +238 -0
- vllm/compilation/sequence_parallelism.py +363 -0
- vllm/compilation/torch25_custom_graph_pass.py +44 -0
- vllm/compilation/vllm_inductor_pass.py +173 -0
- vllm/compilation/wrapper.py +260 -0
- vllm/config/__init__.py +102 -0
- vllm/config/cache.py +220 -0
- vllm/config/compilation.py +1154 -0
- vllm/config/device.py +75 -0
- vllm/config/ec_transfer.py +110 -0
- vllm/config/kv_events.py +56 -0
- vllm/config/kv_transfer.py +114 -0
- vllm/config/load.py +124 -0
- vllm/config/lora.py +96 -0
- vllm/config/model.py +2274 -0
- vllm/config/multimodal.py +247 -0
- vllm/config/observability.py +131 -0
- vllm/config/parallel.py +653 -0
- vllm/config/pooler.py +124 -0
- vllm/config/scheduler.py +297 -0
- vllm/config/speculative.py +643 -0
- vllm/config/speech_to_text.py +38 -0
- vllm/config/structured_outputs.py +94 -0
- vllm/config/utils.py +324 -0
- vllm/config/vllm.py +1353 -0
- vllm/connections.py +189 -0
- vllm/device_allocator/__init__.py +0 -0
- vllm/device_allocator/cumem.py +327 -0
- vllm/distributed/__init__.py +6 -0
- vllm/distributed/communication_op.py +43 -0
- vllm/distributed/device_communicators/__init__.py +0 -0
- vllm/distributed/device_communicators/all2all.py +490 -0
- vllm/distributed/device_communicators/all_reduce_utils.py +344 -0
- vllm/distributed/device_communicators/base_device_communicator.py +297 -0
- vllm/distributed/device_communicators/cpu_communicator.py +209 -0
- vllm/distributed/device_communicators/cuda_communicator.py +340 -0
- vllm/distributed/device_communicators/cuda_wrapper.py +216 -0
- vllm/distributed/device_communicators/custom_all_reduce.py +326 -0
- vllm/distributed/device_communicators/mnnvl_compat.py +27 -0
- vllm/distributed/device_communicators/pynccl.py +386 -0
- vllm/distributed/device_communicators/pynccl_allocator.py +191 -0
- vllm/distributed/device_communicators/pynccl_wrapper.py +564 -0
- vllm/distributed/device_communicators/quick_all_reduce.py +290 -0
- vllm/distributed/device_communicators/ray_communicator.py +259 -0
- vllm/distributed/device_communicators/shm_broadcast.py +733 -0
- vllm/distributed/device_communicators/shm_object_storage.py +697 -0
- vllm/distributed/device_communicators/symm_mem.py +156 -0
- vllm/distributed/device_communicators/tpu_communicator.py +99 -0
- vllm/distributed/device_communicators/xpu_communicator.py +95 -0
- vllm/distributed/ec_transfer/__init__.py +14 -0
- vllm/distributed/ec_transfer/ec_connector/__init__.py +0 -0
- vllm/distributed/ec_transfer/ec_connector/base.py +247 -0
- vllm/distributed/ec_transfer/ec_connector/factory.py +85 -0
- vllm/distributed/ec_transfer/ec_connector/shared_storage_connector.py +201 -0
- vllm/distributed/ec_transfer/ec_transfer_state.py +42 -0
- vllm/distributed/eplb/__init__.py +8 -0
- vllm/distributed/eplb/async_worker.py +115 -0
- vllm/distributed/eplb/eplb_state.py +1154 -0
- vllm/distributed/eplb/rebalance_algo.py +260 -0
- vllm/distributed/eplb/rebalance_execute.py +532 -0
- vllm/distributed/kv_events.py +371 -0
- vllm/distributed/kv_transfer/README.md +29 -0
- vllm/distributed/kv_transfer/__init__.py +20 -0
- vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
- vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
- vllm/distributed/kv_transfer/kv_connector/factory.py +192 -0
- vllm/distributed/kv_transfer/kv_connector/utils.py +268 -0
- vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +19 -0
- vllm/distributed/kv_transfer/kv_connector/v1/base.py +575 -0
- vllm/distributed/kv_transfer/kv_connector/v1/decode_bench_connector.py +419 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +216 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/__init__.py +18 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/multi_process_adapter.py +378 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/utils.py +221 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/vllm_v1_adapter.py +1411 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_mp_connector.py +895 -0
- vllm/distributed/kv_transfer/kv_connector/v1/metrics.py +189 -0
- vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +454 -0
- vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +2480 -0
- vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py +538 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +531 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +632 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +273 -0
- vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +450 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +179 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +164 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +242 -0
- vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_pipe/base.py +66 -0
- vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +295 -0
- vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +285 -0
- vllm/distributed/kv_transfer/kv_transfer_state.py +78 -0
- vllm/distributed/parallel_state.py +1790 -0
- vllm/distributed/tpu_distributed_utils.py +188 -0
- vllm/distributed/utils.py +545 -0
- vllm/engine/__init__.py +0 -0
- vllm/engine/arg_utils.py +2106 -0
- vllm/engine/async_llm_engine.py +6 -0
- vllm/engine/llm_engine.py +6 -0
- vllm/engine/protocol.py +188 -0
- vllm/entrypoints/__init__.py +0 -0
- vllm/entrypoints/anthropic/__init__.py +0 -0
- vllm/entrypoints/anthropic/protocol.py +162 -0
- vllm/entrypoints/anthropic/serving_messages.py +460 -0
- vllm/entrypoints/api_server.py +184 -0
- vllm/entrypoints/chat_utils.py +1837 -0
- vllm/entrypoints/cli/__init__.py +13 -0
- vllm/entrypoints/cli/benchmark/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/base.py +25 -0
- vllm/entrypoints/cli/benchmark/latency.py +21 -0
- vllm/entrypoints/cli/benchmark/main.py +56 -0
- vllm/entrypoints/cli/benchmark/serve.py +21 -0
- vllm/entrypoints/cli/benchmark/sweep.py +21 -0
- vllm/entrypoints/cli/benchmark/throughput.py +21 -0
- vllm/entrypoints/cli/collect_env.py +38 -0
- vllm/entrypoints/cli/main.py +79 -0
- vllm/entrypoints/cli/openai.py +256 -0
- vllm/entrypoints/cli/run_batch.py +68 -0
- vllm/entrypoints/cli/serve.py +249 -0
- vllm/entrypoints/cli/types.py +29 -0
- vllm/entrypoints/constants.py +10 -0
- vllm/entrypoints/context.py +572 -0
- vllm/entrypoints/dynamic_lora.py +57 -0
- vllm/entrypoints/harmony_utils.py +535 -0
- vllm/entrypoints/launcher.py +175 -0
- vllm/entrypoints/llm.py +1762 -0
- vllm/entrypoints/logger.py +84 -0
- vllm/entrypoints/openai/__init__.py +0 -0
- vllm/entrypoints/openai/api_server.py +1891 -0
- vllm/entrypoints/openai/cli_args.py +302 -0
- vllm/entrypoints/openai/orca_metrics.py +120 -0
- vllm/entrypoints/openai/protocol.py +2465 -0
- vllm/entrypoints/openai/run_batch.py +631 -0
- vllm/entrypoints/openai/serving_chat.py +1782 -0
- vllm/entrypoints/openai/serving_completion.py +716 -0
- vllm/entrypoints/openai/serving_engine.py +1478 -0
- vllm/entrypoints/openai/serving_models.py +304 -0
- vllm/entrypoints/openai/serving_responses.py +2032 -0
- vllm/entrypoints/openai/serving_tokenization.py +203 -0
- vllm/entrypoints/openai/serving_tokens.py +281 -0
- vllm/entrypoints/openai/serving_transcription.py +168 -0
- vllm/entrypoints/openai/speech_to_text.py +559 -0
- vllm/entrypoints/openai/tool_parsers/__init__.py +142 -0
- vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +273 -0
- vllm/entrypoints/openai/tool_parsers/deepseekv31_tool_parser.py +390 -0
- vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +390 -0
- vllm/entrypoints/openai/tool_parsers/ernie45_tool_parser.py +210 -0
- vllm/entrypoints/openai/tool_parsers/glm4_moe_tool_parser.py +200 -0
- vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +273 -0
- vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +253 -0
- vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +494 -0
- vllm/entrypoints/openai/tool_parsers/hunyuan_a13b_tool_parser.py +420 -0
- vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +227 -0
- vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +322 -0
- vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py +590 -0
- vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +341 -0
- vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +324 -0
- vllm/entrypoints/openai/tool_parsers/longcat_tool_parser.py +37 -0
- vllm/entrypoints/openai/tool_parsers/minimax_m2_tool_parser.py +643 -0
- vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +849 -0
- vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +390 -0
- vllm/entrypoints/openai/tool_parsers/olmo3_tool_parser.py +366 -0
- vllm/entrypoints/openai/tool_parsers/openai_tool_parser.py +97 -0
- vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +120 -0
- vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +332 -0
- vllm/entrypoints/openai/tool_parsers/qwen3coder_tool_parser.py +781 -0
- vllm/entrypoints/openai/tool_parsers/qwen3xml_tool_parser.py +1316 -0
- vllm/entrypoints/openai/tool_parsers/seed_oss_tool_parser.py +744 -0
- vllm/entrypoints/openai/tool_parsers/step3_tool_parser.py +303 -0
- vllm/entrypoints/openai/tool_parsers/utils.py +229 -0
- vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +556 -0
- vllm/entrypoints/openai/utils.py +49 -0
- vllm/entrypoints/pooling/__init__.py +16 -0
- vllm/entrypoints/pooling/classify/__init__.py +0 -0
- vllm/entrypoints/pooling/classify/api_router.py +50 -0
- vllm/entrypoints/pooling/classify/protocol.py +181 -0
- vllm/entrypoints/pooling/classify/serving.py +237 -0
- vllm/entrypoints/pooling/embed/__init__.py +0 -0
- vllm/entrypoints/pooling/embed/api_router.py +67 -0
- vllm/entrypoints/pooling/embed/protocol.py +208 -0
- vllm/entrypoints/pooling/embed/serving.py +697 -0
- vllm/entrypoints/pooling/pooling/__init__.py +0 -0
- vllm/entrypoints/pooling/pooling/api_router.py +63 -0
- vllm/entrypoints/pooling/pooling/protocol.py +148 -0
- vllm/entrypoints/pooling/pooling/serving.py +348 -0
- vllm/entrypoints/pooling/score/__init__.py +0 -0
- vllm/entrypoints/pooling/score/api_router.py +149 -0
- vllm/entrypoints/pooling/score/protocol.py +145 -0
- vllm/entrypoints/pooling/score/serving.py +505 -0
- vllm/entrypoints/renderer.py +409 -0
- vllm/entrypoints/responses_utils.py +148 -0
- vllm/entrypoints/sagemaker/__init__.py +4 -0
- vllm/entrypoints/sagemaker/routes.py +118 -0
- vllm/entrypoints/score_utils.py +240 -0
- vllm/entrypoints/ssl.py +78 -0
- vllm/entrypoints/tool.py +143 -0
- vllm/entrypoints/tool_server.py +234 -0
- vllm/entrypoints/utils.py +319 -0
- vllm/env_override.py +378 -0
- vllm/envs.py +1710 -0
- vllm/forward_context.py +358 -0
- vllm/inputs/__init__.py +44 -0
- vllm/inputs/data.py +359 -0
- vllm/inputs/parse.py +137 -0
- vllm/inputs/preprocess.py +716 -0
- vllm/logger.py +298 -0
- vllm/logging_utils/__init__.py +13 -0
- vllm/logging_utils/dump_input.py +83 -0
- vllm/logging_utils/formatter.py +127 -0
- vllm/logging_utils/lazy.py +20 -0
- vllm/logging_utils/log_time.py +34 -0
- vllm/logits_process.py +121 -0
- vllm/logprobs.py +206 -0
- vllm/lora/__init__.py +0 -0
- vllm/lora/layers/__init__.py +42 -0
- vllm/lora/layers/base.py +66 -0
- vllm/lora/layers/base_linear.py +165 -0
- vllm/lora/layers/column_parallel_linear.py +577 -0
- vllm/lora/layers/fused_moe.py +747 -0
- vllm/lora/layers/logits_processor.py +203 -0
- vllm/lora/layers/replicated_linear.py +70 -0
- vllm/lora/layers/row_parallel_linear.py +176 -0
- vllm/lora/layers/utils.py +74 -0
- vllm/lora/layers/vocal_parallel_embedding.py +140 -0
- vllm/lora/lora_weights.py +227 -0
- vllm/lora/models.py +903 -0
- vllm/lora/ops/__init__.py +0 -0
- vllm/lora/ops/ipex_ops/__init__.py +6 -0
- vllm/lora/ops/ipex_ops/lora_ops.py +57 -0
- vllm/lora/ops/torch_ops/__init__.py +20 -0
- vllm/lora/ops/torch_ops/lora_ops.py +128 -0
- vllm/lora/ops/triton_ops/README_TUNING.md +60 -0
- vllm/lora/ops/triton_ops/__init__.py +21 -0
- vllm/lora/ops/triton_ops/fused_moe_lora_op.py +661 -0
- vllm/lora/ops/triton_ops/kernel_utils.py +340 -0
- vllm/lora/ops/triton_ops/lora_expand_op.py +310 -0
- vllm/lora/ops/triton_ops/lora_kernel_metadata.py +154 -0
- vllm/lora/ops/triton_ops/lora_shrink_op.py +287 -0
- vllm/lora/ops/triton_ops/utils.py +295 -0
- vllm/lora/ops/xla_ops/__init__.py +6 -0
- vllm/lora/ops/xla_ops/lora_ops.py +141 -0
- vllm/lora/peft_helper.py +128 -0
- vllm/lora/punica_wrapper/__init__.py +10 -0
- vllm/lora/punica_wrapper/punica_base.py +493 -0
- vllm/lora/punica_wrapper/punica_cpu.py +351 -0
- vllm/lora/punica_wrapper/punica_gpu.py +412 -0
- vllm/lora/punica_wrapper/punica_selector.py +21 -0
- vllm/lora/punica_wrapper/punica_tpu.py +358 -0
- vllm/lora/punica_wrapper/punica_xpu.py +276 -0
- vllm/lora/punica_wrapper/utils.py +150 -0
- vllm/lora/request.py +100 -0
- vllm/lora/resolver.py +88 -0
- vllm/lora/utils.py +306 -0
- vllm/lora/worker_manager.py +268 -0
- vllm/model_executor/__init__.py +11 -0
- vllm/model_executor/custom_op.py +194 -0
- vllm/model_executor/layers/__init__.py +0 -0
- vllm/model_executor/layers/activation.py +595 -0
- vllm/model_executor/layers/attention_layer_base.py +32 -0
- vllm/model_executor/layers/batch_invariant.py +1058 -0
- vllm/model_executor/layers/conv.py +256 -0
- vllm/model_executor/layers/fla/__init__.py +8 -0
- vllm/model_executor/layers/fla/ops/__init__.py +17 -0
- vllm/model_executor/layers/fla/ops/chunk.py +240 -0
- vllm/model_executor/layers/fla/ops/chunk_delta_h.py +344 -0
- vllm/model_executor/layers/fla/ops/chunk_o.py +183 -0
- vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +154 -0
- vllm/model_executor/layers/fla/ops/cumsum.py +280 -0
- vllm/model_executor/layers/fla/ops/fused_recurrent.py +390 -0
- vllm/model_executor/layers/fla/ops/index.py +41 -0
- vllm/model_executor/layers/fla/ops/kda.py +1351 -0
- vllm/model_executor/layers/fla/ops/l2norm.py +146 -0
- vllm/model_executor/layers/fla/ops/layernorm_guard.py +396 -0
- vllm/model_executor/layers/fla/ops/op.py +60 -0
- vllm/model_executor/layers/fla/ops/solve_tril.py +556 -0
- vllm/model_executor/layers/fla/ops/utils.py +194 -0
- vllm/model_executor/layers/fla/ops/wy_fast.py +158 -0
- vllm/model_executor/layers/fused_moe/__init__.py +110 -0
- vllm/model_executor/layers/fused_moe/all2all_utils.py +171 -0
- vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +406 -0
- vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +180 -0
- vllm/model_executor/layers/fused_moe/config.py +938 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H100,dtype=fp8_w8a8.json +123 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_L40S.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI308X.json +213 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_L40S.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI300X.json +201 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI355_OAM,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=1536,device_name=NVIDIA_RTX_PRO_6000_Blackwell_Server_Edition,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=32,N=1408,device_name=NVIDIA_B200.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=128,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1408,device_name=NVIDIA_B200.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H100_PCIe,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=192,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/README +12 -0
- vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +292 -0
- vllm/model_executor/layers/fused_moe/cutlass_moe.py +1052 -0
- vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +387 -0
- vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +416 -0
- vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +420 -0
- vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +434 -0
- vllm/model_executor/layers/fused_moe/flashinfer_cutedsl_moe.py +376 -0
- vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +307 -0
- vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +362 -0
- vllm/model_executor/layers/fused_moe/flashinfer_trtllm_moe.py +192 -0
- vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1012 -0
- vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +821 -0
- vllm/model_executor/layers/fused_moe/fused_moe.py +2172 -0
- vllm/model_executor/layers/fused_moe/fused_moe_method_base.py +121 -0
- vllm/model_executor/layers/fused_moe/fused_moe_modular_method.py +136 -0
- vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +524 -0
- vllm/model_executor/layers/fused_moe/layer.py +2152 -0
- vllm/model_executor/layers/fused_moe/modular_kernel.py +1332 -0
- vllm/model_executor/layers/fused_moe/moe_align_block_size.py +174 -0
- vllm/model_executor/layers/fused_moe/moe_pallas.py +83 -0
- vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +229 -0
- vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
- vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +362 -0
- vllm/model_executor/layers/fused_moe/prepare_finalize.py +78 -0
- vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +265 -0
- vllm/model_executor/layers/fused_moe/routing_simulator.py +310 -0
- vllm/model_executor/layers/fused_moe/shared_fused_moe.py +96 -0
- vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +171 -0
- vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +163 -0
- vllm/model_executor/layers/fused_moe/trtllm_moe.py +143 -0
- vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py +559 -0
- vllm/model_executor/layers/fused_moe/utils.py +332 -0
- vllm/model_executor/layers/kda.py +442 -0
- vllm/model_executor/layers/layernorm.py +442 -0
- vllm/model_executor/layers/lightning_attn.py +735 -0
- vllm/model_executor/layers/linear.py +1424 -0
- vllm/model_executor/layers/logits_processor.py +106 -0
- vllm/model_executor/layers/mamba/__init__.py +0 -0
- vllm/model_executor/layers/mamba/abstract.py +68 -0
- vllm/model_executor/layers/mamba/linear_attn.py +388 -0
- vllm/model_executor/layers/mamba/mamba_mixer.py +527 -0
- vllm/model_executor/layers/mamba/mamba_mixer2.py +930 -0
- vllm/model_executor/layers/mamba/mamba_utils.py +225 -0
- vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
- vllm/model_executor/layers/mamba/ops/causal_conv1d.py +1240 -0
- vllm/model_executor/layers/mamba/ops/layernorm_gated.py +172 -0
- vllm/model_executor/layers/mamba/ops/mamba_ssm.py +478 -0
- vllm/model_executor/layers/mamba/ops/ssd_bmm.py +211 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +456 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +700 -0
- vllm/model_executor/layers/mamba/ops/ssd_combined.py +230 -0
- vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +157 -0
- vllm/model_executor/layers/mamba/short_conv.py +255 -0
- vllm/model_executor/layers/mla.py +176 -0
- vllm/model_executor/layers/pooler.py +817 -0
- vllm/model_executor/layers/quantization/__init__.py +179 -0
- vllm/model_executor/layers/quantization/auto_round.py +454 -0
- vllm/model_executor/layers/quantization/awq.py +277 -0
- vllm/model_executor/layers/quantization/awq_marlin.py +718 -0
- vllm/model_executor/layers/quantization/awq_triton.py +337 -0
- vllm/model_executor/layers/quantization/base_config.py +170 -0
- vllm/model_executor/layers/quantization/bitblas.py +502 -0
- vllm/model_executor/layers/quantization/bitsandbytes.py +644 -0
- vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +3 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +963 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +2387 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +35 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +392 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +176 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +124 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +218 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +183 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +153 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +138 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +200 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +125 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +230 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/__init__.py +0 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +260 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +173 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/__init__.py +0 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +64 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
- vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +224 -0
- vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
- vllm/model_executor/layers/quantization/cpu_wna16.py +625 -0
- vllm/model_executor/layers/quantization/deepspeedfp.py +218 -0
- vllm/model_executor/layers/quantization/experts_int8.py +225 -0
- vllm/model_executor/layers/quantization/fbgemm_fp8.py +195 -0
- vllm/model_executor/layers/quantization/fp8.py +1348 -0
- vllm/model_executor/layers/quantization/fp_quant.py +420 -0
- vllm/model_executor/layers/quantization/gguf.py +687 -0
- vllm/model_executor/layers/quantization/gptq.py +393 -0
- vllm/model_executor/layers/quantization/gptq_bitblas.py +482 -0
- vllm/model_executor/layers/quantization/gptq_marlin.py +842 -0
- vllm/model_executor/layers/quantization/gptq_marlin_24.py +320 -0
- vllm/model_executor/layers/quantization/hqq_marlin.py +372 -0
- vllm/model_executor/layers/quantization/inc.py +65 -0
- vllm/model_executor/layers/quantization/input_quant_fp8.py +171 -0
- vllm/model_executor/layers/quantization/ipex_quant.py +470 -0
- vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +94 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +105 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +323 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +98 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +119 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +111 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +161 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +159 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +200 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +73 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +97 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +120 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +219 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +140 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +42 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +105 -0
- vllm/model_executor/layers/quantization/kv_cache.py +146 -0
- vllm/model_executor/layers/quantization/modelopt.py +1637 -0
- vllm/model_executor/layers/quantization/moe_wna16.py +528 -0
- vllm/model_executor/layers/quantization/mxfp4.py +1175 -0
- vllm/model_executor/layers/quantization/petit.py +319 -0
- vllm/model_executor/layers/quantization/ptpc_fp8.py +136 -0
- vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
- vllm/model_executor/layers/quantization/quark/quark.py +527 -0
- vllm/model_executor/layers/quantization/quark/quark_moe.py +653 -0
- vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_ocp_mx.py +343 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +179 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +139 -0
- vllm/model_executor/layers/quantization/quark/utils.py +105 -0
- vllm/model_executor/layers/quantization/qutlass_utils.py +185 -0
- vllm/model_executor/layers/quantization/rtn.py +639 -0
- vllm/model_executor/layers/quantization/schema.py +90 -0
- vllm/model_executor/layers/quantization/torchao.py +380 -0
- vllm/model_executor/layers/quantization/tpu_int8.py +139 -0
- vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
- vllm/model_executor/layers/quantization/utils/allspark_utils.py +67 -0
- vllm/model_executor/layers/quantization/utils/bitblas_utils.py +229 -0
- vllm/model_executor/layers/quantization/utils/configs/N=10240,K=5120,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=5120,K=25600,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=5120,K=8192,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=51200,K=5120,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
- vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +333 -0
- vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +311 -0
- vllm/model_executor/layers/quantization/utils/fp8_utils.py +1203 -0
- vllm/model_executor/layers/quantization/utils/gptq_utils.py +158 -0
- vllm/model_executor/layers/quantization/utils/int8_utils.py +489 -0
- vllm/model_executor/layers/quantization/utils/layer_utils.py +41 -0
- vllm/model_executor/layers/quantization/utils/machete_utils.py +56 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils.py +674 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +452 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +378 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +219 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +467 -0
- vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +183 -0
- vllm/model_executor/layers/quantization/utils/mxfp6_utils.py +142 -0
- vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +24 -0
- vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +142 -0
- vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +67 -0
- vllm/model_executor/layers/quantization/utils/ocp_mx_utils.py +51 -0
- vllm/model_executor/layers/quantization/utils/petit_utils.py +124 -0
- vllm/model_executor/layers/quantization/utils/quant_utils.py +687 -0
- vllm/model_executor/layers/quantization/utils/w8a8_utils.py +516 -0
- vllm/model_executor/layers/resampler.py +283 -0
- vllm/model_executor/layers/rotary_embedding/__init__.py +292 -0
- vllm/model_executor/layers/rotary_embedding/base.py +240 -0
- vllm/model_executor/layers/rotary_embedding/common.py +188 -0
- vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +165 -0
- vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +215 -0
- vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +43 -0
- vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +68 -0
- vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +75 -0
- vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
- vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
- vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +80 -0
- vllm/model_executor/layers/rotary_embedding/mrope.py +397 -0
- vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +47 -0
- vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +159 -0
- vllm/model_executor/layers/rotary_embedding/xdrope.py +102 -0
- vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +84 -0
- vllm/model_executor/layers/utils.py +251 -0
- vllm/model_executor/layers/vocab_parallel_embedding.py +558 -0
- vllm/model_executor/model_loader/__init__.py +150 -0
- vllm/model_executor/model_loader/base_loader.py +57 -0
- vllm/model_executor/model_loader/bitsandbytes_loader.py +822 -0
- vllm/model_executor/model_loader/default_loader.py +321 -0
- vllm/model_executor/model_loader/dummy_loader.py +28 -0
- vllm/model_executor/model_loader/gguf_loader.py +349 -0
- vllm/model_executor/model_loader/online_quantization.py +275 -0
- vllm/model_executor/model_loader/runai_streamer_loader.py +116 -0
- vllm/model_executor/model_loader/sharded_state_loader.py +214 -0
- vllm/model_executor/model_loader/tensorizer.py +790 -0
- vllm/model_executor/model_loader/tensorizer_loader.py +151 -0
- vllm/model_executor/model_loader/tpu.py +118 -0
- vllm/model_executor/model_loader/utils.py +296 -0
- vllm/model_executor/model_loader/weight_utils.py +1147 -0
- vllm/model_executor/models/__init__.py +44 -0
- vllm/model_executor/models/adapters.py +543 -0
- vllm/model_executor/models/afmoe.py +697 -0
- vllm/model_executor/models/aimv2.py +248 -0
- vllm/model_executor/models/apertus.py +569 -0
- vllm/model_executor/models/arcee.py +428 -0
- vllm/model_executor/models/arctic.py +634 -0
- vllm/model_executor/models/aria.py +655 -0
- vllm/model_executor/models/aya_vision.py +450 -0
- vllm/model_executor/models/baichuan.py +494 -0
- vllm/model_executor/models/bailing_moe.py +645 -0
- vllm/model_executor/models/bamba.py +516 -0
- vllm/model_executor/models/bee.py +157 -0
- vllm/model_executor/models/bert.py +925 -0
- vllm/model_executor/models/bert_with_rope.py +732 -0
- vllm/model_executor/models/blip.py +350 -0
- vllm/model_executor/models/blip2.py +695 -0
- vllm/model_executor/models/bloom.py +390 -0
- vllm/model_executor/models/chameleon.py +1098 -0
- vllm/model_executor/models/chatglm.py +499 -0
- vllm/model_executor/models/clip.py +1005 -0
- vllm/model_executor/models/cohere2_vision.py +472 -0
- vllm/model_executor/models/commandr.py +470 -0
- vllm/model_executor/models/config.py +510 -0
- vllm/model_executor/models/dbrx.py +485 -0
- vllm/model_executor/models/deepencoder.py +676 -0
- vllm/model_executor/models/deepseek_eagle.py +252 -0
- vllm/model_executor/models/deepseek_mtp.py +446 -0
- vllm/model_executor/models/deepseek_ocr.py +593 -0
- vllm/model_executor/models/deepseek_v2.py +1715 -0
- vllm/model_executor/models/deepseek_vl2.py +644 -0
- vllm/model_executor/models/dots1.py +566 -0
- vllm/model_executor/models/dots_ocr.py +874 -0
- vllm/model_executor/models/ernie45.py +53 -0
- vllm/model_executor/models/ernie45_moe.py +755 -0
- vllm/model_executor/models/ernie45_vl.py +1710 -0
- vllm/model_executor/models/ernie45_vl_moe.py +800 -0
- vllm/model_executor/models/ernie_mtp.py +279 -0
- vllm/model_executor/models/exaone.py +525 -0
- vllm/model_executor/models/exaone4.py +517 -0
- vllm/model_executor/models/fairseq2_llama.py +154 -0
- vllm/model_executor/models/falcon.py +544 -0
- vllm/model_executor/models/falcon_h1.py +680 -0
- vllm/model_executor/models/flex_olmo.py +155 -0
- vllm/model_executor/models/fuyu.py +373 -0
- vllm/model_executor/models/gemma.py +426 -0
- vllm/model_executor/models/gemma2.py +436 -0
- vllm/model_executor/models/gemma3.py +577 -0
- vllm/model_executor/models/gemma3_mm.py +665 -0
- vllm/model_executor/models/gemma3n.py +1167 -0
- vllm/model_executor/models/gemma3n_mm.py +811 -0
- vllm/model_executor/models/glm.py +23 -0
- vllm/model_executor/models/glm4.py +298 -0
- vllm/model_executor/models/glm4_1v.py +1854 -0
- vllm/model_executor/models/glm4_moe.py +738 -0
- vllm/model_executor/models/glm4_moe_mtp.py +359 -0
- vllm/model_executor/models/glm4v.py +785 -0
- vllm/model_executor/models/gpt2.py +397 -0
- vllm/model_executor/models/gpt_bigcode.py +339 -0
- vllm/model_executor/models/gpt_j.py +345 -0
- vllm/model_executor/models/gpt_neox.py +343 -0
- vllm/model_executor/models/gpt_oss.py +745 -0
- vllm/model_executor/models/granite.py +476 -0
- vllm/model_executor/models/granite_speech.py +913 -0
- vllm/model_executor/models/granitemoe.py +561 -0
- vllm/model_executor/models/granitemoehybrid.py +704 -0
- vllm/model_executor/models/granitemoeshared.py +328 -0
- vllm/model_executor/models/gritlm.py +245 -0
- vllm/model_executor/models/grok1.py +555 -0
- vllm/model_executor/models/h2ovl.py +554 -0
- vllm/model_executor/models/hunyuan_v1.py +1042 -0
- vllm/model_executor/models/hunyuan_vision.py +1028 -0
- vllm/model_executor/models/hyperclovax_vision.py +1166 -0
- vllm/model_executor/models/idefics2_vision_model.py +427 -0
- vllm/model_executor/models/idefics3.py +718 -0
- vllm/model_executor/models/interfaces.py +1148 -0
- vllm/model_executor/models/interfaces_base.py +243 -0
- vllm/model_executor/models/intern_vit.py +454 -0
- vllm/model_executor/models/internlm2.py +454 -0
- vllm/model_executor/models/internlm2_ve.py +139 -0
- vllm/model_executor/models/interns1.py +830 -0
- vllm/model_executor/models/interns1_vit.py +433 -0
- vllm/model_executor/models/internvl.py +1452 -0
- vllm/model_executor/models/jais.py +397 -0
- vllm/model_executor/models/jamba.py +609 -0
- vllm/model_executor/models/jina_vl.py +147 -0
- vllm/model_executor/models/keye.py +1765 -0
- vllm/model_executor/models/keye_vl1_5.py +726 -0
- vllm/model_executor/models/kimi_linear.py +658 -0
- vllm/model_executor/models/kimi_vl.py +578 -0
- vllm/model_executor/models/lfm2.py +516 -0
- vllm/model_executor/models/lfm2_moe.py +746 -0
- vllm/model_executor/models/lightonocr.py +195 -0
- vllm/model_executor/models/llama.py +704 -0
- vllm/model_executor/models/llama4.py +857 -0
- vllm/model_executor/models/llama4_eagle.py +216 -0
- vllm/model_executor/models/llama_eagle.py +213 -0
- vllm/model_executor/models/llama_eagle3.py +375 -0
- vllm/model_executor/models/llava.py +842 -0
- vllm/model_executor/models/llava_next.py +583 -0
- vllm/model_executor/models/llava_next_video.py +467 -0
- vllm/model_executor/models/llava_onevision.py +923 -0
- vllm/model_executor/models/longcat_flash.py +743 -0
- vllm/model_executor/models/longcat_flash_mtp.py +349 -0
- vllm/model_executor/models/mamba.py +276 -0
- vllm/model_executor/models/mamba2.py +288 -0
- vllm/model_executor/models/medusa.py +179 -0
- vllm/model_executor/models/midashenglm.py +828 -0
- vllm/model_executor/models/mimo.py +188 -0
- vllm/model_executor/models/mimo_mtp.py +294 -0
- vllm/model_executor/models/minicpm.py +657 -0
- vllm/model_executor/models/minicpm3.py +234 -0
- vllm/model_executor/models/minicpm_eagle.py +385 -0
- vllm/model_executor/models/minicpmo.py +768 -0
- vllm/model_executor/models/minicpmv.py +1744 -0
- vllm/model_executor/models/minimax_m2.py +546 -0
- vllm/model_executor/models/minimax_text_01.py +1010 -0
- vllm/model_executor/models/minimax_vl_01.py +396 -0
- vllm/model_executor/models/mistral3.py +637 -0
- vllm/model_executor/models/mistral_large_3.py +63 -0
- vllm/model_executor/models/mistral_large_3_eagle.py +165 -0
- vllm/model_executor/models/mixtral.py +599 -0
- vllm/model_executor/models/mllama4.py +1151 -0
- vllm/model_executor/models/mlp_speculator.py +235 -0
- vllm/model_executor/models/modernbert.py +452 -0
- vllm/model_executor/models/module_mapping.py +74 -0
- vllm/model_executor/models/molmo.py +1553 -0
- vllm/model_executor/models/moonvit.py +686 -0
- vllm/model_executor/models/mpt.py +335 -0
- vllm/model_executor/models/nano_nemotron_vl.py +1732 -0
- vllm/model_executor/models/nemotron.py +502 -0
- vllm/model_executor/models/nemotron_h.py +850 -0
- vllm/model_executor/models/nemotron_nas.py +473 -0
- vllm/model_executor/models/nemotron_vl.py +653 -0
- vllm/model_executor/models/nvlm_d.py +216 -0
- vllm/model_executor/models/olmo.py +413 -0
- vllm/model_executor/models/olmo2.py +455 -0
- vllm/model_executor/models/olmoe.py +494 -0
- vllm/model_executor/models/opencua.py +271 -0
- vllm/model_executor/models/openpangu.py +1051 -0
- vllm/model_executor/models/openpangu_mtp.py +265 -0
- vllm/model_executor/models/opt.py +426 -0
- vllm/model_executor/models/orion.py +366 -0
- vllm/model_executor/models/ouro.py +508 -0
- vllm/model_executor/models/ovis.py +559 -0
- vllm/model_executor/models/ovis2_5.py +673 -0
- vllm/model_executor/models/paddleocr_vl.py +1380 -0
- vllm/model_executor/models/paligemma.py +412 -0
- vllm/model_executor/models/persimmon.py +376 -0
- vllm/model_executor/models/phi.py +370 -0
- vllm/model_executor/models/phi3.py +18 -0
- vllm/model_executor/models/phi3v.py +737 -0
- vllm/model_executor/models/phi4_multimodal.py +1447 -0
- vllm/model_executor/models/phi4mm.py +1253 -0
- vllm/model_executor/models/phi4mm_audio.py +1296 -0
- vllm/model_executor/models/phi4mm_utils.py +1907 -0
- vllm/model_executor/models/phimoe.py +670 -0
- vllm/model_executor/models/pixtral.py +1380 -0
- vllm/model_executor/models/plamo2.py +966 -0
- vllm/model_executor/models/plamo3.py +441 -0
- vllm/model_executor/models/qwen.py +363 -0
- vllm/model_executor/models/qwen2.py +569 -0
- vllm/model_executor/models/qwen2_5_omni_thinker.py +1220 -0
- vllm/model_executor/models/qwen2_5_vl.py +1594 -0
- vllm/model_executor/models/qwen2_audio.py +473 -0
- vllm/model_executor/models/qwen2_moe.py +590 -0
- vllm/model_executor/models/qwen2_rm.py +123 -0
- vllm/model_executor/models/qwen2_vl.py +1593 -0
- vllm/model_executor/models/qwen3.py +332 -0
- vllm/model_executor/models/qwen3_moe.py +738 -0
- vllm/model_executor/models/qwen3_next.py +1390 -0
- vllm/model_executor/models/qwen3_next_mtp.py +296 -0
- vllm/model_executor/models/qwen3_omni_moe_thinker.py +1765 -0
- vllm/model_executor/models/qwen3_vl.py +1686 -0
- vllm/model_executor/models/qwen3_vl_moe.py +470 -0
- vllm/model_executor/models/qwen_vl.py +803 -0
- vllm/model_executor/models/radio.py +555 -0
- vllm/model_executor/models/registry.py +1183 -0
- vllm/model_executor/models/roberta.py +259 -0
- vllm/model_executor/models/rvl.py +107 -0
- vllm/model_executor/models/seed_oss.py +493 -0
- vllm/model_executor/models/siglip.py +1245 -0
- vllm/model_executor/models/siglip2navit.py +723 -0
- vllm/model_executor/models/skyworkr1v.py +953 -0
- vllm/model_executor/models/smolvlm.py +38 -0
- vllm/model_executor/models/solar.py +485 -0
- vllm/model_executor/models/stablelm.py +359 -0
- vllm/model_executor/models/starcoder2.py +366 -0
- vllm/model_executor/models/step3_text.py +555 -0
- vllm/model_executor/models/step3_vl.py +1149 -0
- vllm/model_executor/models/swin.py +514 -0
- vllm/model_executor/models/tarsier.py +619 -0
- vllm/model_executor/models/telechat2.py +153 -0
- vllm/model_executor/models/teleflm.py +78 -0
- vllm/model_executor/models/terratorch.py +319 -0
- vllm/model_executor/models/transformers/__init__.py +127 -0
- vllm/model_executor/models/transformers/base.py +464 -0
- vllm/model_executor/models/transformers/causal.py +65 -0
- vllm/model_executor/models/transformers/legacy.py +90 -0
- vllm/model_executor/models/transformers/moe.py +325 -0
- vllm/model_executor/models/transformers/multimodal.py +411 -0
- vllm/model_executor/models/transformers/pooling.py +119 -0
- vllm/model_executor/models/transformers/utils.py +213 -0
- vllm/model_executor/models/ultravox.py +686 -0
- vllm/model_executor/models/utils.py +832 -0
- vllm/model_executor/models/vision.py +552 -0
- vllm/model_executor/models/voxtral.py +842 -0
- vllm/model_executor/models/whisper.py +963 -0
- vllm/model_executor/models/zamba2.py +980 -0
- vllm/model_executor/parameter.py +642 -0
- vllm/model_executor/utils.py +94 -0
- vllm/model_executor/warmup/__init__.py +0 -0
- vllm/model_executor/warmup/deep_gemm_warmup.py +314 -0
- vllm/model_executor/warmup/kernel_warmup.py +98 -0
- vllm/multimodal/__init__.py +40 -0
- vllm/multimodal/audio.py +142 -0
- vllm/multimodal/base.py +26 -0
- vllm/multimodal/cache.py +830 -0
- vllm/multimodal/evs.py +294 -0
- vllm/multimodal/hasher.py +106 -0
- vllm/multimodal/image.py +130 -0
- vllm/multimodal/inputs.py +1036 -0
- vllm/multimodal/parse.py +544 -0
- vllm/multimodal/processing.py +2240 -0
- vllm/multimodal/profiling.py +369 -0
- vllm/multimodal/registry.py +357 -0
- vllm/multimodal/utils.py +523 -0
- vllm/multimodal/video.py +333 -0
- vllm/outputs.py +345 -0
- vllm/platforms/__init__.py +277 -0
- vllm/platforms/cpu.py +410 -0
- vllm/platforms/cuda.py +642 -0
- vllm/platforms/interface.py +656 -0
- vllm/platforms/rocm.py +513 -0
- vllm/platforms/tpu.py +275 -0
- vllm/platforms/xpu.py +261 -0
- vllm/plugins/__init__.py +81 -0
- vllm/plugins/io_processors/__init__.py +68 -0
- vllm/plugins/io_processors/interface.py +77 -0
- vllm/plugins/lora_resolvers/__init__.py +0 -0
- vllm/plugins/lora_resolvers/filesystem_resolver.py +52 -0
- vllm/pooling_params.py +230 -0
- vllm/profiler/__init__.py +0 -0
- vllm/profiler/gpu_profiler.py +216 -0
- vllm/profiler/layerwise_profile.py +392 -0
- vllm/profiler/utils.py +151 -0
- vllm/py.typed +2 -0
- vllm/ray/__init__.py +0 -0
- vllm/ray/lazy_utils.py +30 -0
- vllm/ray/ray_env.py +79 -0
- vllm/reasoning/__init__.py +92 -0
- vllm/reasoning/abs_reasoning_parsers.py +290 -0
- vllm/reasoning/basic_parsers.py +162 -0
- vllm/reasoning/deepseek_r1_reasoning_parser.py +67 -0
- vllm/reasoning/deepseek_v3_reasoning_parser.py +62 -0
- vllm/reasoning/ernie45_reasoning_parser.py +165 -0
- vllm/reasoning/glm4_moe_reasoning_parser.py +171 -0
- vllm/reasoning/gptoss_reasoning_parser.py +173 -0
- vllm/reasoning/granite_reasoning_parser.py +363 -0
- vllm/reasoning/hunyuan_a13b_reasoning_parser.py +237 -0
- vllm/reasoning/identity_reasoning_parser.py +58 -0
- vllm/reasoning/minimax_m2_reasoning_parser.py +67 -0
- vllm/reasoning/mistral_reasoning_parser.py +55 -0
- vllm/reasoning/olmo3_reasoning_parser.py +302 -0
- vllm/reasoning/qwen3_reasoning_parser.py +67 -0
- vllm/reasoning/seedoss_reasoning_parser.py +27 -0
- vllm/reasoning/step3_reasoning_parser.py +107 -0
- vllm/sampling_params.py +597 -0
- vllm/scalar_type.py +355 -0
- vllm/scripts.py +17 -0
- vllm/sequence.py +98 -0
- vllm/tasks.py +13 -0
- vllm/third_party/__init__.py +0 -0
- vllm/third_party/pynvml.py +6140 -0
- vllm/tokenizers/__init__.py +24 -0
- vllm/tokenizers/detokenizer_utils.py +198 -0
- vllm/tokenizers/hf.py +124 -0
- vllm/tokenizers/mistral.py +554 -0
- vllm/tokenizers/protocol.py +111 -0
- vllm/tokenizers/registry.py +233 -0
- vllm/tracing.py +135 -0
- vllm/transformers_utils/__init__.py +26 -0
- vllm/transformers_utils/chat_templates/__init__.py +5 -0
- vllm/transformers_utils/chat_templates/registry.py +73 -0
- vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
- vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
- vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
- vllm/transformers_utils/chat_templates/template_deepseek_ocr.jinja +14 -0
- vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
- vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
- vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
- vllm/transformers_utils/config.py +1081 -0
- vllm/transformers_utils/config_parser_base.py +20 -0
- vllm/transformers_utils/configs/__init__.py +84 -0
- vllm/transformers_utils/configs/afmoe.py +87 -0
- vllm/transformers_utils/configs/arctic.py +216 -0
- vllm/transformers_utils/configs/chatglm.py +75 -0
- vllm/transformers_utils/configs/deepseek_vl2.py +126 -0
- vllm/transformers_utils/configs/dotsocr.py +71 -0
- vllm/transformers_utils/configs/eagle.py +90 -0
- vllm/transformers_utils/configs/falcon.py +89 -0
- vllm/transformers_utils/configs/flex_olmo.py +82 -0
- vllm/transformers_utils/configs/hunyuan_vl.py +322 -0
- vllm/transformers_utils/configs/jais.py +243 -0
- vllm/transformers_utils/configs/kimi_linear.py +148 -0
- vllm/transformers_utils/configs/kimi_vl.py +38 -0
- vllm/transformers_utils/configs/lfm2_moe.py +163 -0
- vllm/transformers_utils/configs/medusa.py +65 -0
- vllm/transformers_utils/configs/midashenglm.py +103 -0
- vllm/transformers_utils/configs/mistral.py +235 -0
- vllm/transformers_utils/configs/mlp_speculator.py +69 -0
- vllm/transformers_utils/configs/moonvit.py +33 -0
- vllm/transformers_utils/configs/nemotron.py +214 -0
- vllm/transformers_utils/configs/nemotron_h.py +282 -0
- vllm/transformers_utils/configs/olmo3.py +83 -0
- vllm/transformers_utils/configs/ovis.py +182 -0
- vllm/transformers_utils/configs/qwen3_next.py +275 -0
- vllm/transformers_utils/configs/radio.py +89 -0
- vllm/transformers_utils/configs/speculators/__init__.py +2 -0
- vllm/transformers_utils/configs/speculators/algos.py +38 -0
- vllm/transformers_utils/configs/speculators/base.py +114 -0
- vllm/transformers_utils/configs/step3_vl.py +178 -0
- vllm/transformers_utils/configs/ultravox.py +118 -0
- vllm/transformers_utils/dynamic_module.py +59 -0
- vllm/transformers_utils/gguf_utils.py +209 -0
- vllm/transformers_utils/processor.py +423 -0
- vllm/transformers_utils/processors/__init__.py +23 -0
- vllm/transformers_utils/processors/deepseek_ocr.py +438 -0
- vllm/transformers_utils/processors/deepseek_vl2.py +406 -0
- vllm/transformers_utils/processors/hunyuan_vl.py +233 -0
- vllm/transformers_utils/processors/hunyuan_vl_image.py +477 -0
- vllm/transformers_utils/processors/ovis.py +453 -0
- vllm/transformers_utils/processors/ovis2_5.py +468 -0
- vllm/transformers_utils/repo_utils.py +287 -0
- vllm/transformers_utils/runai_utils.py +104 -0
- vllm/transformers_utils/s3_utils.py +95 -0
- vllm/transformers_utils/tokenizer.py +127 -0
- vllm/transformers_utils/tokenizer_base.py +33 -0
- vllm/transformers_utils/utils.py +184 -0
- vllm/triton_utils/__init__.py +20 -0
- vllm/triton_utils/importing.py +103 -0
- vllm/usage/__init__.py +0 -0
- vllm/usage/usage_lib.py +294 -0
- vllm/utils/__init__.py +66 -0
- vllm/utils/argparse_utils.py +504 -0
- vllm/utils/async_utils.py +310 -0
- vllm/utils/cache.py +214 -0
- vllm/utils/collection_utils.py +112 -0
- vllm/utils/counter.py +45 -0
- vllm/utils/deep_gemm.py +399 -0
- vllm/utils/flashinfer.py +532 -0
- vllm/utils/func_utils.py +236 -0
- vllm/utils/gc_utils.py +151 -0
- vllm/utils/hashing.py +81 -0
- vllm/utils/import_utils.py +449 -0
- vllm/utils/jsontree.py +158 -0
- vllm/utils/math_utils.py +32 -0
- vllm/utils/mem_constants.py +13 -0
- vllm/utils/mem_utils.py +232 -0
- vllm/utils/nccl.py +64 -0
- vllm/utils/network_utils.py +331 -0
- vllm/utils/platform_utils.py +59 -0
- vllm/utils/profiling.py +56 -0
- vllm/utils/registry.py +51 -0
- vllm/utils/serial_utils.py +169 -0
- vllm/utils/system_utils.py +265 -0
- vllm/utils/tensor_schema.py +255 -0
- vllm/utils/torch_utils.py +647 -0
- vllm/v1/__init__.py +0 -0
- vllm/v1/attention/__init__.py +0 -0
- vllm/v1/attention/backends/__init__.py +0 -0
- vllm/v1/attention/backends/cpu_attn.py +497 -0
- vllm/v1/attention/backends/flash_attn.py +1050 -0
- vllm/v1/attention/backends/flashinfer.py +1572 -0
- vllm/v1/attention/backends/flex_attention.py +945 -0
- vllm/v1/attention/backends/gdn_attn.py +387 -0
- vllm/v1/attention/backends/linear_attn.py +77 -0
- vllm/v1/attention/backends/mamba1_attn.py +165 -0
- vllm/v1/attention/backends/mamba2_attn.py +354 -0
- vllm/v1/attention/backends/mamba_attn.py +117 -0
- vllm/v1/attention/backends/mla/__init__.py +0 -0
- vllm/v1/attention/backends/mla/aiter_triton_mla.py +74 -0
- vllm/v1/attention/backends/mla/common.py +2069 -0
- vllm/v1/attention/backends/mla/cutlass_mla.py +278 -0
- vllm/v1/attention/backends/mla/flashattn_mla.py +340 -0
- vllm/v1/attention/backends/mla/flashinfer_mla.py +174 -0
- vllm/v1/attention/backends/mla/flashmla.py +317 -0
- vllm/v1/attention/backends/mla/flashmla_sparse.py +551 -0
- vllm/v1/attention/backends/mla/indexer.py +369 -0
- vllm/v1/attention/backends/mla/rocm_aiter_mla.py +275 -0
- vllm/v1/attention/backends/mla/rocm_aiter_mla_sparse.py +325 -0
- vllm/v1/attention/backends/mla/triton_mla.py +171 -0
- vllm/v1/attention/backends/pallas.py +436 -0
- vllm/v1/attention/backends/rocm_aiter_fa.py +1000 -0
- vllm/v1/attention/backends/rocm_aiter_unified_attn.py +206 -0
- vllm/v1/attention/backends/rocm_attn.py +359 -0
- vllm/v1/attention/backends/short_conv_attn.py +105 -0
- vllm/v1/attention/backends/tree_attn.py +428 -0
- vllm/v1/attention/backends/triton_attn.py +377 -0
- vllm/v1/attention/backends/utils.py +1149 -0
- vllm/v1/core/__init__.py +0 -0
- vllm/v1/core/block_pool.py +466 -0
- vllm/v1/core/encoder_cache_manager.py +343 -0
- vllm/v1/core/kv_cache_coordinator.py +570 -0
- vllm/v1/core/kv_cache_manager.py +408 -0
- vllm/v1/core/kv_cache_metrics.py +96 -0
- vllm/v1/core/kv_cache_utils.py +1471 -0
- vllm/v1/core/sched/__init__.py +0 -0
- vllm/v1/core/sched/async_scheduler.py +68 -0
- vllm/v1/core/sched/interface.py +187 -0
- vllm/v1/core/sched/output.py +230 -0
- vllm/v1/core/sched/request_queue.py +217 -0
- vllm/v1/core/sched/scheduler.py +1726 -0
- vllm/v1/core/sched/utils.py +72 -0
- vllm/v1/core/single_type_kv_cache_manager.py +801 -0
- vllm/v1/cudagraph_dispatcher.py +183 -0
- vllm/v1/engine/__init__.py +214 -0
- vllm/v1/engine/async_llm.py +874 -0
- vllm/v1/engine/coordinator.py +377 -0
- vllm/v1/engine/core.py +1421 -0
- vllm/v1/engine/core_client.py +1406 -0
- vllm/v1/engine/detokenizer.py +351 -0
- vllm/v1/engine/exceptions.py +18 -0
- vllm/v1/engine/input_processor.py +636 -0
- vllm/v1/engine/llm_engine.py +416 -0
- vllm/v1/engine/logprobs.py +189 -0
- vllm/v1/engine/output_processor.py +658 -0
- vllm/v1/engine/parallel_sampling.py +145 -0
- vllm/v1/engine/processor.py +20 -0
- vllm/v1/engine/utils.py +1068 -0
- vllm/v1/executor/__init__.py +6 -0
- vllm/v1/executor/abstract.py +352 -0
- vllm/v1/executor/multiproc_executor.py +888 -0
- vllm/v1/executor/ray_distributed_executor.py +8 -0
- vllm/v1/executor/ray_executor.py +626 -0
- vllm/v1/executor/ray_utils.py +465 -0
- vllm/v1/executor/uniproc_executor.py +183 -0
- vllm/v1/kv_cache_interface.py +404 -0
- vllm/v1/kv_offload/__init__.py +0 -0
- vllm/v1/kv_offload/abstract.py +161 -0
- vllm/v1/kv_offload/arc_manager.py +237 -0
- vllm/v1/kv_offload/backend.py +97 -0
- vllm/v1/kv_offload/backends/__init__.py +0 -0
- vllm/v1/kv_offload/backends/cpu.py +62 -0
- vllm/v1/kv_offload/cpu.py +86 -0
- vllm/v1/kv_offload/factory.py +56 -0
- vllm/v1/kv_offload/lru_manager.py +139 -0
- vllm/v1/kv_offload/mediums.py +39 -0
- vllm/v1/kv_offload/spec.py +66 -0
- vllm/v1/kv_offload/worker/__init__.py +0 -0
- vllm/v1/kv_offload/worker/cpu_gpu.py +191 -0
- vllm/v1/kv_offload/worker/worker.py +144 -0
- vllm/v1/metrics/__init__.py +0 -0
- vllm/v1/metrics/loggers.py +1268 -0
- vllm/v1/metrics/prometheus.py +82 -0
- vllm/v1/metrics/ray_wrappers.py +194 -0
- vllm/v1/metrics/reader.py +257 -0
- vllm/v1/metrics/stats.py +431 -0
- vllm/v1/outputs.py +237 -0
- vllm/v1/pool/__init__.py +0 -0
- vllm/v1/pool/metadata.py +82 -0
- vllm/v1/request.py +280 -0
- vllm/v1/sample/__init__.py +0 -0
- vllm/v1/sample/logits_processor/__init__.py +352 -0
- vllm/v1/sample/logits_processor/builtin.py +278 -0
- vllm/v1/sample/logits_processor/interface.py +106 -0
- vllm/v1/sample/logits_processor/state.py +165 -0
- vllm/v1/sample/metadata.py +44 -0
- vllm/v1/sample/ops/__init__.py +0 -0
- vllm/v1/sample/ops/bad_words.py +52 -0
- vllm/v1/sample/ops/logprobs.py +25 -0
- vllm/v1/sample/ops/penalties.py +57 -0
- vllm/v1/sample/ops/topk_topp_sampler.py +384 -0
- vllm/v1/sample/rejection_sampler.py +805 -0
- vllm/v1/sample/sampler.py +319 -0
- vllm/v1/sample/tpu/__init__.py +0 -0
- vllm/v1/sample/tpu/metadata.py +120 -0
- vllm/v1/sample/tpu/sampler.py +215 -0
- vllm/v1/serial_utils.py +532 -0
- vllm/v1/spec_decode/__init__.py +0 -0
- vllm/v1/spec_decode/eagle.py +1325 -0
- vllm/v1/spec_decode/medusa.py +73 -0
- vllm/v1/spec_decode/metadata.py +66 -0
- vllm/v1/spec_decode/metrics.py +225 -0
- vllm/v1/spec_decode/ngram_proposer.py +291 -0
- vllm/v1/spec_decode/suffix_decoding.py +101 -0
- vllm/v1/spec_decode/utils.py +121 -0
- vllm/v1/structured_output/__init__.py +338 -0
- vllm/v1/structured_output/backend_guidance.py +265 -0
- vllm/v1/structured_output/backend_lm_format_enforcer.py +177 -0
- vllm/v1/structured_output/backend_outlines.py +324 -0
- vllm/v1/structured_output/backend_types.py +136 -0
- vllm/v1/structured_output/backend_xgrammar.py +362 -0
- vllm/v1/structured_output/request.py +94 -0
- vllm/v1/structured_output/utils.py +469 -0
- vllm/v1/utils.py +414 -0
- vllm/v1/worker/__init__.py +0 -0
- vllm/v1/worker/block_table.py +343 -0
- vllm/v1/worker/cpu_model_runner.py +122 -0
- vllm/v1/worker/cpu_worker.py +210 -0
- vllm/v1/worker/dp_utils.py +250 -0
- vllm/v1/worker/ec_connector_model_runner_mixin.py +87 -0
- vllm/v1/worker/gpu/README.md +4 -0
- vllm/v1/worker/gpu/__init__.py +0 -0
- vllm/v1/worker/gpu/async_utils.py +97 -0
- vllm/v1/worker/gpu/attn_utils.py +189 -0
- vllm/v1/worker/gpu/block_table.py +314 -0
- vllm/v1/worker/gpu/cudagraph_utils.py +259 -0
- vllm/v1/worker/gpu/dp_utils.py +31 -0
- vllm/v1/worker/gpu/input_batch.py +430 -0
- vllm/v1/worker/gpu/model_runner.py +1007 -0
- vllm/v1/worker/gpu/sample/__init__.py +0 -0
- vllm/v1/worker/gpu/sample/gumbel.py +101 -0
- vllm/v1/worker/gpu/sample/logprob.py +167 -0
- vllm/v1/worker/gpu/sample/metadata.py +179 -0
- vllm/v1/worker/gpu/sample/penalties.py +154 -0
- vllm/v1/worker/gpu/sample/sampler.py +75 -0
- vllm/v1/worker/gpu/spec_decode/__init__.py +18 -0
- vllm/v1/worker/gpu/spec_decode/eagle.py +565 -0
- vllm/v1/worker/gpu/spec_decode/eagle_cudagraph.py +115 -0
- vllm/v1/worker/gpu/spec_decode/rejection_sample.py +83 -0
- vllm/v1/worker/gpu/states.py +309 -0
- vllm/v1/worker/gpu/structured_outputs.py +76 -0
- vllm/v1/worker/gpu_input_batch.py +971 -0
- vllm/v1/worker/gpu_model_runner.py +5360 -0
- vllm/v1/worker/gpu_ubatch_wrapper.py +472 -0
- vllm/v1/worker/gpu_worker.py +922 -0
- vllm/v1/worker/kv_connector_model_runner_mixin.py +309 -0
- vllm/v1/worker/lora_model_runner_mixin.py +212 -0
- vllm/v1/worker/tpu_input_batch.py +583 -0
- vllm/v1/worker/tpu_model_runner.py +2196 -0
- vllm/v1/worker/tpu_worker.py +351 -0
- vllm/v1/worker/ubatch_utils.py +73 -0
- vllm/v1/worker/ubatching.py +231 -0
- vllm/v1/worker/utils.py +365 -0
- vllm/v1/worker/worker_base.py +377 -0
- vllm/v1/worker/xpu_model_runner.py +48 -0
- vllm/v1/worker/xpu_worker.py +198 -0
- vllm/version.py +39 -0
- vllm/vllm_flash_attn/.gitkeep +0 -0
- vllm_cpu-0.12.0.dist-info/METADATA +300 -0
- vllm_cpu-0.12.0.dist-info/RECORD +1600 -0
- vllm_cpu-0.12.0.dist-info/WHEEL +5 -0
- vllm_cpu-0.12.0.dist-info/entry_points.txt +5 -0
- vllm_cpu-0.12.0.dist-info/top_level.txt +1 -0
vllm/config/model.py
ADDED
|
@@ -0,0 +1,2274 @@
|
|
|
1
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
2
|
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
|
3
|
+
|
|
4
|
+
import warnings
|
|
5
|
+
from collections.abc import Callable
|
|
6
|
+
from dataclasses import InitVar, field
|
|
7
|
+
from importlib.util import find_spec
|
|
8
|
+
from typing import TYPE_CHECKING, Any, Literal, cast, get_args
|
|
9
|
+
|
|
10
|
+
import torch
|
|
11
|
+
from pydantic import ConfigDict, SkipValidation, field_validator, model_validator
|
|
12
|
+
from pydantic.dataclasses import dataclass
|
|
13
|
+
from safetensors.torch import _TYPES as _SAFETENSORS_TO_TORCH_DTYPE
|
|
14
|
+
from transformers.configuration_utils import ALLOWED_LAYER_TYPES
|
|
15
|
+
|
|
16
|
+
import vllm.envs as envs
|
|
17
|
+
from vllm.attention.backends.registry import AttentionBackendEnum
|
|
18
|
+
from vllm.config.multimodal import MMCacheType, MMEncoderTPMode, MultiModalConfig
|
|
19
|
+
from vllm.config.pooler import PoolerConfig
|
|
20
|
+
from vllm.config.scheduler import RunnerType
|
|
21
|
+
from vllm.config.utils import config, getattr_iter
|
|
22
|
+
from vllm.logger import init_logger
|
|
23
|
+
from vllm.platforms import current_platform
|
|
24
|
+
from vllm.transformers_utils.config import (
|
|
25
|
+
ConfigFormat,
|
|
26
|
+
get_config,
|
|
27
|
+
get_hf_image_processor_config,
|
|
28
|
+
get_hf_text_config,
|
|
29
|
+
get_pooling_config,
|
|
30
|
+
get_sentence_transformer_tokenizer_config,
|
|
31
|
+
is_encoder_decoder,
|
|
32
|
+
try_get_dense_modules,
|
|
33
|
+
try_get_generation_config,
|
|
34
|
+
try_get_safetensors_metadata,
|
|
35
|
+
try_get_tokenizer_config,
|
|
36
|
+
uses_mrope,
|
|
37
|
+
uses_xdrope_dim,
|
|
38
|
+
)
|
|
39
|
+
from vllm.transformers_utils.gguf_utils import (
|
|
40
|
+
maybe_patch_hf_config_from_gguf,
|
|
41
|
+
)
|
|
42
|
+
from vllm.transformers_utils.runai_utils import ObjectStorageModel, is_runai_obj_uri
|
|
43
|
+
from vllm.transformers_utils.utils import (
|
|
44
|
+
is_gguf,
|
|
45
|
+
is_remote_gguf,
|
|
46
|
+
maybe_model_redirect,
|
|
47
|
+
split_remote_gguf,
|
|
48
|
+
)
|
|
49
|
+
from vllm.utils.import_utils import LazyLoader
|
|
50
|
+
from vllm.utils.torch_utils import common_broadcastable_dtype
|
|
51
|
+
|
|
52
|
+
if TYPE_CHECKING:
|
|
53
|
+
from transformers import PretrainedConfig
|
|
54
|
+
|
|
55
|
+
import vllm.model_executor.layers.quantization as me_quant
|
|
56
|
+
import vllm.model_executor.models as me_models
|
|
57
|
+
from vllm.config.load import LoadConfig
|
|
58
|
+
from vllm.config.parallel import ParallelConfig
|
|
59
|
+
from vllm.model_executor.layers.quantization import QuantizationMethods
|
|
60
|
+
from vllm.v1.sample.logits_processor import LogitsProcessor
|
|
61
|
+
else:
|
|
62
|
+
PretrainedConfig = Any
|
|
63
|
+
|
|
64
|
+
me_quant = LazyLoader(
|
|
65
|
+
"model_executor", globals(), "vllm.model_executor.layers.quantization"
|
|
66
|
+
)
|
|
67
|
+
me_models = LazyLoader("model_executor", globals(), "vllm.model_executor.models")
|
|
68
|
+
LoadConfig = Any
|
|
69
|
+
ParallelConfig = Any
|
|
70
|
+
QuantizationMethods = Any
|
|
71
|
+
LogitsProcessor = Any
|
|
72
|
+
|
|
73
|
+
logger = init_logger(__name__)
|
|
74
|
+
|
|
75
|
+
RunnerOption = Literal["auto", RunnerType]
|
|
76
|
+
ConvertType = Literal["none", "embed", "classify", "reward"]
|
|
77
|
+
ConvertOption = Literal["auto", ConvertType]
|
|
78
|
+
TaskOption = Literal[
|
|
79
|
+
"auto",
|
|
80
|
+
"generate",
|
|
81
|
+
"embedding",
|
|
82
|
+
"embed",
|
|
83
|
+
"classify",
|
|
84
|
+
"score",
|
|
85
|
+
"reward",
|
|
86
|
+
"transcription",
|
|
87
|
+
"draft",
|
|
88
|
+
]
|
|
89
|
+
TokenizerMode = Literal["auto", "hf", "slow", "mistral"]
|
|
90
|
+
ModelDType = Literal["auto", "half", "float16", "bfloat16", "float", "float32"]
|
|
91
|
+
LogprobsMode = Literal[
|
|
92
|
+
"raw_logits", "raw_logprobs", "processed_logits", "processed_logprobs"
|
|
93
|
+
]
|
|
94
|
+
HfOverrides = dict[str, Any] | Callable[[PretrainedConfig], PretrainedConfig]
|
|
95
|
+
ModelImpl = Literal["auto", "vllm", "transformers", "terratorch"]
|
|
96
|
+
LayerBlockType = Literal["attention", "linear_attention", "mamba"]
|
|
97
|
+
|
|
98
|
+
_RUNNER_TASKS: dict[RunnerType, list[TaskOption]] = {
|
|
99
|
+
"generate": ["generate", "transcription"],
|
|
100
|
+
"pooling": ["embedding", "embed", "classify", "score", "reward"],
|
|
101
|
+
"draft": ["draft"],
|
|
102
|
+
}
|
|
103
|
+
|
|
104
|
+
_RUNNER_CONVERTS: dict[RunnerType, list[ConvertType]] = {
|
|
105
|
+
"generate": [],
|
|
106
|
+
"pooling": ["embed", "classify", "reward"],
|
|
107
|
+
"draft": [],
|
|
108
|
+
}
|
|
109
|
+
|
|
110
|
+
AttnTypeStr = Literal[
|
|
111
|
+
"decoder", "encoder", "encoder_only", "encoder_decoder", "attention_free", "hybrid"
|
|
112
|
+
]
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
@config
|
|
116
|
+
@dataclass(config=ConfigDict(arbitrary_types_allowed=True))
|
|
117
|
+
class ModelConfig:
|
|
118
|
+
"""Configuration for the model."""
|
|
119
|
+
|
|
120
|
+
model: str = "Qwen/Qwen3-0.6B"
|
|
121
|
+
"""Name or path of the Hugging Face model to use. It is also used as the
|
|
122
|
+
content for `model_name` tag in metrics output when `served_model_name` is
|
|
123
|
+
not specified."""
|
|
124
|
+
runner: RunnerOption = "auto"
|
|
125
|
+
"""The type of model runner to use. Each vLLM instance only supports one
|
|
126
|
+
model runner, even if the same model can be used for multiple types."""
|
|
127
|
+
convert: ConvertOption = "auto"
|
|
128
|
+
"""Convert the model using adapters defined in
|
|
129
|
+
[vllm.model_executor.models.adapters][]. The most common use case is to
|
|
130
|
+
adapt a text generation model to be used for pooling tasks."""
|
|
131
|
+
task: TaskOption | None = None
|
|
132
|
+
"""[DEPRECATED] The task to use the model for. If the model supports more
|
|
133
|
+
than one model runner, this is used to select which model runner to run.
|
|
134
|
+
|
|
135
|
+
Note that the model may support other tasks using the same model runner.
|
|
136
|
+
"""
|
|
137
|
+
tokenizer: SkipValidation[str] = None # type: ignore
|
|
138
|
+
"""Name or path of the Hugging Face tokenizer to use. If unspecified, model
|
|
139
|
+
name or path will be used."""
|
|
140
|
+
tokenizer_mode: TokenizerMode | str = "auto"
|
|
141
|
+
"""Tokenizer mode:\n
|
|
142
|
+
- "auto" will use "hf" tokenizer if Mistral's tokenizer is not available.\n
|
|
143
|
+
- "hf" will use the fast tokenizer if available.\n
|
|
144
|
+
- "slow" will always use the slow tokenizer.\n
|
|
145
|
+
- "mistral" will always use the tokenizer from `mistral_common`.\n
|
|
146
|
+
- Other custom values can be supported via plugins."""
|
|
147
|
+
trust_remote_code: bool = False
|
|
148
|
+
"""Trust remote code (e.g., from HuggingFace) when downloading the model
|
|
149
|
+
and tokenizer."""
|
|
150
|
+
dtype: ModelDType | torch.dtype = "auto"
|
|
151
|
+
"""Data type for model weights and activations:\n
|
|
152
|
+
- "auto" will use FP16 precision for FP32 and FP16 models, and BF16
|
|
153
|
+
precision for BF16 models.\n
|
|
154
|
+
- "half" for FP16. Recommended for AWQ quantization.\n
|
|
155
|
+
- "float16" is the same as "half".\n
|
|
156
|
+
- "bfloat16" for a balance between precision and range.\n
|
|
157
|
+
- "float" is shorthand for FP32 precision.\n
|
|
158
|
+
- "float32" for FP32 precision."""
|
|
159
|
+
seed: int = 0
|
|
160
|
+
"""Random seed for reproducibility.
|
|
161
|
+
|
|
162
|
+
We must set the global seed because otherwise,
|
|
163
|
+
different tensor parallel workers would sample different tokens,
|
|
164
|
+
leading to inconsistent results."""
|
|
165
|
+
hf_config: PretrainedConfig = field(init=False)
|
|
166
|
+
"""The Hugging Face config of the model."""
|
|
167
|
+
hf_text_config: PretrainedConfig = field(init=False)
|
|
168
|
+
"""The Hugging Face config of the text model (same as hf_config for text models)."""
|
|
169
|
+
hf_config_path: str | None = None
|
|
170
|
+
"""Name or path of the Hugging Face config to use. If unspecified, model
|
|
171
|
+
name or path will be used."""
|
|
172
|
+
allowed_local_media_path: str = ""
|
|
173
|
+
"""Allowing API requests to read local images or videos from directories
|
|
174
|
+
specified by the server file system. This is a security risk. Should only
|
|
175
|
+
be enabled in trusted environments."""
|
|
176
|
+
allowed_media_domains: list[str] | None = None
|
|
177
|
+
"""If set, only media URLs that belong to this domain can be used for
|
|
178
|
+
multi-modal inputs. """
|
|
179
|
+
revision: str | None = None
|
|
180
|
+
"""The specific model version to use. It can be a branch name, a tag name,
|
|
181
|
+
or a commit id. If unspecified, will use the default version."""
|
|
182
|
+
code_revision: str | None = None
|
|
183
|
+
"""The specific revision to use for the model code on the Hugging Face Hub.
|
|
184
|
+
It can be a branch name, a tag name, or a commit id. If unspecified, will
|
|
185
|
+
use the default version."""
|
|
186
|
+
tokenizer_revision: str | None = None
|
|
187
|
+
"""The specific revision to use for the tokenizer on the Hugging Face Hub.
|
|
188
|
+
It can be a branch name, a tag name, or a commit id. If unspecified, will
|
|
189
|
+
use the default version."""
|
|
190
|
+
max_model_len: SkipValidation[int] = None # type: ignore
|
|
191
|
+
"""Model context length (prompt and output). If unspecified, will be
|
|
192
|
+
automatically derived from the model config.
|
|
193
|
+
|
|
194
|
+
When passing via `--max-model-len`, supports k/m/g/K/M/G in human-readable
|
|
195
|
+
format. Examples:\n
|
|
196
|
+
- 1k -> 1000\n
|
|
197
|
+
- 1K -> 1024\n
|
|
198
|
+
- 25.6k -> 25,600"""
|
|
199
|
+
spec_target_max_model_len: int | None = None
|
|
200
|
+
"""Specify the maximum length for spec decoding draft models."""
|
|
201
|
+
quantization: SkipValidation[QuantizationMethods | None] = None
|
|
202
|
+
"""Method used to quantize the weights. If `None`, we first check the
|
|
203
|
+
`quantization_config` attribute in the model config file. If that is
|
|
204
|
+
`None`, we assume the model weights are not quantized and use `dtype` to
|
|
205
|
+
determine the data type of the weights."""
|
|
206
|
+
enforce_eager: bool = False
|
|
207
|
+
"""Whether to always use eager-mode PyTorch. If True, we will disable CUDA
|
|
208
|
+
graph and always execute the model in eager mode. If False, we will use
|
|
209
|
+
CUDA graph and eager execution in hybrid for maximal performance and
|
|
210
|
+
flexibility."""
|
|
211
|
+
max_logprobs: int = 20
|
|
212
|
+
"""Maximum number of log probabilities to return when `logprobs` is
|
|
213
|
+
specified in `SamplingParams`. The default value comes the default for the
|
|
214
|
+
OpenAI Chat Completions API. -1 means no cap, i.e. all (output_length *
|
|
215
|
+
vocab_size) logprobs are allowed to be returned and it may cause OOM."""
|
|
216
|
+
logprobs_mode: LogprobsMode = "raw_logprobs"
|
|
217
|
+
"""Indicates the content returned in the logprobs and prompt_logprobs.
|
|
218
|
+
Supported mode:
|
|
219
|
+
1) raw_logprobs, 2) processed_logprobs, 3) raw_logits, 4) processed_logits.
|
|
220
|
+
Raw means the values before applying any logit processors, like bad words.
|
|
221
|
+
Processed means the values after applying all processors, including
|
|
222
|
+
temperature and top_k/top_p.
|
|
223
|
+
"""
|
|
224
|
+
disable_sliding_window: bool = False
|
|
225
|
+
"""Whether to disable sliding window. If True, we will disable the sliding
|
|
226
|
+
window functionality of the model, capping to sliding window size. If the
|
|
227
|
+
model does not support sliding window, this argument is ignored."""
|
|
228
|
+
disable_cascade_attn: bool = False
|
|
229
|
+
"""Disable cascade attention for V1. While cascade attention does not
|
|
230
|
+
change the mathematical correctness, disabling it could be useful for
|
|
231
|
+
preventing potential numerical issues. Note that even if this is set to
|
|
232
|
+
False, cascade attention will be only used when the heuristic tells that
|
|
233
|
+
it's beneficial."""
|
|
234
|
+
skip_tokenizer_init: bool = False
|
|
235
|
+
"""Skip initialization of tokenizer and detokenizer. Expects valid
|
|
236
|
+
`prompt_token_ids` and `None` for prompt from the input. The generated
|
|
237
|
+
output will contain token ids."""
|
|
238
|
+
enable_prompt_embeds: bool = False
|
|
239
|
+
"""If `True`, enables passing text embeddings as inputs via the
|
|
240
|
+
`prompt_embeds` key.
|
|
241
|
+
|
|
242
|
+
WARNING: The vLLM engine may crash if incorrect shape of embeddings is passed.
|
|
243
|
+
Only enable this flag for trusted users!"""
|
|
244
|
+
served_model_name: str | list[str] | None = None
|
|
245
|
+
"""The model name(s) used in the API. If multiple names are provided, the
|
|
246
|
+
server will respond to any of the provided names. The model name in the
|
|
247
|
+
model field of a response will be the first name in this list. If not
|
|
248
|
+
specified, the model name will be the same as the `--model` argument. Noted
|
|
249
|
+
that this name(s) will also be used in `model_name` tag content of
|
|
250
|
+
prometheus metrics, if multiple names provided, metrics tag will take the
|
|
251
|
+
first one."""
|
|
252
|
+
config_format: str | ConfigFormat = "auto"
|
|
253
|
+
"""The format of the model config to load:\n
|
|
254
|
+
- "auto" will try to load the config in hf format if available after trying
|
|
255
|
+
to load in mistral format.\n
|
|
256
|
+
- "hf" will load the config in hf format.\n
|
|
257
|
+
- "mistral" will load the config in mistral format."""
|
|
258
|
+
hf_token: bool | str | None = None
|
|
259
|
+
"""The token to use as HTTP bearer authorization for remote files . If
|
|
260
|
+
`True`, will use the token generated when running `huggingface-cli login`
|
|
261
|
+
(stored in `~/.huggingface`)."""
|
|
262
|
+
hf_overrides: HfOverrides = field(default_factory=dict)
|
|
263
|
+
"""If a dictionary, contains arguments to be forwarded to the Hugging Face
|
|
264
|
+
config. If a callable, it is called to update the HuggingFace config."""
|
|
265
|
+
logits_processor_pattern: str | None = None
|
|
266
|
+
"""Optional regex pattern specifying valid logits processor qualified names
|
|
267
|
+
that can be passed with the `logits_processors` extra completion argument.
|
|
268
|
+
Defaults to `None`, which allows no processors."""
|
|
269
|
+
generation_config: str = "auto"
|
|
270
|
+
"""The folder path to the generation config. Defaults to `"auto"`, the
|
|
271
|
+
generation config will be loaded from model path. If set to `"vllm"`, no
|
|
272
|
+
generation config is loaded, vLLM defaults will be used. If set to a folder
|
|
273
|
+
path, the generation config will be loaded from the specified folder path.
|
|
274
|
+
If `max_new_tokens` is specified in generation config, then it sets a
|
|
275
|
+
server-wide limit on the number of output tokens for all requests."""
|
|
276
|
+
override_generation_config: dict[str, Any] = field(default_factory=dict)
|
|
277
|
+
"""Overrides or sets generation config. e.g. `{"temperature": 0.5}`. If
|
|
278
|
+
used with `--generation-config auto`, the override parameters will be
|
|
279
|
+
merged with the default config from the model. If used with
|
|
280
|
+
`--generation-config vllm`, only the override parameters are used."""
|
|
281
|
+
enable_sleep_mode: bool = False
|
|
282
|
+
"""Enable sleep mode for the engine (only cuda and
|
|
283
|
+
hip platforms are supported)."""
|
|
284
|
+
model_impl: str | ModelImpl = "auto"
|
|
285
|
+
"""Which implementation of the model to use:\n
|
|
286
|
+
- "auto" will try to use the vLLM implementation, if it exists, and fall
|
|
287
|
+
back to the Transformers implementation if no vLLM implementation is
|
|
288
|
+
available.\n
|
|
289
|
+
- "vllm" will use the vLLM model implementation.\n
|
|
290
|
+
- "transformers" will use the Transformers model implementation.\n
|
|
291
|
+
- "terratorch" will use the TerraTorch model implementation.
|
|
292
|
+
"""
|
|
293
|
+
override_attention_dtype: str | None = None
|
|
294
|
+
"""Override dtype for attention"""
|
|
295
|
+
logits_processors: list[str | type[LogitsProcessor]] | None = None
|
|
296
|
+
"""One or more logits processors' fully-qualified class names or class
|
|
297
|
+
definitions"""
|
|
298
|
+
io_processor_plugin: str | None = None
|
|
299
|
+
"""IOProcessor plugin name to load at model startup"""
|
|
300
|
+
|
|
301
|
+
# Pooler config
|
|
302
|
+
pooler_config: PoolerConfig | None = None
|
|
303
|
+
"""Pooler config which controls the behaviour of output pooling in pooling
|
|
304
|
+
models."""
|
|
305
|
+
|
|
306
|
+
# Multimodal config and init vars
|
|
307
|
+
multimodal_config: MultiModalConfig | None = None
|
|
308
|
+
"""Configuration for multimodal model. If `None`, this will be inferred
|
|
309
|
+
from the architecture of `self.model`."""
|
|
310
|
+
limit_mm_per_prompt: InitVar[dict[str, int | dict[str, int]] | None] = None
|
|
311
|
+
enable_mm_embeds: InitVar[bool | None] = None
|
|
312
|
+
media_io_kwargs: InitVar[dict[str, dict[str, Any]] | None] = None
|
|
313
|
+
mm_processor_kwargs: InitVar[dict[str, Any] | None] = None
|
|
314
|
+
mm_processor_cache_gb: InitVar[float | None] = None
|
|
315
|
+
mm_processor_cache_type: InitVar[MMCacheType | None] = None
|
|
316
|
+
mm_shm_cache_max_object_size_mb: InitVar[int | None] = None
|
|
317
|
+
mm_encoder_tp_mode: InitVar[MMEncoderTPMode | None] = None
|
|
318
|
+
mm_encoder_attn_backend: InitVar[AttentionBackendEnum | str | None] = None
|
|
319
|
+
interleave_mm_strings: InitVar[bool | None] = None
|
|
320
|
+
skip_mm_profiling: InitVar[bool | None] = None
|
|
321
|
+
video_pruning_rate: InitVar[float | None] = None
|
|
322
|
+
|
|
323
|
+
def compute_hash(self) -> str:
|
|
324
|
+
"""
|
|
325
|
+
WARNING: Whenever a new field is added to this config,
|
|
326
|
+
ensure that it is included in the factors list if
|
|
327
|
+
it affects the computation graph.
|
|
328
|
+
|
|
329
|
+
Provide a hash that uniquely identifies all the configs
|
|
330
|
+
that affect the structure of the computation
|
|
331
|
+
graph from input ids/embeddings to the final hidden states,
|
|
332
|
+
excluding anything before input ids/embeddings and after
|
|
333
|
+
the final hidden states.
|
|
334
|
+
"""
|
|
335
|
+
ignored_factors = {
|
|
336
|
+
"runner",
|
|
337
|
+
"convert",
|
|
338
|
+
"task",
|
|
339
|
+
"tokenizer",
|
|
340
|
+
"tokenizer_mode",
|
|
341
|
+
"seed",
|
|
342
|
+
"hf_config_path",
|
|
343
|
+
"allowed_local_media_path",
|
|
344
|
+
"allowed_media_domains",
|
|
345
|
+
"tokenizer_revision",
|
|
346
|
+
"spec_target_max_model_len",
|
|
347
|
+
"enforce_eager",
|
|
348
|
+
"logprobs_mode",
|
|
349
|
+
"disable_cascade_attn",
|
|
350
|
+
"skip_tokenizer_init",
|
|
351
|
+
"served_model_name",
|
|
352
|
+
"config_format",
|
|
353
|
+
"hf_token",
|
|
354
|
+
"hf_overrides",
|
|
355
|
+
"logits_processor_pattern",
|
|
356
|
+
"override_attention_dtype",
|
|
357
|
+
"logits_processors",
|
|
358
|
+
"io_processor_plugin",
|
|
359
|
+
"pooler_config",
|
|
360
|
+
"multimodal_config",
|
|
361
|
+
"limit_mm_per_prompt",
|
|
362
|
+
"media_io_kwargs",
|
|
363
|
+
"mm_processor_kwargs",
|
|
364
|
+
"mm_processor_cache_gb",
|
|
365
|
+
"mm_processor_cache_type",
|
|
366
|
+
"mm_shm_cache_max_object_size_mb",
|
|
367
|
+
"mm_encoder_tp_mode",
|
|
368
|
+
"interleave_mm_strings",
|
|
369
|
+
"skip_mm_profiling",
|
|
370
|
+
}
|
|
371
|
+
|
|
372
|
+
from vllm.config.utils import get_hash_factors, hash_factors
|
|
373
|
+
|
|
374
|
+
factors = get_hash_factors(self, ignored_factors)
|
|
375
|
+
return hash_factors(factors)
|
|
376
|
+
|
|
377
|
+
def _update_nested(
|
|
378
|
+
self,
|
|
379
|
+
target: PretrainedConfig | dict[str, Any],
|
|
380
|
+
updates: dict[str, Any],
|
|
381
|
+
) -> None:
|
|
382
|
+
"""Recursively updates a config or dict with nested updates."""
|
|
383
|
+
for key, value in updates.items():
|
|
384
|
+
if isinstance(value, dict):
|
|
385
|
+
# Get the nested target
|
|
386
|
+
if isinstance(target, dict):
|
|
387
|
+
nested_target = target.get(key)
|
|
388
|
+
else:
|
|
389
|
+
nested_target = getattr(target, key, None)
|
|
390
|
+
|
|
391
|
+
# If nested target exists and can be updated recursively
|
|
392
|
+
if nested_target is not None and (
|
|
393
|
+
isinstance(nested_target, dict)
|
|
394
|
+
or hasattr(nested_target, "__dict__")
|
|
395
|
+
):
|
|
396
|
+
self._update_nested(nested_target, value)
|
|
397
|
+
continue
|
|
398
|
+
|
|
399
|
+
# Set the value (base case)
|
|
400
|
+
if isinstance(target, dict):
|
|
401
|
+
target[key] = value
|
|
402
|
+
else:
|
|
403
|
+
setattr(target, key, value)
|
|
404
|
+
|
|
405
|
+
def _apply_dict_overrides(
|
|
406
|
+
self,
|
|
407
|
+
config: PretrainedConfig,
|
|
408
|
+
overrides: dict[str, Any],
|
|
409
|
+
) -> None:
|
|
410
|
+
"""Apply dict overrides, handling both nested configs and dict values."""
|
|
411
|
+
from transformers import PretrainedConfig
|
|
412
|
+
|
|
413
|
+
for key, value in overrides.items():
|
|
414
|
+
attr = getattr(config, key, None)
|
|
415
|
+
if attr is not None and isinstance(attr, PretrainedConfig):
|
|
416
|
+
# It's a nested config - recursively update it
|
|
417
|
+
self._update_nested(attr, value)
|
|
418
|
+
else:
|
|
419
|
+
# It's a dict-valued parameter - set it directly
|
|
420
|
+
setattr(config, key, value)
|
|
421
|
+
|
|
422
|
+
def __post_init__(
|
|
423
|
+
self,
|
|
424
|
+
# Multimodal config init vars
|
|
425
|
+
limit_mm_per_prompt: dict[str, int | dict[str, int]] | None,
|
|
426
|
+
enable_mm_embeds: bool | None,
|
|
427
|
+
media_io_kwargs: dict[str, dict[str, Any]] | None,
|
|
428
|
+
mm_processor_kwargs: dict[str, Any] | None,
|
|
429
|
+
mm_processor_cache_gb: float | None,
|
|
430
|
+
mm_processor_cache_type: MMCacheType | None,
|
|
431
|
+
mm_shm_cache_max_object_size_mb: int | None,
|
|
432
|
+
mm_encoder_tp_mode: MMEncoderTPMode | None,
|
|
433
|
+
mm_encoder_attn_backend: AttentionBackendEnum | str | None,
|
|
434
|
+
interleave_mm_strings: bool | None,
|
|
435
|
+
skip_mm_profiling: bool | None,
|
|
436
|
+
video_pruning_rate: float | None,
|
|
437
|
+
) -> None:
|
|
438
|
+
# Keep set served_model_name before maybe_model_redirect(self.model)
|
|
439
|
+
self.served_model_name = get_served_model_name(
|
|
440
|
+
self.model, self.served_model_name
|
|
441
|
+
)
|
|
442
|
+
self.model = maybe_model_redirect(self.model)
|
|
443
|
+
# The tokenizer is consistent with the model by default.
|
|
444
|
+
if self.tokenizer is None:
|
|
445
|
+
self.tokenizer = self.model
|
|
446
|
+
if self.tokenizer_revision is None:
|
|
447
|
+
self.tokenizer_revision = self.revision
|
|
448
|
+
self.tokenizer = maybe_model_redirect(self.tokenizer)
|
|
449
|
+
|
|
450
|
+
if isinstance(self.hf_config_path, str):
|
|
451
|
+
self.hf_config_path = maybe_model_redirect(self.hf_config_path)
|
|
452
|
+
|
|
453
|
+
if callable(self.hf_overrides):
|
|
454
|
+
hf_overrides_kw = {}
|
|
455
|
+
hf_overrides_fn = self.hf_overrides
|
|
456
|
+
dict_overrides: dict[str, Any] = {}
|
|
457
|
+
else:
|
|
458
|
+
# Separate dict overrides from flat ones
|
|
459
|
+
# We'll determine how to apply dict overrides after loading the config
|
|
460
|
+
hf_overrides_kw = {}
|
|
461
|
+
dict_overrides = {}
|
|
462
|
+
for key, value in self.hf_overrides.items():
|
|
463
|
+
if isinstance(value, dict):
|
|
464
|
+
dict_overrides[key] = value
|
|
465
|
+
else:
|
|
466
|
+
hf_overrides_kw[key] = value
|
|
467
|
+
hf_overrides_fn = None
|
|
468
|
+
|
|
469
|
+
self.maybe_pull_model_tokenizer_for_runai(self.model, self.tokenizer)
|
|
470
|
+
|
|
471
|
+
if (
|
|
472
|
+
(backend := envs.VLLM_ATTENTION_BACKEND)
|
|
473
|
+
and backend == "FLASHINFER"
|
|
474
|
+
and find_spec("flashinfer") is None
|
|
475
|
+
):
|
|
476
|
+
raise ValueError(
|
|
477
|
+
"VLLM_ATTENTION_BACKEND is set to FLASHINFER, but flashinfer "
|
|
478
|
+
"module was not found. See "
|
|
479
|
+
"https://github.com/vllm-project/vllm/blob/main/docker/Dockerfile " # noqa: E501
|
|
480
|
+
"for instructions on how to install it."
|
|
481
|
+
)
|
|
482
|
+
|
|
483
|
+
from vllm.platforms import current_platform
|
|
484
|
+
|
|
485
|
+
if self.override_attention_dtype is not None and not current_platform.is_rocm():
|
|
486
|
+
warnings.warn(
|
|
487
|
+
"override-attention-dtype is set but not using ROCm platform",
|
|
488
|
+
stacklevel=2,
|
|
489
|
+
)
|
|
490
|
+
|
|
491
|
+
if self.enable_sleep_mode and not current_platform.is_sleep_mode_available():
|
|
492
|
+
raise ValueError("Sleep mode is not supported on current platform.")
|
|
493
|
+
|
|
494
|
+
hf_config = get_config(
|
|
495
|
+
self.hf_config_path or self.model,
|
|
496
|
+
self.trust_remote_code,
|
|
497
|
+
self.revision,
|
|
498
|
+
self.code_revision,
|
|
499
|
+
self.config_format,
|
|
500
|
+
hf_overrides_kw=hf_overrides_kw,
|
|
501
|
+
hf_overrides_fn=hf_overrides_fn,
|
|
502
|
+
)
|
|
503
|
+
hf_config = maybe_patch_hf_config_from_gguf(
|
|
504
|
+
self.model,
|
|
505
|
+
hf_config,
|
|
506
|
+
)
|
|
507
|
+
|
|
508
|
+
self.hf_config = hf_config
|
|
509
|
+
if dict_overrides:
|
|
510
|
+
self._apply_dict_overrides(hf_config, dict_overrides)
|
|
511
|
+
self.hf_text_config = get_hf_text_config(self.hf_config)
|
|
512
|
+
self.attention_chunk_size = getattr(
|
|
513
|
+
self.hf_text_config, "attention_chunk_size", None
|
|
514
|
+
)
|
|
515
|
+
self.encoder_config = self._get_encoder_config()
|
|
516
|
+
self.hf_image_processor_config = get_hf_image_processor_config(
|
|
517
|
+
self.model, hf_token=self.hf_token, revision=self.revision
|
|
518
|
+
)
|
|
519
|
+
|
|
520
|
+
architectures = self.architectures
|
|
521
|
+
registry = self.registry
|
|
522
|
+
is_generative_model = registry.is_text_generation_model(architectures, self)
|
|
523
|
+
is_pooling_model = registry.is_pooling_model(architectures, self)
|
|
524
|
+
|
|
525
|
+
def _task_to_convert(task: TaskOption) -> ConvertType:
|
|
526
|
+
if task == "embedding" or task == "embed":
|
|
527
|
+
return "embed"
|
|
528
|
+
if task == "classify":
|
|
529
|
+
return "classify"
|
|
530
|
+
if task == "reward":
|
|
531
|
+
return "reward"
|
|
532
|
+
if task == "score":
|
|
533
|
+
new_task = self._get_default_pooling_task(architectures)
|
|
534
|
+
return "classify" if new_task == "classify" else "embed"
|
|
535
|
+
|
|
536
|
+
return "none"
|
|
537
|
+
|
|
538
|
+
if self.task is not None:
|
|
539
|
+
runner: RunnerOption = "auto"
|
|
540
|
+
convert: ConvertOption = "auto"
|
|
541
|
+
msg_prefix = (
|
|
542
|
+
"The 'task' option has been deprecated and will be "
|
|
543
|
+
"removed in v0.13.0 or v1.0, whichever comes first."
|
|
544
|
+
)
|
|
545
|
+
msg_hint = "Please remove this option."
|
|
546
|
+
|
|
547
|
+
is_generative_task = self.task in _RUNNER_TASKS["generate"]
|
|
548
|
+
is_pooling_task = self.task in _RUNNER_TASKS["pooling"]
|
|
549
|
+
|
|
550
|
+
if is_generative_model and is_pooling_model:
|
|
551
|
+
if is_generative_task:
|
|
552
|
+
runner = "generate"
|
|
553
|
+
convert = "auto"
|
|
554
|
+
msg_hint = (
|
|
555
|
+
"Please replace this option with `--runner "
|
|
556
|
+
"generate` to continue using this model "
|
|
557
|
+
"as a generative model."
|
|
558
|
+
)
|
|
559
|
+
elif is_pooling_task:
|
|
560
|
+
runner = "pooling"
|
|
561
|
+
convert = "auto"
|
|
562
|
+
msg_hint = (
|
|
563
|
+
"Please replace this option with `--runner "
|
|
564
|
+
"pooling` to continue using this model "
|
|
565
|
+
"as a pooling model."
|
|
566
|
+
)
|
|
567
|
+
else: # task == "auto"
|
|
568
|
+
pass
|
|
569
|
+
elif is_generative_model or is_pooling_model:
|
|
570
|
+
if is_generative_task:
|
|
571
|
+
runner = "generate"
|
|
572
|
+
convert = "auto"
|
|
573
|
+
msg_hint = "Please remove this option"
|
|
574
|
+
elif is_pooling_task:
|
|
575
|
+
runner = "pooling"
|
|
576
|
+
convert = _task_to_convert(self.task)
|
|
577
|
+
msg_hint = (
|
|
578
|
+
"Please replace this option with `--convert "
|
|
579
|
+
f"{convert}` to continue using this model "
|
|
580
|
+
"as a pooling model."
|
|
581
|
+
)
|
|
582
|
+
else: # task == "auto"
|
|
583
|
+
pass
|
|
584
|
+
else:
|
|
585
|
+
# Neither generative nor pooling model - try to convert if possible
|
|
586
|
+
if is_pooling_task:
|
|
587
|
+
runner = "pooling"
|
|
588
|
+
convert = _task_to_convert(self.task)
|
|
589
|
+
msg_hint = (
|
|
590
|
+
"Please replace this option with `--runner pooling "
|
|
591
|
+
f"--convert {convert}` to continue using this model "
|
|
592
|
+
"as a pooling model."
|
|
593
|
+
)
|
|
594
|
+
else:
|
|
595
|
+
debug_info = {
|
|
596
|
+
"architectures": architectures,
|
|
597
|
+
"is_generative_model": is_generative_model,
|
|
598
|
+
"is_pooling_model": is_pooling_model,
|
|
599
|
+
}
|
|
600
|
+
raise AssertionError(
|
|
601
|
+
"The model should be a generative or "
|
|
602
|
+
"pooling model when task is set to "
|
|
603
|
+
f"{self.task!r}. Found: {debug_info}"
|
|
604
|
+
)
|
|
605
|
+
|
|
606
|
+
self.runner = runner
|
|
607
|
+
self.convert = convert
|
|
608
|
+
|
|
609
|
+
msg = f"{msg_prefix} {msg_hint}"
|
|
610
|
+
warnings.warn(msg, DeprecationWarning, stacklevel=2)
|
|
611
|
+
|
|
612
|
+
self.runner_type = self._get_runner_type(architectures, self.runner)
|
|
613
|
+
self.convert_type = self._get_convert_type(
|
|
614
|
+
architectures, self.runner_type, self.convert
|
|
615
|
+
)
|
|
616
|
+
|
|
617
|
+
if self.runner_type == "generate" and not is_generative_model:
|
|
618
|
+
generate_converts = _RUNNER_CONVERTS["generate"]
|
|
619
|
+
if self.convert_type not in generate_converts:
|
|
620
|
+
# Currently we don't have any converters for generative models
|
|
621
|
+
raise ValueError("This model does not support `--runner generate`.")
|
|
622
|
+
if self.runner_type == "pooling" and not is_pooling_model:
|
|
623
|
+
pooling_converts = _RUNNER_CONVERTS["pooling"]
|
|
624
|
+
if self.convert_type not in pooling_converts:
|
|
625
|
+
convert_option = "<" + "|".join(pooling_converts) + ">"
|
|
626
|
+
raise ValueError(
|
|
627
|
+
"This model does not support `--runner pooling`. "
|
|
628
|
+
f"You can pass `--convert {convert_option} to adapt "
|
|
629
|
+
"it into a pooling model."
|
|
630
|
+
)
|
|
631
|
+
|
|
632
|
+
# Note: Initialize these attributes early because transformers fallback
|
|
633
|
+
# may fail to load dynamic modules in child processes
|
|
634
|
+
model_info, arch = registry.inspect_model_cls(architectures, self)
|
|
635
|
+
self._model_info = model_info
|
|
636
|
+
self._architecture = arch
|
|
637
|
+
logger.info("Resolved architecture: %s", arch)
|
|
638
|
+
|
|
639
|
+
# Init pooler config if needed
|
|
640
|
+
if self.runner_type == "pooling":
|
|
641
|
+
if self.pooler_config is None:
|
|
642
|
+
self.pooler_config = PoolerConfig()
|
|
643
|
+
|
|
644
|
+
base_config = get_pooling_config(self.model, self.revision)
|
|
645
|
+
if base_config is not None:
|
|
646
|
+
# Only set values that are not overridden by the user
|
|
647
|
+
for k, v in base_config.items():
|
|
648
|
+
if getattr(self.pooler_config, k) is None:
|
|
649
|
+
setattr(self.pooler_config, k, v)
|
|
650
|
+
|
|
651
|
+
default_pooling_type = self._model_info.default_pooling_type
|
|
652
|
+
if self.pooler_config.pooling_type is None:
|
|
653
|
+
self.pooler_config.pooling_type = default_pooling_type
|
|
654
|
+
|
|
655
|
+
self.dtype: torch.dtype = _get_and_verify_dtype(
|
|
656
|
+
self.model,
|
|
657
|
+
self.hf_config,
|
|
658
|
+
self.dtype,
|
|
659
|
+
is_pooling_model=self.runner_type == "pooling",
|
|
660
|
+
revision=self.revision,
|
|
661
|
+
)
|
|
662
|
+
|
|
663
|
+
self.original_max_model_len = self.max_model_len
|
|
664
|
+
self.max_model_len = self.get_and_verify_max_len(self.max_model_len)
|
|
665
|
+
# Init multimodal config if needed
|
|
666
|
+
if self._model_info.supports_multimodal:
|
|
667
|
+
if (
|
|
668
|
+
mm_encoder_tp_mode == "data"
|
|
669
|
+
and not self._model_info.supports_multimodal_encoder_tp_data
|
|
670
|
+
):
|
|
671
|
+
logger.warning_once(
|
|
672
|
+
"This model does not support `--mm-encoder-tp-mode data`. "
|
|
673
|
+
"Falling back to `--mm-encoder-tp-mode weights`."
|
|
674
|
+
)
|
|
675
|
+
mm_encoder_tp_mode = "weights"
|
|
676
|
+
|
|
677
|
+
mm_config_kwargs = dict(
|
|
678
|
+
limit_per_prompt=limit_mm_per_prompt,
|
|
679
|
+
enable_mm_embeds=enable_mm_embeds,
|
|
680
|
+
media_io_kwargs=media_io_kwargs,
|
|
681
|
+
mm_processor_kwargs=mm_processor_kwargs,
|
|
682
|
+
mm_processor_cache_gb=mm_processor_cache_gb,
|
|
683
|
+
mm_processor_cache_type=mm_processor_cache_type,
|
|
684
|
+
mm_shm_cache_max_object_size_mb=mm_shm_cache_max_object_size_mb,
|
|
685
|
+
mm_encoder_tp_mode=mm_encoder_tp_mode,
|
|
686
|
+
mm_encoder_attn_backend=mm_encoder_attn_backend,
|
|
687
|
+
interleave_mm_strings=interleave_mm_strings,
|
|
688
|
+
skip_mm_profiling=skip_mm_profiling,
|
|
689
|
+
video_pruning_rate=video_pruning_rate,
|
|
690
|
+
)
|
|
691
|
+
|
|
692
|
+
mm_config_kwargs = {
|
|
693
|
+
k: v for k, v in mm_config_kwargs.items() if v is not None
|
|
694
|
+
}
|
|
695
|
+
|
|
696
|
+
self.multimodal_config = MultiModalConfig(**mm_config_kwargs)
|
|
697
|
+
|
|
698
|
+
# Multimodal GGUF models must use original repo for mm processing
|
|
699
|
+
if is_gguf(self.tokenizer) and self.is_multimodal_model:
|
|
700
|
+
raise ValueError(
|
|
701
|
+
"Loading a multimodal GGUF model needs to use original "
|
|
702
|
+
"tokenizer. Please specify the unquantized hf model's "
|
|
703
|
+
"repo name or path using the --tokenizer argument."
|
|
704
|
+
)
|
|
705
|
+
|
|
706
|
+
if self.disable_sliding_window:
|
|
707
|
+
# Set after get_and_verify_max_len to ensure that max_model_len
|
|
708
|
+
# can be correctly capped to sliding window size
|
|
709
|
+
self.hf_text_config.sliding_window = None
|
|
710
|
+
|
|
711
|
+
# Avoid running try_verify_and_update_config multiple times
|
|
712
|
+
self.config_updated = False
|
|
713
|
+
|
|
714
|
+
self._verify_quantization()
|
|
715
|
+
self._verify_cuda_graph()
|
|
716
|
+
self._verify_bnb_config()
|
|
717
|
+
|
|
718
|
+
@field_validator("tokenizer_mode", mode="after")
|
|
719
|
+
def _lowercase_tokenizer_mode(cls, tokenizer_mode: str) -> str:
|
|
720
|
+
return tokenizer_mode.lower()
|
|
721
|
+
|
|
722
|
+
@field_validator("quantization", mode="before")
|
|
723
|
+
@classmethod
|
|
724
|
+
def validate_quantization_before(cls, value: Any) -> Any:
|
|
725
|
+
if isinstance(value, str):
|
|
726
|
+
return value.lower()
|
|
727
|
+
return value
|
|
728
|
+
|
|
729
|
+
@model_validator(mode="after")
|
|
730
|
+
def validate_model_config_after(self: "ModelConfig") -> "ModelConfig":
|
|
731
|
+
if not isinstance(self.tokenizer, str):
|
|
732
|
+
raise ValueError("tokenizer must be a string after __post_init__.")
|
|
733
|
+
if not isinstance(self.max_model_len, int):
|
|
734
|
+
raise ValueError("max_model_len must be an integer after __post_init__.")
|
|
735
|
+
return self
|
|
736
|
+
|
|
737
|
+
def _get_transformers_backend_cls(self) -> str:
|
|
738
|
+
"""Determine which Transformers modeling backend class will be used if
|
|
739
|
+
`model_impl` is set to `transformers` or `auto`."""
|
|
740
|
+
cls = "Transformers"
|
|
741
|
+
# If 'hf_config != hf_text_config' it's a nested config, i.e. multimodal
|
|
742
|
+
cls += "MultiModal" if self.hf_config != self.hf_text_config else ""
|
|
743
|
+
cls += "MoE" if self.get_num_experts() > 1 else ""
|
|
744
|
+
# Check if the architecture we're wrapping has defaults
|
|
745
|
+
runner = None
|
|
746
|
+
task = None
|
|
747
|
+
if defaults := try_match_architecture_defaults(self.architectures[0]):
|
|
748
|
+
_, (runner, task) = defaults
|
|
749
|
+
# User specified value take precedence
|
|
750
|
+
if self.runner != "auto":
|
|
751
|
+
runner = self.runner
|
|
752
|
+
# Only consider Transformers modeling backend pooling classes if we're wrapping
|
|
753
|
+
# an architecture that defaults to pooling. Otherwise, we return the LM class
|
|
754
|
+
# and use adapters.
|
|
755
|
+
if runner == "pooling" and task in {"embed", "classify"}:
|
|
756
|
+
if task == "embed":
|
|
757
|
+
cls += "EmbeddingModel"
|
|
758
|
+
elif task == "classify":
|
|
759
|
+
cls += "ForSequenceClassification"
|
|
760
|
+
else:
|
|
761
|
+
cls += "ForCausalLM"
|
|
762
|
+
return cls
|
|
763
|
+
|
|
764
|
+
def using_transformers_backend(self) -> bool:
|
|
765
|
+
"""Check if the model is using the Transformers modeling backend class."""
|
|
766
|
+
used_cls = self._model_info.architecture
|
|
767
|
+
transformers_backend_cls = self._get_transformers_backend_cls()
|
|
768
|
+
return used_cls == transformers_backend_cls
|
|
769
|
+
|
|
770
|
+
@property
|
|
771
|
+
def registry(self):
|
|
772
|
+
return me_models.ModelRegistry
|
|
773
|
+
|
|
774
|
+
@property
|
|
775
|
+
def architectures(self) -> list[str]:
|
|
776
|
+
return getattr(self.hf_config, "architectures", [])
|
|
777
|
+
|
|
778
|
+
@property
|
|
779
|
+
def architecture(self) -> str:
|
|
780
|
+
"""The architecture vllm actually used."""
|
|
781
|
+
return self._architecture
|
|
782
|
+
|
|
783
|
+
def maybe_pull_model_tokenizer_for_runai(self, model: str, tokenizer: str) -> None:
|
|
784
|
+
"""Pull model/tokenizer from Object Storage to temporary
|
|
785
|
+
directory when needed.
|
|
786
|
+
|
|
787
|
+
Args:
|
|
788
|
+
model: Model name or path
|
|
789
|
+
tokenizer: Tokenizer name or path
|
|
790
|
+
"""
|
|
791
|
+
|
|
792
|
+
if not (is_runai_obj_uri(model) or is_runai_obj_uri(tokenizer)):
|
|
793
|
+
return
|
|
794
|
+
|
|
795
|
+
if is_runai_obj_uri(model):
|
|
796
|
+
object_storage_model = ObjectStorageModel(url=model)
|
|
797
|
+
object_storage_model.pull_files(
|
|
798
|
+
model, allow_pattern=["*.model", "*.py", "*.json"]
|
|
799
|
+
)
|
|
800
|
+
self.model_weights = model
|
|
801
|
+
self.model = object_storage_model.dir
|
|
802
|
+
|
|
803
|
+
# If tokenizer is same as model, download to same directory
|
|
804
|
+
if model == tokenizer:
|
|
805
|
+
object_storage_model.pull_files(
|
|
806
|
+
model,
|
|
807
|
+
ignore_pattern=[
|
|
808
|
+
"*.pt",
|
|
809
|
+
"*.safetensors",
|
|
810
|
+
"*.bin",
|
|
811
|
+
"*.tensors",
|
|
812
|
+
"*.pth",
|
|
813
|
+
],
|
|
814
|
+
)
|
|
815
|
+
self.tokenizer = object_storage_model.dir
|
|
816
|
+
return
|
|
817
|
+
|
|
818
|
+
# Only download tokenizer if needed and not already handled
|
|
819
|
+
if is_runai_obj_uri(tokenizer):
|
|
820
|
+
object_storage_tokenizer = ObjectStorageModel(url=tokenizer)
|
|
821
|
+
object_storage_tokenizer.pull_files(
|
|
822
|
+
model,
|
|
823
|
+
ignore_pattern=["*.pt", "*.safetensors", "*.bin", "*.tensors", "*.pth"],
|
|
824
|
+
)
|
|
825
|
+
self.tokenizer = object_storage_tokenizer.dir
|
|
826
|
+
|
|
827
|
+
def _get_encoder_config(self):
|
|
828
|
+
model = self.model
|
|
829
|
+
if is_remote_gguf(model):
|
|
830
|
+
model, _ = split_remote_gguf(model)
|
|
831
|
+
return get_sentence_transformer_tokenizer_config(model, self.revision)
|
|
832
|
+
|
|
833
|
+
def _get_default_runner_type(
|
|
834
|
+
self,
|
|
835
|
+
architectures: list[str],
|
|
836
|
+
) -> RunnerType:
|
|
837
|
+
registry = self.registry
|
|
838
|
+
|
|
839
|
+
# Some Sentence Transformers models use *ForCausalLM archs
|
|
840
|
+
if get_pooling_config(self.model, self.revision):
|
|
841
|
+
return "pooling"
|
|
842
|
+
|
|
843
|
+
for arch in architectures:
|
|
844
|
+
if arch in registry.get_supported_archs():
|
|
845
|
+
if registry.is_pooling_model(architectures, self):
|
|
846
|
+
return "pooling"
|
|
847
|
+
if registry.is_text_generation_model(architectures, self):
|
|
848
|
+
return "generate"
|
|
849
|
+
|
|
850
|
+
match = try_match_architecture_defaults(arch)
|
|
851
|
+
if match:
|
|
852
|
+
_, (runner_type, _) = match
|
|
853
|
+
return runner_type
|
|
854
|
+
|
|
855
|
+
return "generate"
|
|
856
|
+
|
|
857
|
+
def _get_runner_type(
|
|
858
|
+
self,
|
|
859
|
+
architectures: list[str],
|
|
860
|
+
runner: RunnerOption,
|
|
861
|
+
) -> RunnerType:
|
|
862
|
+
if runner != "auto":
|
|
863
|
+
return runner
|
|
864
|
+
|
|
865
|
+
runner_type = self._get_default_runner_type(architectures)
|
|
866
|
+
|
|
867
|
+
# Don't log the most common case
|
|
868
|
+
if runner_type != "generate":
|
|
869
|
+
logger.info(
|
|
870
|
+
"Resolved `--runner auto` to `--runner %s`. "
|
|
871
|
+
"Pass the value explicitly to silence this message.",
|
|
872
|
+
runner_type,
|
|
873
|
+
)
|
|
874
|
+
|
|
875
|
+
return runner_type
|
|
876
|
+
|
|
877
|
+
def _get_default_convert_type(
|
|
878
|
+
self,
|
|
879
|
+
architectures: list[str],
|
|
880
|
+
runner_type: RunnerType,
|
|
881
|
+
) -> ConvertType:
|
|
882
|
+
registry = self.registry
|
|
883
|
+
|
|
884
|
+
for arch in architectures:
|
|
885
|
+
if arch in registry.get_supported_archs():
|
|
886
|
+
if runner_type == "generate" and registry.is_text_generation_model(
|
|
887
|
+
architectures, self
|
|
888
|
+
):
|
|
889
|
+
return "none"
|
|
890
|
+
if runner_type == "pooling" and registry.is_pooling_model(
|
|
891
|
+
architectures, self
|
|
892
|
+
):
|
|
893
|
+
return "none"
|
|
894
|
+
|
|
895
|
+
match = try_match_architecture_defaults(arch, runner_type=runner_type)
|
|
896
|
+
if match:
|
|
897
|
+
_, (_, convert_type) = match
|
|
898
|
+
return convert_type
|
|
899
|
+
|
|
900
|
+
# This is to handle Sentence Transformers models that use *ForCausalLM
|
|
901
|
+
# and also multi-modal pooling models which are not defined as
|
|
902
|
+
# Sentence Transformers models
|
|
903
|
+
if runner_type == "pooling":
|
|
904
|
+
return "embed"
|
|
905
|
+
|
|
906
|
+
return "none"
|
|
907
|
+
|
|
908
|
+
def _get_convert_type(
|
|
909
|
+
self,
|
|
910
|
+
architectures: list[str],
|
|
911
|
+
runner_type: RunnerType,
|
|
912
|
+
convert: ConvertOption,
|
|
913
|
+
) -> ConvertType:
|
|
914
|
+
if convert != "auto":
|
|
915
|
+
return convert
|
|
916
|
+
|
|
917
|
+
convert_type = self._get_default_convert_type(architectures, runner_type)
|
|
918
|
+
|
|
919
|
+
# Don't log the most common case
|
|
920
|
+
if convert_type != "none":
|
|
921
|
+
logger.info(
|
|
922
|
+
"Resolved `--convert auto` to `--convert %s`. "
|
|
923
|
+
"Pass the value explicitly to silence this message.",
|
|
924
|
+
convert_type,
|
|
925
|
+
)
|
|
926
|
+
|
|
927
|
+
return convert_type
|
|
928
|
+
|
|
929
|
+
def _get_default_pooling_task(
|
|
930
|
+
self,
|
|
931
|
+
architectures: list[str],
|
|
932
|
+
) -> Literal["embed", "classify", "reward"]:
|
|
933
|
+
if self.registry.is_cross_encoder_model(architectures, self):
|
|
934
|
+
return "classify"
|
|
935
|
+
|
|
936
|
+
for arch in architectures:
|
|
937
|
+
match = try_match_architecture_defaults(arch, runner_type="pooling")
|
|
938
|
+
if match:
|
|
939
|
+
_, (_, convert_type) = match
|
|
940
|
+
assert convert_type != "none"
|
|
941
|
+
return convert_type
|
|
942
|
+
|
|
943
|
+
return "embed"
|
|
944
|
+
|
|
945
|
+
def _parse_quant_hf_config(self, hf_config: PretrainedConfig):
|
|
946
|
+
quant_cfg = getattr(hf_config, "quantization_config", None)
|
|
947
|
+
if quant_cfg is None:
|
|
948
|
+
# compressed-tensors uses a "compression_config" key
|
|
949
|
+
quant_cfg = getattr(hf_config, "compression_config", None)
|
|
950
|
+
|
|
951
|
+
else:
|
|
952
|
+
# Set quant_method for ModelOpt models.
|
|
953
|
+
producer_name = quant_cfg.get("producer", {}).get("name")
|
|
954
|
+
if producer_name == "modelopt":
|
|
955
|
+
quant_algo = quant_cfg.get("quantization", {}).get("quant_algo")
|
|
956
|
+
if quant_algo == "FP8":
|
|
957
|
+
quant_cfg["quant_method"] = "modelopt"
|
|
958
|
+
elif quant_algo == "NVFP4":
|
|
959
|
+
quant_cfg["quant_method"] = "modelopt_fp4"
|
|
960
|
+
elif quant_algo is not None:
|
|
961
|
+
raise ValueError(f"Unknown ModelOpt quant algo: {quant_algo}")
|
|
962
|
+
|
|
963
|
+
return quant_cfg
|
|
964
|
+
|
|
965
|
+
def _verify_quantization(self) -> None:
|
|
966
|
+
supported_quantization = me_quant.QUANTIZATION_METHODS
|
|
967
|
+
if self.quantization is not None:
|
|
968
|
+
self.quantization = cast(me_quant.QuantizationMethods, self.quantization)
|
|
969
|
+
|
|
970
|
+
# Parse quantization method from the HF model config, if available.
|
|
971
|
+
quant_cfg = self._parse_quant_hf_config(self.hf_config)
|
|
972
|
+
if quant_cfg is None and (
|
|
973
|
+
text_config := getattr(self.hf_config, "text_config", None)
|
|
974
|
+
):
|
|
975
|
+
# Check the text config as well for multi-modal models.
|
|
976
|
+
quant_cfg = self._parse_quant_hf_config(text_config)
|
|
977
|
+
|
|
978
|
+
if quant_cfg is not None:
|
|
979
|
+
# Use the community standard 'quant_method'
|
|
980
|
+
quant_method = quant_cfg.get("quant_method", "").lower()
|
|
981
|
+
|
|
982
|
+
# Normalize library names
|
|
983
|
+
quant_method = quant_method.replace(
|
|
984
|
+
"compressed_tensors", "compressed-tensors"
|
|
985
|
+
)
|
|
986
|
+
|
|
987
|
+
quant_cfg["quant_method"] = quant_method
|
|
988
|
+
|
|
989
|
+
# Quantization methods which are overrides (i.e. they have a
|
|
990
|
+
# `override_quantization_method` method) must be checked in order
|
|
991
|
+
# of preference (this is particularly important for GPTQ).
|
|
992
|
+
overrides = [
|
|
993
|
+
"bitblas",
|
|
994
|
+
"gptq_marlin_24",
|
|
995
|
+
"gptq_marlin",
|
|
996
|
+
"gptq_bitblas",
|
|
997
|
+
"awq_marlin",
|
|
998
|
+
"ipex",
|
|
999
|
+
"moe_wna16",
|
|
1000
|
+
"modelopt",
|
|
1001
|
+
"modelopt_fp4",
|
|
1002
|
+
"petit_nvfp4",
|
|
1003
|
+
# Ensure heavy backends are probed last to avoid unnecessary
|
|
1004
|
+
# imports during override detection (e.g., MXFP4 imports Triton)
|
|
1005
|
+
"mxfp4",
|
|
1006
|
+
"cpu_gptq",
|
|
1007
|
+
"cpu_awq",
|
|
1008
|
+
]
|
|
1009
|
+
quantization_methods = [
|
|
1010
|
+
q for q in supported_quantization if q not in overrides
|
|
1011
|
+
]
|
|
1012
|
+
# Any custom overrides will be in quantization_methods so we place
|
|
1013
|
+
# them at the start of the list so custom overrides have preference
|
|
1014
|
+
# over the built-in ones.
|
|
1015
|
+
quantization_methods = quantization_methods + overrides
|
|
1016
|
+
|
|
1017
|
+
# Detect which checkpoint is it
|
|
1018
|
+
for name in quantization_methods:
|
|
1019
|
+
method = me_quant.get_quantization_config(name)
|
|
1020
|
+
quantization_override = method.override_quantization_method(
|
|
1021
|
+
quant_cfg, self.quantization
|
|
1022
|
+
)
|
|
1023
|
+
if quantization_override is not None:
|
|
1024
|
+
# Raise error if the override is not custom (custom would
|
|
1025
|
+
# be in QUANTIZATION_METHODS but not QuantizationMethods)
|
|
1026
|
+
# and hasn't been added to the overrides list.
|
|
1027
|
+
if (
|
|
1028
|
+
name in get_args(me_quant.QuantizationMethods)
|
|
1029
|
+
and name not in overrides
|
|
1030
|
+
):
|
|
1031
|
+
raise ValueError(
|
|
1032
|
+
f"Quantization method {name} is an override but "
|
|
1033
|
+
"is has not been added to the `overrides` list "
|
|
1034
|
+
"above. This is necessary to ensure that the "
|
|
1035
|
+
"overrides are checked in order of preference."
|
|
1036
|
+
)
|
|
1037
|
+
quant_method = quantization_override
|
|
1038
|
+
self.quantization = quantization_override
|
|
1039
|
+
break
|
|
1040
|
+
|
|
1041
|
+
quant_method = quant_method if quant_method != "" else None
|
|
1042
|
+
# Verify quantization configurations.
|
|
1043
|
+
if self.quantization is None:
|
|
1044
|
+
self.quantization = quant_method
|
|
1045
|
+
elif self.quantization != quant_method:
|
|
1046
|
+
raise ValueError(
|
|
1047
|
+
"Quantization method specified in the model config "
|
|
1048
|
+
f"({quant_method}) does not match the quantization "
|
|
1049
|
+
f"method specified in the `quantization` argument "
|
|
1050
|
+
f"({self.quantization})."
|
|
1051
|
+
)
|
|
1052
|
+
|
|
1053
|
+
if self.quantization is not None:
|
|
1054
|
+
if self.quantization not in supported_quantization:
|
|
1055
|
+
raise ValueError(
|
|
1056
|
+
f"Unknown quantization method: {self.quantization}. Must "
|
|
1057
|
+
f"be one of {supported_quantization}."
|
|
1058
|
+
)
|
|
1059
|
+
from vllm.platforms import current_platform
|
|
1060
|
+
|
|
1061
|
+
current_platform.verify_quantization(self.quantization)
|
|
1062
|
+
|
|
1063
|
+
def _verify_cuda_graph(self) -> None:
|
|
1064
|
+
# CUDAGraph capture not supported for encoder-decoder models on ROCm
|
|
1065
|
+
unsupported_rocm = self.is_encoder_decoder
|
|
1066
|
+
if unsupported_rocm and not self.enforce_eager and current_platform.is_rocm():
|
|
1067
|
+
logger.warning(
|
|
1068
|
+
"CUDA graph is not supported for %s on ROCm yet, fallback "
|
|
1069
|
+
"to eager mode.",
|
|
1070
|
+
self.hf_config.model_type,
|
|
1071
|
+
)
|
|
1072
|
+
self.enforce_eager = True
|
|
1073
|
+
|
|
1074
|
+
def _verify_bnb_config(self) -> None:
|
|
1075
|
+
"""
|
|
1076
|
+
The current version of bitsandbytes (0.46.1) with 8-bit models does not
|
|
1077
|
+
yet support CUDA graph.
|
|
1078
|
+
# TODO Remove this when bitsandbytes supports.
|
|
1079
|
+
"""
|
|
1080
|
+
is_bitsandbytes = self.quantization == "bitsandbytes"
|
|
1081
|
+
has_quantization_config = (
|
|
1082
|
+
getattr(self.hf_config, "quantization_config", None) is not None
|
|
1083
|
+
)
|
|
1084
|
+
is_8bit = (
|
|
1085
|
+
self.hf_config.quantization_config.get("load_in_8bit", False)
|
|
1086
|
+
if has_quantization_config
|
|
1087
|
+
else False
|
|
1088
|
+
)
|
|
1089
|
+
if all(
|
|
1090
|
+
[
|
|
1091
|
+
is_bitsandbytes,
|
|
1092
|
+
has_quantization_config,
|
|
1093
|
+
is_8bit,
|
|
1094
|
+
not self.enforce_eager,
|
|
1095
|
+
]
|
|
1096
|
+
):
|
|
1097
|
+
logger.warning(
|
|
1098
|
+
"CUDA graph is not supported on BitsAndBytes 8bit yet, "
|
|
1099
|
+
"fallback to the eager mode."
|
|
1100
|
+
)
|
|
1101
|
+
|
|
1102
|
+
self.enforce_eager = True
|
|
1103
|
+
|
|
1104
|
+
def _verify_with_expert_parallelism(self) -> None:
|
|
1105
|
+
num_experts = self.get_num_experts()
|
|
1106
|
+
if num_experts < 1:
|
|
1107
|
+
raise ValueError(
|
|
1108
|
+
"Number of experts in the model must be greater than 0 "
|
|
1109
|
+
"when expert parallelism is enabled."
|
|
1110
|
+
)
|
|
1111
|
+
|
|
1112
|
+
def verify_dual_chunk_attention_config(
|
|
1113
|
+
self,
|
|
1114
|
+
load_config: LoadConfig,
|
|
1115
|
+
) -> None:
|
|
1116
|
+
if hasattr(self.hf_config, "dual_chunk_attention_config"):
|
|
1117
|
+
# Try loading the sparse attention config
|
|
1118
|
+
from vllm.model_executor.model_loader.weight_utils import (
|
|
1119
|
+
get_sparse_attention_config,
|
|
1120
|
+
)
|
|
1121
|
+
|
|
1122
|
+
sparse_attn_config = get_sparse_attention_config(self, load_config)
|
|
1123
|
+
if sparse_attn_config:
|
|
1124
|
+
self.hf_config.dual_chunk_attention_config[
|
|
1125
|
+
"sparse_attention_config"
|
|
1126
|
+
] = sparse_attn_config
|
|
1127
|
+
if (
|
|
1128
|
+
"sparse_attention_enabled"
|
|
1129
|
+
not in self.hf_config.dual_chunk_attention_config
|
|
1130
|
+
):
|
|
1131
|
+
self.hf_config.dual_chunk_attention_config[
|
|
1132
|
+
"sparse_attention_enabled"
|
|
1133
|
+
] = True
|
|
1134
|
+
|
|
1135
|
+
def verify_with_parallel_config(
|
|
1136
|
+
self,
|
|
1137
|
+
parallel_config: ParallelConfig,
|
|
1138
|
+
) -> None:
|
|
1139
|
+
total_num_attention_heads = getattr(
|
|
1140
|
+
self.hf_text_config, "num_attention_heads", 0
|
|
1141
|
+
)
|
|
1142
|
+
tensor_parallel_size = parallel_config.tensor_parallel_size
|
|
1143
|
+
if total_num_attention_heads % tensor_parallel_size != 0:
|
|
1144
|
+
raise ValueError(
|
|
1145
|
+
f"Total number of attention heads ({total_num_attention_heads})"
|
|
1146
|
+
" must be divisible by tensor parallel size "
|
|
1147
|
+
f"({tensor_parallel_size})."
|
|
1148
|
+
)
|
|
1149
|
+
|
|
1150
|
+
if parallel_config.enable_expert_parallel:
|
|
1151
|
+
self._verify_with_expert_parallelism()
|
|
1152
|
+
|
|
1153
|
+
pipeline_parallel_size = parallel_config.pipeline_parallel_size
|
|
1154
|
+
if pipeline_parallel_size > 1 and not self.registry.is_pp_supported_model(
|
|
1155
|
+
self.architectures, self
|
|
1156
|
+
):
|
|
1157
|
+
raise NotImplementedError(
|
|
1158
|
+
"Pipeline parallelism is not supported for this model. "
|
|
1159
|
+
"Supported models implement the `SupportsPP` interface."
|
|
1160
|
+
)
|
|
1161
|
+
|
|
1162
|
+
decode_context_parallel_size = parallel_config.decode_context_parallel_size
|
|
1163
|
+
if decode_context_parallel_size > 1 and not self.use_mla:
|
|
1164
|
+
total_num_kv_heads = self.get_total_num_kv_heads()
|
|
1165
|
+
assert tensor_parallel_size > total_num_kv_heads, (
|
|
1166
|
+
f"tensor parallel size {tensor_parallel_size} must be greater "
|
|
1167
|
+
f"than total num kv heads {total_num_kv_heads} when enable "
|
|
1168
|
+
f"decode context parallel for GQA/MQA"
|
|
1169
|
+
)
|
|
1170
|
+
|
|
1171
|
+
max_dcp_size = tensor_parallel_size // total_num_kv_heads
|
|
1172
|
+
assert decode_context_parallel_size <= max_dcp_size, (
|
|
1173
|
+
f"decode context parallel size must less than or equal to "
|
|
1174
|
+
f"(tensor parallel size {tensor_parallel_size} // total "
|
|
1175
|
+
f"num kv heads {total_num_kv_heads}) = {max_dcp_size}, "
|
|
1176
|
+
f"but got {decode_context_parallel_size}"
|
|
1177
|
+
)
|
|
1178
|
+
|
|
1179
|
+
num_q_per_kv = total_num_attention_heads // total_num_kv_heads
|
|
1180
|
+
assert num_q_per_kv % decode_context_parallel_size == 0, (
|
|
1181
|
+
f"Total number of q per kv attn heads ({num_q_per_kv})"
|
|
1182
|
+
" must be divisible by dcp world size when enable "
|
|
1183
|
+
"decode context parallel for GQA "
|
|
1184
|
+
f"({parallel_config.decode_context_parallel_size})."
|
|
1185
|
+
)
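To make the two assertions above concrete, here is a small worked example with assumed GQA shapes (the numbers are illustrative, not taken from any real model):

tensor_parallel_size = 8
total_num_kv_heads = 2             # GQA: far fewer KV heads than query heads
total_num_attention_heads = 32

max_dcp_size = tensor_parallel_size // total_num_kv_heads        # 8 // 2 = 4
num_q_per_kv = total_num_attention_heads // total_num_kv_heads   # 32 // 2 = 16

for dcp_size in (2, 4):
    assert dcp_size <= max_dcp_size         # within the TP // KV-head budget
    assert num_q_per_kv % dcp_size == 0     # query heads split evenly across DCP ranks
    print(f"decode_context_parallel_size={dcp_size} passes both checks")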
|
|
1186
|
+
|
|
1187
|
+
def get_sliding_window(self) -> int | None:
|
|
1188
|
+
"""Get the sliding window size from the HF text config if present."""
|
|
1189
|
+
return getattr(self.hf_text_config, "sliding_window", None)
|
|
1190
|
+
|
|
1191
|
+
def get_vocab_size(self) -> int:
|
|
1192
|
+
return getattr(self.hf_text_config, "vocab_size", 0)
|
|
1193
|
+
|
|
1194
|
+
def get_hidden_size(self) -> int:
|
|
1195
|
+
return getattr(self.hf_text_config, "hidden_size", 0)
|
|
1196
|
+
|
|
1197
|
+
def get_inputs_embeds_size(self) -> int:
|
|
1198
|
+
# The size of inputs_embeds is usually identical to the size
|
|
1199
|
+
# of the hidden states, however there are exceptions, such as
|
|
1200
|
+
# embedding models like CLIP and SigLIP
|
|
1201
|
+
for target_attr in ("projection_dim", "projection_size"):
|
|
1202
|
+
if hasattr(self.hf_text_config, target_attr):
|
|
1203
|
+
return getattr(self.hf_text_config, target_attr)
|
|
1204
|
+
|
|
1205
|
+
return self.get_hidden_size()
|
|
1206
|
+
|
|
1207
|
+
@property
|
|
1208
|
+
def is_deepseek_mla(self) -> bool:
|
|
1209
|
+
if not hasattr(self.hf_text_config, "model_type"):
|
|
1210
|
+
return False
|
|
1211
|
+
elif self.hf_text_config.model_type in (
|
|
1212
|
+
"deepseek_v2",
|
|
1213
|
+
"deepseek_v3",
|
|
1214
|
+
"deepseek_v32",
|
|
1215
|
+
"deepseek_mtp",
|
|
1216
|
+
"kimi_k2",
|
|
1217
|
+
"kimi_linear",
|
|
1218
|
+
"longcat_flash",
|
|
1219
|
+
"pangu_ultra_moe",
|
|
1220
|
+
"pangu_ultra_moe_mtp",
|
|
1221
|
+
):
|
|
1222
|
+
return self.hf_text_config.kv_lora_rank is not None
|
|
1223
|
+
elif self.hf_text_config.model_type == "eagle":
|
|
1224
|
+
# if the model is an EAGLE module, check for the
|
|
1225
|
+
# underlying architecture
|
|
1226
|
+
return (
|
|
1227
|
+
self.hf_text_config.model.model_type
|
|
1228
|
+
in ("deepseek_v2", "deepseek_v3", "deepseek_v32")
|
|
1229
|
+
and self.hf_text_config.kv_lora_rank is not None
|
|
1230
|
+
)
|
|
1231
|
+
return False
|
|
1232
|
+
|
|
1233
|
+
def get_head_size(self) -> int:
|
|
1234
|
+
# TODO remove hard code
|
|
1235
|
+
if self.is_deepseek_mla:
|
|
1236
|
+
qk_rope_head_dim = getattr(self.hf_text_config, "qk_rope_head_dim", 0)
|
|
1237
|
+
if self.use_mla:
|
|
1238
|
+
return self.hf_text_config.kv_lora_rank + qk_rope_head_dim
|
|
1239
|
+
else:
|
|
1240
|
+
qk_nope_head_dim = getattr(self.hf_text_config, "qk_nope_head_dim", 0)
|
|
1241
|
+
if qk_rope_head_dim and qk_nope_head_dim:
|
|
1242
|
+
return qk_rope_head_dim + qk_nope_head_dim
|
|
1243
|
+
|
|
1244
|
+
if hasattr(self.hf_text_config, "model_type") and (
|
|
1245
|
+
self.hf_text_config.model_type == "zamba2"
|
|
1246
|
+
):
|
|
1247
|
+
return self.hf_text_config.attention_head_dim
|
|
1248
|
+
|
|
1249
|
+
if self.is_attention_free:
|
|
1250
|
+
return 0
|
|
1251
|
+
|
|
1252
|
+
# NOTE: Some configs may set head_dim=None in the config
|
|
1253
|
+
if getattr(self.hf_text_config, "head_dim", None) is not None:
|
|
1254
|
+
return self.hf_text_config.head_dim
|
|
1255
|
+
|
|
1256
|
+
# NOTE: Some models (such as PLaMo2.1) use `hidden_size_per_head`
|
|
1257
|
+
if getattr(self.hf_text_config, "hidden_size_per_head", None) is not None:
|
|
1258
|
+
return self.hf_text_config.hidden_size_per_head
|
|
1259
|
+
|
|
1260
|
+
# FIXME(woosuk): This may not be true for all models.
|
|
1261
|
+
return (
|
|
1262
|
+
self.hf_text_config.hidden_size // self.hf_text_config.num_attention_heads
|
|
1263
|
+
)
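As a concrete illustration of the MLA branch above, with head dimensions typical of a DeepSeek-V2-style config (assumed values; check the actual model config):

kv_lora_rank = 512        # assumed value
qk_rope_head_dim = 64     # assumed value
qk_nope_head_dim = 128    # assumed value

head_size_with_mla = kv_lora_rank + qk_rope_head_dim          # 576 (MLA enabled)
head_size_without_mla = qk_nope_head_dim + qk_rope_head_dim   # 192 (MLA disabled)
print(head_size_with_mla, head_size_without_mla)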
|
|
1264
|
+
|
|
1265
|
+
def get_total_num_kv_heads(self) -> int:
|
|
1266
|
+
"""Returns the total number of KV heads."""
|
|
1267
|
+
# For GPTBigCode & Falcon:
|
|
1268
|
+
# NOTE: for falcon, when new_decoder_architecture is True, the
|
|
1269
|
+
# multi_query flag is ignored and we use n_head_kv for the number of
|
|
1270
|
+
# KV heads.
|
|
1271
|
+
falcon_model_types = ["falcon", "RefinedWeb", "RefinedWebModel"]
|
|
1272
|
+
new_decoder_arch_falcon = (
|
|
1273
|
+
self.hf_config.model_type in falcon_model_types
|
|
1274
|
+
and getattr(self.hf_config, "new_decoder_architecture", False)
|
|
1275
|
+
)
|
|
1276
|
+
if not new_decoder_arch_falcon and getattr(
|
|
1277
|
+
self.hf_text_config, "multi_query", False
|
|
1278
|
+
):
|
|
1279
|
+
# Multi-query attention, only one KV head.
|
|
1280
|
+
# Currently, tensor parallelism is not supported in this case.
|
|
1281
|
+
return 1
|
|
1282
|
+
|
|
1283
|
+
# For DBRX and MPT
|
|
1284
|
+
if self.hf_config.model_type == "mpt":
|
|
1285
|
+
if "kv_n_heads" in self.hf_config.attn_config:
|
|
1286
|
+
return self.hf_config.attn_config["kv_n_heads"]
|
|
1287
|
+
return self.hf_config.num_attention_heads
|
|
1288
|
+
if self.hf_config.model_type == "dbrx":
|
|
1289
|
+
return getattr(
|
|
1290
|
+
self.hf_config.attn_config,
|
|
1291
|
+
"kv_n_heads",
|
|
1292
|
+
self.hf_config.num_attention_heads,
|
|
1293
|
+
)
|
|
1294
|
+
|
|
1295
|
+
if self.hf_config.model_type == "nemotron-nas":
|
|
1296
|
+
for block in self.hf_config.block_configs:
|
|
1297
|
+
if not block.attention.no_op:
|
|
1298
|
+
return (
|
|
1299
|
+
self.hf_config.num_attention_heads
|
|
1300
|
+
// block.attention.n_heads_in_group
|
|
1301
|
+
)
|
|
1302
|
+
|
|
1303
|
+
raise RuntimeError("Couldn't determine number of kv heads")
|
|
1304
|
+
|
|
1305
|
+
if self.is_attention_free:
|
|
1306
|
+
return 0
|
|
1307
|
+
|
|
1308
|
+
attributes = [
|
|
1309
|
+
# For Falcon:
|
|
1310
|
+
"n_head_kv",
|
|
1311
|
+
"num_kv_heads",
|
|
1312
|
+
# For LLaMA-2:
|
|
1313
|
+
"num_key_value_heads",
|
|
1314
|
+
# For ChatGLM:
|
|
1315
|
+
"multi_query_group_num",
|
|
1316
|
+
]
|
|
1317
|
+
for attr in attributes:
|
|
1318
|
+
num_kv_heads = getattr(self.hf_text_config, attr, None)
|
|
1319
|
+
if num_kv_heads is not None:
|
|
1320
|
+
return num_kv_heads
|
|
1321
|
+
|
|
1322
|
+
# For non-grouped-query attention models, the number of KV heads is
|
|
1323
|
+
# equal to the number of attention heads.
|
|
1324
|
+
return self.hf_text_config.num_attention_heads
|
|
1325
|
+
|
|
1326
|
+
def get_num_kv_heads(self, parallel_config: ParallelConfig) -> int:
|
|
1327
|
+
"""Returns the number of KV heads per GPU."""
|
|
1328
|
+
if self.use_mla:
|
|
1329
|
+
# When using MLA during decode it becomes MQA
|
|
1330
|
+
return 1
|
|
1331
|
+
|
|
1332
|
+
total_num_kv_heads = self.get_total_num_kv_heads()
|
|
1333
|
+
# If tensor parallelism is used, we divide the number of KV heads by
|
|
1334
|
+
# the tensor parallel size. We will replicate the KV heads in the
|
|
1335
|
+
# case where the number of KV heads is smaller than the tensor
|
|
1336
|
+
# parallel size so each GPU has at least one KV head.
|
|
1337
|
+
return max(1, total_num_kv_heads // parallel_config.tensor_parallel_size)
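A standalone sketch of the replication rule above: when tensor parallelism has more ranks than KV heads, each rank still keeps at least one (replicated) head.

def kv_heads_per_gpu(total_num_kv_heads: int, tensor_parallel_size: int) -> int:
    # Mirrors the max(1, ...) rule above; numbers below are illustrative.
    return max(1, total_num_kv_heads // tensor_parallel_size)

print(kv_heads_per_gpu(8, 4))    # -> 2 KV heads per GPU
print(kv_heads_per_gpu(8, 16))   # -> 1 KV head per GPU (replicated across ranks)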
|
|
1338
|
+
|
|
1339
|
+
def get_num_attention_heads(self, parallel_config: ParallelConfig) -> int:
|
|
1340
|
+
num_heads = getattr(self.hf_text_config, "num_attention_heads", 0)
|
|
1341
|
+
return num_heads // parallel_config.tensor_parallel_size
|
|
1342
|
+
|
|
1343
|
+
def get_num_experts(self) -> int:
|
|
1344
|
+
"""Returns the number of experts in the model."""
|
|
1345
|
+
num_expert_names = [
|
|
1346
|
+
"num_experts", # Jamba
|
|
1347
|
+
"moe_num_experts", # Dbrx
|
|
1348
|
+
"n_routed_experts", # DeepSeek
|
|
1349
|
+
"num_local_experts", # Mixtral
|
|
1350
|
+
]
|
|
1351
|
+
num_experts = getattr_iter(self.hf_text_config, num_expert_names, 0)
|
|
1352
|
+
if isinstance(num_experts, list):
|
|
1353
|
+
# Ernie VL's remote code uses list[int]...
|
|
1354
|
+
# The values are always the same so we just take the first one.
|
|
1355
|
+
return num_experts[0]
|
|
1356
|
+
# Coerce to 0 if explicitly set to None
|
|
1357
|
+
return num_experts or 0
|
|
1358
|
+
|
|
1359
|
+
def get_total_num_hidden_layers(self) -> int:
|
|
1360
|
+
if (
|
|
1361
|
+
self.hf_text_config.model_type == "deepseek_mtp"
|
|
1362
|
+
or self.hf_config.model_type == "mimo_mtp"
|
|
1363
|
+
or self.hf_config.model_type == "glm4_moe_mtp"
|
|
1364
|
+
or self.hf_config.model_type == "ernie_mtp"
|
|
1365
|
+
or self.hf_config.model_type == "qwen3_next_mtp"
|
|
1366
|
+
or self.hf_config.model_type == "pangu_ultra_moe_mtp"
|
|
1367
|
+
):
|
|
1368
|
+
total_num_hidden_layers = getattr(
|
|
1369
|
+
self.hf_text_config, "num_nextn_predict_layers", 0
|
|
1370
|
+
)
|
|
1371
|
+
elif self.hf_config.model_type == "longcat_flash_mtp":
|
|
1372
|
+
total_num_hidden_layers = getattr(
|
|
1373
|
+
self.hf_text_config, "num_nextn_predict_layers", 1
|
|
1374
|
+
)
|
|
1375
|
+
else:
|
|
1376
|
+
total_num_hidden_layers = getattr(
|
|
1377
|
+
self.hf_text_config, "num_hidden_layers", 0
|
|
1378
|
+
)
|
|
1379
|
+
return total_num_hidden_layers
|
|
1380
|
+
|
|
1381
|
+
def get_layers_start_end_indices(
|
|
1382
|
+
self, parallel_config: ParallelConfig
|
|
1383
|
+
) -> tuple[int, int]:
|
|
1384
|
+
from vllm.distributed.utils import get_pp_indices
|
|
1385
|
+
|
|
1386
|
+
total_num_hidden_layers = self.get_total_num_hidden_layers()
|
|
1387
|
+
|
|
1388
|
+
# the layout order is: DP x PP x TP
|
|
1389
|
+
pp_rank = (
|
|
1390
|
+
parallel_config.rank // parallel_config.tensor_parallel_size
|
|
1391
|
+
) % parallel_config.pipeline_parallel_size
|
|
1392
|
+
pp_size = parallel_config.pipeline_parallel_size
|
|
1393
|
+
start, end = get_pp_indices(total_num_hidden_layers, pp_rank, pp_size)
|
|
1394
|
+
return start, end
|
|
1395
|
+
|
|
1396
|
+
def get_num_layers(self, parallel_config: ParallelConfig) -> int:
|
|
1397
|
+
start, end = self.get_layers_start_end_indices(parallel_config)
|
|
1398
|
+
return end - start
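A small standalone sketch of the rank-to-stage mapping used above (layout order DP x PP x TP), with a simplified even layer split standing in for get_pp_indices (illustrative sizes):

tensor_parallel_size = 2
pipeline_parallel_size = 2
total_layers = 32

for rank in range(8):  # 2 DP replicas x 2 PP stages x 2 TP ranks
    pp_rank = (rank // tensor_parallel_size) % pipeline_parallel_size
    layers_per_stage = total_layers // pipeline_parallel_size   # simplified even split
    start = pp_rank * layers_per_stage
    end = start + layers_per_stage
    print(f"rank={rank}: pp_rank={pp_rank}, layers [{start}, {end})")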
|
|
1399
|
+
|
|
1400
|
+
def get_num_layers_by_block_type(
|
|
1401
|
+
self,
|
|
1402
|
+
parallel_config: ParallelConfig,
|
|
1403
|
+
block_type: LayerBlockType = "attention",
|
|
1404
|
+
) -> int:
|
|
1405
|
+
# This function relies on 'layers_block_type' in hf_config,
|
|
1406
|
+
# for configs without this attribute, we need workarounds like the ones below
|
|
1407
|
+
attn_block_type = block_type == "attention"
|
|
1408
|
+
is_transformer = (
|
|
1409
|
+
not self.is_hybrid and not self.has_noops and not self.is_attention_free
|
|
1410
|
+
)
|
|
1411
|
+
start, end = self.get_layers_start_end_indices(parallel_config)
|
|
1412
|
+
|
|
1413
|
+
if is_transformer:
|
|
1414
|
+
# Handle the basic case first
|
|
1415
|
+
return end - start if attn_block_type else 0
|
|
1416
|
+
elif self.is_attention_free:
|
|
1417
|
+
# Attention free
|
|
1418
|
+
# Note that this code assumes there
|
|
1419
|
+
# is only one type of attention-free block type.
|
|
1420
|
+
return 0 if attn_block_type else end - start
|
|
1421
|
+
elif self.has_noops:
|
|
1422
|
+
block_configs = self.hf_config.block_configs
|
|
1423
|
+
return sum(not bc.attention.no_op for bc in block_configs[start:end])
|
|
1424
|
+
else:
|
|
1425
|
+
# Hybrid model Jamba
|
|
1426
|
+
layers_block_type_value = getattr(
|
|
1427
|
+
self.hf_text_config, "layers_block_type", None
|
|
1428
|
+
)
|
|
1429
|
+
if layers_block_type_value is not None:
|
|
1430
|
+
if hasattr(self.hf_text_config, "model_type") and (
|
|
1431
|
+
self.hf_text_config.model_type == "zamba2"
|
|
1432
|
+
):
|
|
1433
|
+
if attn_block_type:
|
|
1434
|
+
return sum(
|
|
1435
|
+
t == "hybrid" for t in layers_block_type_value[start:end]
|
|
1436
|
+
)
|
|
1437
|
+
else:
|
|
1438
|
+
return self.get_num_layers(parallel_config)
|
|
1439
|
+
return sum(t == block_type for t in layers_block_type_value[start:end])
|
|
1440
|
+
|
|
1441
|
+
# Hybrid model Minimax
|
|
1442
|
+
attn_type_list = getattr(self.hf_config, "attn_type_list", None)
|
|
1443
|
+
if attn_type_list:
|
|
1444
|
+
return sum(t == 1 for t in attn_type_list[start:end])
|
|
1445
|
+
|
|
1446
|
+
# Hybrid model Qwen3Next
|
|
1447
|
+
layer_types_value = getattr(self.hf_config, "layer_types", None)
|
|
1448
|
+
if layer_types_value is not None:
|
|
1449
|
+
if block_type == "attention":
|
|
1450
|
+
return sum(
|
|
1451
|
+
t == "full_attention" for t in layer_types_value[start:end]
|
|
1452
|
+
)
|
|
1453
|
+
elif block_type == "linear_attention":
|
|
1454
|
+
return sum(
|
|
1455
|
+
t == "linear_attention" for t in layer_types_value[start:end]
|
|
1456
|
+
)
|
|
1457
|
+
else:
|
|
1458
|
+
return sum(t == block_type for t in layer_types_value[start:end])
|
|
1459
|
+
|
|
1460
|
+
if (
|
|
1461
|
+
layers_block_type_value is None
|
|
1462
|
+
and attn_type_list is None
|
|
1463
|
+
and layer_types_value is None
|
|
1464
|
+
):
|
|
1465
|
+
raise ValueError(
|
|
1466
|
+
"The model is an hybrid without a layers_block_type or an "
|
|
1467
|
+
"attn_type_list, or a layer_types in the hf_config, "
|
|
1468
|
+
f"cannot determine the num of {block_type} layers"
|
|
1469
|
+
)
|
|
1470
|
+
|
|
1471
|
+
def get_mamba_chunk_size(self) -> int | None:
|
|
1472
|
+
"""
|
|
1473
|
+
Returns the mamba chunk size if it exists
|
|
1474
|
+
"""
|
|
1475
|
+
# used by e.g. Bamba, FalconH1, Granite, PLaMo2
|
|
1476
|
+
chunk_size = getattr(self.hf_text_config, "mamba_chunk_size", None)
|
|
1477
|
+
if chunk_size is None:
|
|
1478
|
+
# used by e.g. Mamba2, NemotronH, Zamba
|
|
1479
|
+
chunk_size = getattr(self.hf_text_config, "chunk_size", None)
|
|
1480
|
+
|
|
1481
|
+
# Since Mamba1 does not have a chunk notion
|
|
1482
|
+
# we use a default chunk size of 2048.
|
|
1483
|
+
if chunk_size is None:
|
|
1484
|
+
chunk_size = 2048
|
|
1485
|
+
|
|
1486
|
+
return chunk_size
|
|
1487
|
+
|
|
1488
|
+
def get_multimodal_config(self) -> MultiModalConfig:
|
|
1489
|
+
"""
|
|
1490
|
+
Get the multimodal configuration of the model.
|
|
1491
|
+
|
|
1492
|
+
Raises:
|
|
1493
|
+
ValueError: If the model is not multimodal.
|
|
1494
|
+
"""
|
|
1495
|
+
if self.multimodal_config is None:
|
|
1496
|
+
raise ValueError("The model is not multimodal.")
|
|
1497
|
+
|
|
1498
|
+
return self.multimodal_config
|
|
1499
|
+
|
|
1500
|
+
def try_get_generation_config(self) -> dict[str, Any]:
|
|
1501
|
+
"""
|
|
1502
|
+
This method attempts to retrieve the non-default values of the
|
|
1503
|
+
generation config for this model.
|
|
1504
|
+
|
|
1505
|
+
The generation config can contain information about special tokens, as
|
|
1506
|
+
well as sampling parameters, which is why this method exists separately
|
|
1507
|
+
from `get_diff_sampling_param`.
|
|
1508
|
+
|
|
1509
|
+
Returns:
|
|
1510
|
+
A dictionary containing the non-default generation config.
|
|
1511
|
+
"""
|
|
1512
|
+
if self.generation_config in {"auto", "vllm"}:
|
|
1513
|
+
config = try_get_generation_config(
|
|
1514
|
+
self.hf_config_path or self.model,
|
|
1515
|
+
trust_remote_code=self.trust_remote_code,
|
|
1516
|
+
revision=self.revision,
|
|
1517
|
+
config_format=self.config_format,
|
|
1518
|
+
)
|
|
1519
|
+
else:
|
|
1520
|
+
config = try_get_generation_config(
|
|
1521
|
+
self.generation_config,
|
|
1522
|
+
trust_remote_code=self.trust_remote_code,
|
|
1523
|
+
config_format=self.config_format,
|
|
1524
|
+
)
|
|
1525
|
+
|
|
1526
|
+
if config is None:
|
|
1527
|
+
return {}
|
|
1528
|
+
|
|
1529
|
+
return config.to_diff_dict()
|
|
1530
|
+
|
|
1531
|
+
def get_diff_sampling_param(self) -> dict[str, Any]:
|
|
1532
|
+
"""
|
|
1533
|
+
This method returns a dictionary containing the non-default sampling
|
|
1534
|
+
parameters with `override_generation_config` applied.
|
|
1535
|
+
|
|
1536
|
+
The default sampling parameters are:
|
|
1537
|
+
|
|
1538
|
+
- vLLM's neutral defaults if `self.generation_config="vllm"`
|
|
1539
|
+
- the model's defaults if `self.generation_config="auto"`
|
|
1540
|
+
- as defined in `generation_config.json` if
|
|
1541
|
+
`self.generation_config="path/to/generation_config/dir"`
|
|
1542
|
+
|
|
1543
|
+
Returns:
|
|
1544
|
+
A dictionary containing the non-default sampling parameters.
|
|
1545
|
+
"""
|
|
1546
|
+
if self.generation_config == "vllm":
|
|
1547
|
+
config = {}
|
|
1548
|
+
else:
|
|
1549
|
+
config = self.try_get_generation_config()
|
|
1550
|
+
|
|
1551
|
+
# Overriding with given generation config
|
|
1552
|
+
config.update(self.override_generation_config)
|
|
1553
|
+
|
|
1554
|
+
available_params = [
|
|
1555
|
+
"repetition_penalty",
|
|
1556
|
+
"temperature",
|
|
1557
|
+
"top_k",
|
|
1558
|
+
"top_p",
|
|
1559
|
+
"min_p",
|
|
1560
|
+
"max_new_tokens",
|
|
1561
|
+
]
|
|
1562
|
+
if any(p in config for p in available_params):
|
|
1563
|
+
diff_sampling_param = {
|
|
1564
|
+
p: config.get(p) for p in available_params if config.get(p) is not None
|
|
1565
|
+
}
|
|
1566
|
+
# Huggingface definition of max_new_tokens is equivalent
|
|
1567
|
+
# to vLLM's max_tokens
|
|
1568
|
+
if "max_new_tokens" in diff_sampling_param:
|
|
1569
|
+
diff_sampling_param["max_tokens"] = diff_sampling_param.pop(
|
|
1570
|
+
"max_new_tokens"
|
|
1571
|
+
)
|
|
1572
|
+
else:
|
|
1573
|
+
diff_sampling_param = {}
|
|
1574
|
+
|
|
1575
|
+
if diff_sampling_param:
|
|
1576
|
+
logger.warning_once(
|
|
1577
|
+
"Default sampling parameters have been overridden by the "
|
|
1578
|
+
"model's Hugging Face generation config recommended from the "
|
|
1579
|
+
"model creator. If this is not intended, please relaunch "
|
|
1580
|
+
"vLLM instance with `--generation-config vllm`."
|
|
1581
|
+
)
|
|
1582
|
+
return diff_sampling_param
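For example, given a hypothetical generation_config.json with the values below, the method returns only the recognized sampling keys, with max_new_tokens renamed to vLLM's max_tokens:

config = {"temperature": 0.6, "top_p": 0.9, "max_new_tokens": 512, "bos_token_id": 1}
available_params = ["repetition_penalty", "temperature", "top_k", "top_p", "min_p", "max_new_tokens"]

diff_sampling_param = {p: config[p] for p in available_params if config.get(p) is not None}
if "max_new_tokens" in diff_sampling_param:
    diff_sampling_param["max_tokens"] = diff_sampling_param.pop("max_new_tokens")
print(diff_sampling_param)  # {'temperature': 0.6, 'top_p': 0.9, 'max_tokens': 512}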
|
|
1583
|
+
|
|
1584
|
+
@property
|
|
1585
|
+
def is_encoder_decoder(self) -> bool:
|
|
1586
|
+
"""Extract the HF encoder/decoder model flag."""
|
|
1587
|
+
return is_encoder_decoder(self.hf_config)
|
|
1588
|
+
|
|
1589
|
+
@property
|
|
1590
|
+
def uses_alibi(self) -> bool:
|
|
1591
|
+
cfg = self.hf_text_config
|
|
1592
|
+
|
|
1593
|
+
return (
|
|
1594
|
+
getattr(cfg, "alibi", False) # Falcon
|
|
1595
|
+
or "BloomForCausalLM" in self.architectures # Bloom
|
|
1596
|
+
or getattr(cfg, "position_encoding_type", "") == "alibi" # codellm_1b_alibi
|
|
1597
|
+
or (
|
|
1598
|
+
hasattr(cfg, "attn_config") # MPT
|
|
1599
|
+
and (
|
|
1600
|
+
(
|
|
1601
|
+
isinstance(cfg.attn_config, dict)
|
|
1602
|
+
and cfg.attn_config.get("alibi", False)
|
|
1603
|
+
)
|
|
1604
|
+
or (
|
|
1605
|
+
not isinstance(cfg.attn_config, dict)
|
|
1606
|
+
and getattr(cfg.attn_config, "alibi", False)
|
|
1607
|
+
)
|
|
1608
|
+
)
|
|
1609
|
+
)
|
|
1610
|
+
)
|
|
1611
|
+
|
|
1612
|
+
@property
|
|
1613
|
+
def uses_mrope(self) -> bool:
|
|
1614
|
+
return uses_mrope(self.hf_config)
|
|
1615
|
+
|
|
1616
|
+
@property
|
|
1617
|
+
def uses_xdrope_dim(self) -> int:
|
|
1618
|
+
return uses_xdrope_dim(self.hf_config)
|
|
1619
|
+
|
|
1620
|
+
@property
|
|
1621
|
+
def is_multimodal_model(self) -> bool:
|
|
1622
|
+
return self.multimodal_config is not None
|
|
1623
|
+
|
|
1624
|
+
@property
|
|
1625
|
+
def is_multimodal_raw_input_only_model(self) -> bool:
|
|
1626
|
+
return self._model_info.supports_multimodal_raw_input_only
|
|
1627
|
+
|
|
1628
|
+
@property
|
|
1629
|
+
def is_cross_encoder(self) -> bool:
|
|
1630
|
+
return (
|
|
1631
|
+
self._model_info.supports_cross_encoding or self.convert_type == "classify"
|
|
1632
|
+
)
|
|
1633
|
+
|
|
1634
|
+
@property
|
|
1635
|
+
def is_pp_supported(self) -> bool:
|
|
1636
|
+
return self._model_info.supports_pp
|
|
1637
|
+
|
|
1638
|
+
@property
|
|
1639
|
+
def is_attention_free(self) -> bool:
|
|
1640
|
+
return self._model_info.is_attention_free
|
|
1641
|
+
|
|
1642
|
+
@property
|
|
1643
|
+
def is_hybrid(self) -> bool:
|
|
1644
|
+
# Handle granite-4.0-micro case which uses hybrid config but does not
|
|
1645
|
+
# actually contain any non-attention layers.
|
|
1646
|
+
layer_types = getattr(self.hf_config, "layer_types", None)
|
|
1647
|
+
if layer_types is not None and all(
|
|
1648
|
+
layer == "attention" for layer in layer_types
|
|
1649
|
+
):
|
|
1650
|
+
return False
|
|
1651
|
+
return self._model_info.is_hybrid
|
|
1652
|
+
|
|
1653
|
+
@property
|
|
1654
|
+
def has_noops(self) -> bool:
|
|
1655
|
+
return self._model_info.has_noops
|
|
1656
|
+
|
|
1657
|
+
@property
|
|
1658
|
+
def has_inner_state(self):
|
|
1659
|
+
return self._model_info.has_inner_state
|
|
1660
|
+
|
|
1661
|
+
@property
|
|
1662
|
+
def supports_mamba_prefix_caching(self) -> bool:
|
|
1663
|
+
return self._model_info.supports_mamba_prefix_caching
|
|
1664
|
+
|
|
1665
|
+
@property
|
|
1666
|
+
def use_mla(self) -> bool:
|
|
1667
|
+
return self.is_deepseek_mla and not envs.VLLM_MLA_DISABLE
|
|
1668
|
+
|
|
1669
|
+
@property
|
|
1670
|
+
def is_matryoshka(self) -> bool:
|
|
1671
|
+
return bool(getattr(self.hf_config, "matryoshka_dimensions", None)) or getattr(
|
|
1672
|
+
self.hf_config, "is_matryoshka", False
|
|
1673
|
+
)
|
|
1674
|
+
|
|
1675
|
+
@property
|
|
1676
|
+
def matryoshka_dimensions(self):
|
|
1677
|
+
return getattr(self.hf_config, "matryoshka_dimensions", None)
|
|
1678
|
+
|
|
1679
|
+
@property
|
|
1680
|
+
def use_pad_token(self) -> bool:
|
|
1681
|
+
# cross_encoder models default to using pad_token.
|
|
1682
|
+
# `llm as reranker` models default to not using pad_token.
|
|
1683
|
+
return getattr(self.hf_config, "use_pad_token", True)
|
|
1684
|
+
|
|
1685
|
+
@property
|
|
1686
|
+
def head_dtype(self) -> torch.dtype:
|
|
1687
|
+
"""
|
|
1688
|
+
"head" refers to the last Linear layer(s) of an LLM,
|
|
1689
|
+
such as the lm_head in a generation model,
|
|
1690
|
+
or the score or classifier in a classification model.
|
|
1691
|
+
|
|
1692
|
+
`head_dtype` currently only supports pooling models.\n
|
|
1693
|
+
- Pooling models default to using an fp32 head;
|
|
1694
|
+
you can use --hf-overrides '{"head_dtype": "model"}' to disable it.
|
|
1695
|
+
"""
|
|
1696
|
+
|
|
1697
|
+
head_dtype = _get_head_dtype(
|
|
1698
|
+
config=self.hf_config, dtype=self.dtype, runner_type=self.runner_type
|
|
1699
|
+
)
|
|
1700
|
+
|
|
1701
|
+
if self.runner_type != "pooling" and head_dtype != self.dtype:
|
|
1702
|
+
logger.warning_once(
|
|
1703
|
+
"`head_dtype` currently only supports pooling models."
|
|
1704
|
+
"fallback to model dtype [%s].",
|
|
1705
|
+
self.dtype,
|
|
1706
|
+
)
|
|
1707
|
+
return self.dtype
|
|
1708
|
+
|
|
1709
|
+
if head_dtype not in current_platform.supported_dtypes:
|
|
1710
|
+
logger.warning_once(
|
|
1711
|
+
"The current platform does not support [%s] head dtype, "
|
|
1712
|
+
"fallback to model dtype [%s].",
|
|
1713
|
+
head_dtype,
|
|
1714
|
+
self.dtype,
|
|
1715
|
+
)
|
|
1716
|
+
return self.dtype
|
|
1717
|
+
|
|
1718
|
+
logger.debug_once("head dtype: %s", head_dtype)
|
|
1719
|
+
return head_dtype
|
|
1720
|
+
|
|
1721
|
+
@property
|
|
1722
|
+
def embedding_size(self):
|
|
1723
|
+
dense_modules = try_get_dense_modules(self.model, revision=self.revision)
|
|
1724
|
+
if dense_modules is not None:
|
|
1725
|
+
return dense_modules[-1]["out_features"]
|
|
1726
|
+
return self.get_hidden_size()
|
|
1727
|
+
|
|
1728
|
+
def get_and_verify_max_len(self, max_model_len: int):
|
|
1729
|
+
# Consider max_model_len in tokenizer_config only when
|
|
1730
|
+
# pooling models use absolute position_embedding.
|
|
1731
|
+
tokenizer_config = None
|
|
1732
|
+
if (
|
|
1733
|
+
self.runner_type == "pooling"
|
|
1734
|
+
and getattr(self.hf_config, "position_embedding_type", "") == "absolute"
|
|
1735
|
+
):
|
|
1736
|
+
tokenizer_config = try_get_tokenizer_config(
|
|
1737
|
+
self.tokenizer,
|
|
1738
|
+
trust_remote_code=self.trust_remote_code,
|
|
1739
|
+
revision=self.tokenizer_revision,
|
|
1740
|
+
)
|
|
1741
|
+
max_model_len = _get_and_verify_max_len(
|
|
1742
|
+
hf_config=self.hf_text_config,
|
|
1743
|
+
tokenizer_config=tokenizer_config,
|
|
1744
|
+
max_model_len=max_model_len,
|
|
1745
|
+
disable_sliding_window=self.disable_sliding_window,
|
|
1746
|
+
sliding_window=self.get_sliding_window(),
|
|
1747
|
+
spec_target_max_model_len=self.spec_target_max_model_len,
|
|
1748
|
+
encoder_config=self.encoder_config,
|
|
1749
|
+
)
|
|
1750
|
+
logger.info("Using max model len %s", max_model_len)
|
|
1751
|
+
return max_model_len
|
|
1752
|
+
|
|
1753
|
+
@property
|
|
1754
|
+
def attn_type(self) -> AttnTypeStr:
|
|
1755
|
+
if self.pooler_config is not None:
|
|
1756
|
+
pooling_type = self._model_info.default_pooling_type.lower()
|
|
1757
|
+
if pooling_type == "cls":
|
|
1758
|
+
return "encoder_only"
|
|
1759
|
+
else:
|
|
1760
|
+
is_causal = getattr(self.hf_config, "is_causal", True)
|
|
1761
|
+
return "encoder_only" if not is_causal else self._model_info.attn_type
|
|
1762
|
+
elif self.is_hybrid:
|
|
1763
|
+
return "hybrid"
|
|
1764
|
+
elif self.is_attention_free:
|
|
1765
|
+
return "attention_free"
|
|
1766
|
+
elif self.is_encoder_decoder:
|
|
1767
|
+
return "encoder_decoder"
|
|
1768
|
+
else:
|
|
1769
|
+
return "decoder"
|
|
1770
|
+
|
|
1771
|
+
@property
|
|
1772
|
+
def is_chunked_prefill_supported(self) -> bool:
|
|
1773
|
+
attn_type = self.attn_type
|
|
1774
|
+
if self.pooler_config is not None:
|
|
1775
|
+
# for pooling models
|
|
1776
|
+
if attn_type == "encoder_only":
|
|
1777
|
+
logger.debug(
|
|
1778
|
+
"Pooling models with bidirectional attn does not support "
|
|
1779
|
+
"chunked prefill."
|
|
1780
|
+
)
|
|
1781
|
+
return False
|
|
1782
|
+
elif attn_type == "decoder":
|
|
1783
|
+
pooling_type = self.pooler_config.pooling_type.lower()
|
|
1784
|
+
if pooling_type in ["all", "mean", "step", "cls"]:
|
|
1785
|
+
logger.debug(
|
|
1786
|
+
"Pooling models with %s pooling does not "
|
|
1787
|
+
"support chunked prefill.",
|
|
1788
|
+
pooling_type,
|
|
1789
|
+
)
|
|
1790
|
+
return False
|
|
1791
|
+
else:
|
|
1792
|
+
# pooling_type == "last"
|
|
1793
|
+
logger.debug(
|
|
1794
|
+
"Pooling models with causal attn and last pooling support "
|
|
1795
|
+
"chunked prefill."
|
|
1796
|
+
)
|
|
1797
|
+
return True
|
|
1798
|
+
# vllm currently does not have pooling models using hybrid,
|
|
1799
|
+
# attention_free or encoder_decoder attn types.
|
|
1800
|
+
return attn_type != "encoder_decoder"
|
|
1801
|
+
else:
|
|
1802
|
+
if attn_type == "encoder_decoder":
|
|
1803
|
+
logger.debug("Encoder decoder models does not support chunked prefill.")
|
|
1804
|
+
return False
|
|
1805
|
+
logger.debug("Generative models support chunked prefill.")
|
|
1806
|
+
return True
|
|
1807
|
+
|
|
1808
|
+
@property
|
|
1809
|
+
def is_prefix_caching_supported(self) -> bool:
|
|
1810
|
+
attn_type = self.attn_type
|
|
1811
|
+
if self.pooler_config is not None:
|
|
1812
|
+
# for pooling models
|
|
1813
|
+
if attn_type == "encoder_only":
|
|
1814
|
+
logger.debug(
|
|
1815
|
+
"Pooling models with bidirectional attn does not "
|
|
1816
|
+
"support prefix caching."
|
|
1817
|
+
)
|
|
1818
|
+
return False
|
|
1819
|
+
elif attn_type == "decoder":
|
|
1820
|
+
pooling_type = self.pooler_config.pooling_type.lower()
|
|
1821
|
+
if pooling_type in ["all", "mean", "step", "cls"]:
|
|
1822
|
+
logger.debug(
|
|
1823
|
+
"Pooling models with %s pooling does not "
|
|
1824
|
+
"support prefix caching.",
|
|
1825
|
+
pooling_type,
|
|
1826
|
+
)
|
|
1827
|
+
return False
|
|
1828
|
+
else:
|
|
1829
|
+
# pooling_type == "last"
|
|
1830
|
+
logger.debug(
|
|
1831
|
+
"Pooling models with causal attn and last pooling support "
|
|
1832
|
+
"prefix caching."
|
|
1833
|
+
)
|
|
1834
|
+
return True
|
|
1835
|
+
# vllm currently does not have pooling models using hybrid,
|
|
1836
|
+
# attention_free or encoder_decoder attn types.
|
|
1837
|
+
return False
|
|
1838
|
+
else:
|
|
1839
|
+
if attn_type == "hybrid":
|
|
1840
|
+
logger.debug(
|
|
1841
|
+
"Hybrid models does not support prefix caching since the feature "
|
|
1842
|
+
"is still experimental."
|
|
1843
|
+
)
|
|
1844
|
+
return False
|
|
1845
|
+
elif attn_type == "attention_free":
|
|
1846
|
+
logger.debug(
|
|
1847
|
+
"Attention free models does not support prefix caching since the "
|
|
1848
|
+
"feature is still experimental."
|
|
1849
|
+
)
|
|
1850
|
+
return False
|
|
1851
|
+
elif attn_type == "encoder_decoder":
|
|
1852
|
+
logger.debug("Encoder decoder models does not support prefix caching.")
|
|
1853
|
+
return False
|
|
1854
|
+
else: # attn_type == "decoder"
|
|
1855
|
+
logger.debug("Generative models support prefix caching.")
|
|
1856
|
+
return True
|
|
1857
|
+
|
|
1858
|
+
def is_model_moe(
|
|
1859
|
+
self,
|
|
1860
|
+
) -> bool:
|
|
1861
|
+
return self.get_num_experts() > 1
|
|
1862
|
+
|
|
1863
|
+
def is_quantized(self) -> bool:
|
|
1864
|
+
return getattr(self.hf_config, "quantization_config", None) is not None
|
|
1865
|
+
|
|
1866
|
+
|
|
1867
|
+
def get_served_model_name(model: str, served_model_name: str | list[str] | None):
|
|
1868
|
+
"""
|
|
1869
|
+
If the input is a non-empty list, the first model_name in
|
|
1870
|
+
`served_model_name` is taken.
|
|
1871
|
+
If the input is a non-empty string, it is used directly.
|
|
1872
|
+
For cases where the input is either an empty string or an
|
|
1873
|
+
empty list, the fallback is to use `model`.
|
|
1874
|
+
"""
|
|
1875
|
+
if not served_model_name:
|
|
1876
|
+
return model
|
|
1877
|
+
if isinstance(served_model_name, list):
|
|
1878
|
+
return served_model_name[0]
|
|
1879
|
+
return served_model_name
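Usage sketch of the fallback rules documented above (the model names are made up):

def _served_name(model, served_model_name):
    # Same rules as get_served_model_name above.
    if not served_model_name:
        return model
    if isinstance(served_model_name, list):
        return served_model_name[0]
    return served_model_name

print(_served_name("org/some-model", None))                    # -> "org/some-model"
print(_served_name("org/some-model", []))                      # -> "org/some-model"
print(_served_name("org/some-model", ["alias-a", "alias-b"]))  # -> "alias-a"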
|
|
1880
|
+
|
|
1881
|
+
|
|
1882
|
+
# Some model suffixes are based on auto classes from Transformers:
|
|
1883
|
+
# https://huggingface.co/docs/transformers/en/model_doc/auto
|
|
1884
|
+
# NOTE: Items higher on this list take priority over lower ones
|
|
1885
|
+
_SUFFIX_TO_DEFAULTS: list[tuple[str, tuple[RunnerType, ConvertType]]] = [
|
|
1886
|
+
("ForCausalLM", ("generate", "none")),
|
|
1887
|
+
("ForConditionalGeneration", ("generate", "none")),
|
|
1888
|
+
("ChatModel", ("generate", "none")),
|
|
1889
|
+
("LMHeadModel", ("generate", "none")),
|
|
1890
|
+
("ForTextEncoding", ("pooling", "embed")),
|
|
1891
|
+
("EmbeddingModel", ("pooling", "embed")),
|
|
1892
|
+
("ForSequenceClassification", ("pooling", "classify")),
|
|
1893
|
+
("ForAudioClassification", ("pooling", "classify")),
|
|
1894
|
+
("ForImageClassification", ("pooling", "classify")),
|
|
1895
|
+
("ForVideoClassification", ("pooling", "classify")),
|
|
1896
|
+
("ClassificationModel", ("pooling", "classify")),
|
|
1897
|
+
("ForRewardModeling", ("pooling", "reward")),
|
|
1898
|
+
("RewardModel", ("pooling", "reward")),
|
|
1899
|
+
# Let other `*Model`s take priority
|
|
1900
|
+
("Model", ("pooling", "embed")),
|
|
1901
|
+
]
|
|
1902
|
+
|
|
1903
|
+
|
|
1904
|
+
def iter_architecture_defaults():
|
|
1905
|
+
yield from _SUFFIX_TO_DEFAULTS
|
|
1906
|
+
|
|
1907
|
+
|
|
1908
|
+
def try_match_architecture_defaults(
|
|
1909
|
+
architecture: str,
|
|
1910
|
+
*,
|
|
1911
|
+
runner_type: RunnerType | None = None,
|
|
1912
|
+
convert_type: ConvertType | None = None,
|
|
1913
|
+
) -> tuple[str, tuple[RunnerType, ConvertType]] | None:
|
|
1914
|
+
for suffix, (
|
|
1915
|
+
default_runner_type,
|
|
1916
|
+
default_convert_type,
|
|
1917
|
+
) in iter_architecture_defaults():
|
|
1918
|
+
if (
|
|
1919
|
+
(runner_type is None or runner_type == default_runner_type)
|
|
1920
|
+
and (convert_type is None or convert_type == default_convert_type)
|
|
1921
|
+
and architecture.endswith(suffix)
|
|
1922
|
+
):
|
|
1923
|
+
return suffix, (default_runner_type, default_convert_type)
|
|
1924
|
+
|
|
1925
|
+
return None
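A standalone sketch of the suffix matching above, using a trimmed-down copy of the table (architecture names are illustrative):

_suffix_defaults = [
    ("ForCausalLM", ("generate", "none")),
    ("ForSequenceClassification", ("pooling", "classify")),
    ("Model", ("pooling", "embed")),   # catch-all, checked last
]

def match_defaults(architecture: str):
    for suffix, defaults in _suffix_defaults:
        if architecture.endswith(suffix):
            return suffix, defaults
    return None

print(match_defaults("LlamaForCausalLM"))               # ('ForCausalLM', ('generate', 'none'))
print(match_defaults("BertForSequenceClassification"))  # ('ForSequenceClassification', ('pooling', 'classify'))
print(match_defaults("MistralModel"))                   # ('Model', ('pooling', 'embed'))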
|
|
1926
|
+
|
|
1927
|
+
|
|
1928
|
+
_STR_DTYPE_TO_TORCH_DTYPE = {
|
|
1929
|
+
"half": torch.float16,
|
|
1930
|
+
"float16": torch.float16,
|
|
1931
|
+
"float": torch.float32,
|
|
1932
|
+
"float32": torch.float32,
|
|
1933
|
+
"bfloat16": torch.bfloat16,
|
|
1934
|
+
}
|
|
1935
|
+
|
|
1936
|
+
# model_type -> reason
|
|
1937
|
+
_FLOAT16_NOT_SUPPORTED_MODELS = {
|
|
1938
|
+
"gemma2": "Numerical instability. Please use bfloat16 or float32 instead.",
|
|
1939
|
+
"gemma3": "Numerical instability. Please use bfloat16 or float32 instead.",
|
|
1940
|
+
"gemma3_text": "Numerical instability. Please use bfloat16 or float32 instead.",
|
|
1941
|
+
"plamo2": "Numerical instability. Please use bfloat16 or float32 instead.",
|
|
1942
|
+
"glm4": "Numerical instability. Please use bfloat16 or float32 instead.",
|
|
1943
|
+
}
|
|
1944
|
+
|
|
1945
|
+
|
|
1946
|
+
def _is_valid_dtype(model_type: str, dtype: torch.dtype):
|
|
1947
|
+
if model_type in _FLOAT16_NOT_SUPPORTED_MODELS and dtype == torch.float16: # noqa: E501, SIM103
|
|
1948
|
+
return False
|
|
1949
|
+
|
|
1950
|
+
return True
|
|
1951
|
+
|
|
1952
|
+
|
|
1953
|
+
def _check_valid_dtype(model_type: str, dtype: torch.dtype):
|
|
1954
|
+
if model_type in _FLOAT16_NOT_SUPPORTED_MODELS and dtype == torch.float16:
|
|
1955
|
+
reason = _FLOAT16_NOT_SUPPORTED_MODELS[model_type]
|
|
1956
|
+
raise ValueError(
|
|
1957
|
+
f"The model type {model_type!r} does not support float16. Reason: {reason}"
|
|
1958
|
+
)
|
|
1959
|
+
|
|
1960
|
+
return True
|
|
1961
|
+
|
|
1962
|
+
|
|
1963
|
+
def _find_dtype(
|
|
1964
|
+
model_id: str,
|
|
1965
|
+
config: PretrainedConfig,
|
|
1966
|
+
*,
|
|
1967
|
+
revision: str | None,
|
|
1968
|
+
):
|
|
1969
|
+
# NOTE: getattr(config, "dtype", torch.float32) is not correct
|
|
1970
|
+
# because config.dtype can be None.
|
|
1971
|
+
config_dtype = getattr(config, "dtype", None)
|
|
1972
|
+
|
|
1973
|
+
# Fallbacks for multi-modal models if the root config
|
|
1974
|
+
# does not define dtype
|
|
1975
|
+
if config_dtype is None:
|
|
1976
|
+
config_dtype = getattr(config.get_text_config(), "dtype", None)
|
|
1977
|
+
if config_dtype is None and hasattr(config, "vision_config"):
|
|
1978
|
+
config_dtype = getattr(config.vision_config, "dtype", None)
|
|
1979
|
+
if config_dtype is None and hasattr(config, "encoder_config"):
|
|
1980
|
+
config_dtype = getattr(config.encoder_config, "dtype", None)
|
|
1981
|
+
|
|
1982
|
+
# Try to read the dtype of the weights if they are in safetensors format
|
|
1983
|
+
if config_dtype is None:
|
|
1984
|
+
repo_mt = try_get_safetensors_metadata(model_id, revision=revision)
|
|
1985
|
+
|
|
1986
|
+
if repo_mt and (files_mt := repo_mt.files_metadata):
|
|
1987
|
+
param_dtypes: set[torch.dtype] = {
|
|
1988
|
+
_SAFETENSORS_TO_TORCH_DTYPE[dtype_str]
|
|
1989
|
+
for file_mt in files_mt.values()
|
|
1990
|
+
for dtype_str in file_mt.parameter_count
|
|
1991
|
+
if dtype_str in _SAFETENSORS_TO_TORCH_DTYPE
|
|
1992
|
+
}
|
|
1993
|
+
|
|
1994
|
+
if param_dtypes:
|
|
1995
|
+
return common_broadcastable_dtype(param_dtypes)
|
|
1996
|
+
|
|
1997
|
+
if config_dtype is None:
|
|
1998
|
+
config_dtype = torch.float32
|
|
1999
|
+
|
|
2000
|
+
return config_dtype
|
|
2001
|
+
|
|
2002
|
+
|
|
2003
|
+
def _resolve_auto_dtype(
|
|
2004
|
+
model_type: str,
|
|
2005
|
+
config_dtype: torch.dtype,
|
|
2006
|
+
*,
|
|
2007
|
+
is_pooling_model: bool,
|
|
2008
|
+
):
|
|
2009
|
+
from vllm.platforms import current_platform
|
|
2010
|
+
|
|
2011
|
+
supported_dtypes = [
|
|
2012
|
+
dtype
|
|
2013
|
+
for dtype in current_platform.supported_dtypes
|
|
2014
|
+
if _is_valid_dtype(model_type, dtype)
|
|
2015
|
+
]
|
|
2016
|
+
|
|
2017
|
+
if is_pooling_model and torch.float16 in supported_dtypes:
|
|
2018
|
+
preferred_dtype = torch.float16
|
|
2019
|
+
else:
|
|
2020
|
+
preferred_dtype = supported_dtypes[0]
|
|
2021
|
+
|
|
2022
|
+
# Downcast for float32 models
|
|
2023
|
+
if config_dtype == torch.float32:
|
|
2024
|
+
config_dtype = preferred_dtype
|
|
2025
|
+
|
|
2026
|
+
if config_dtype in supported_dtypes:
|
|
2027
|
+
return config_dtype
|
|
2028
|
+
|
|
2029
|
+
# Ensure device compatibility
|
|
2030
|
+
device_name = current_platform.get_device_name()
|
|
2031
|
+
device_capability = current_platform.get_device_capability()
|
|
2032
|
+
|
|
2033
|
+
if device_capability is None:
|
|
2034
|
+
device_str = f"{device_name!r}"
|
|
2035
|
+
else:
|
|
2036
|
+
version_str = device_capability.as_version_str()
|
|
2037
|
+
device_str = f"{device_name!r} (with compute capability {version_str})"
|
|
2038
|
+
|
|
2039
|
+
logger.warning(
|
|
2040
|
+
"Your device %s doesn't support %s. Falling back to %s for compatibility.",
|
|
2041
|
+
device_str,
|
|
2042
|
+
config_dtype,
|
|
2043
|
+
preferred_dtype,
|
|
2044
|
+
)
|
|
2045
|
+
|
|
2046
|
+
return preferred_dtype
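A standalone sketch of the dtype="auto" policy above, with an assumed platform support list (the order and contents vary per platform):

import torch

supported_dtypes = [torch.bfloat16, torch.float16, torch.float32]  # assumed platform order

def resolve_auto(config_dtype: torch.dtype, is_pooling_model: bool) -> torch.dtype:
    if is_pooling_model and torch.float16 in supported_dtypes:
        preferred = torch.float16
    else:
        preferred = supported_dtypes[0]
    if config_dtype == torch.float32:      # float32 checkpoints are downcast
        config_dtype = preferred
    return config_dtype if config_dtype in supported_dtypes else preferred

print(resolve_auto(torch.float32, is_pooling_model=False))  # torch.bfloat16
print(resolve_auto(torch.float32, is_pooling_model=True))   # torch.float16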
|
|
2047
|
+
|
|
2048
|
+
|
|
2049
|
+
def _get_and_verify_dtype(
|
|
2050
|
+
model_id: str,
|
|
2051
|
+
config: PretrainedConfig,
|
|
2052
|
+
dtype: str | torch.dtype,
|
|
2053
|
+
*,
|
|
2054
|
+
is_pooling_model: bool,
|
|
2055
|
+
revision: str | None = None,
|
|
2056
|
+
) -> torch.dtype:
|
|
2057
|
+
config_dtype = _find_dtype(model_id, config, revision=revision)
|
|
2058
|
+
model_type = config.model_type
|
|
2059
|
+
|
|
2060
|
+
if isinstance(dtype, str):
|
|
2061
|
+
dtype = dtype.lower()
|
|
2062
|
+
if dtype == "auto":
|
|
2063
|
+
# Set default dtype from model config
|
|
2064
|
+
torch_dtype = _resolve_auto_dtype(
|
|
2065
|
+
model_type,
|
|
2066
|
+
config_dtype,
|
|
2067
|
+
is_pooling_model=is_pooling_model,
|
|
2068
|
+
)
|
|
2069
|
+
else:
|
|
2070
|
+
if dtype not in _STR_DTYPE_TO_TORCH_DTYPE:
|
|
2071
|
+
raise ValueError(f"Unknown dtype: {dtype!r}")
|
|
2072
|
+
torch_dtype = _STR_DTYPE_TO_TORCH_DTYPE[dtype]
|
|
2073
|
+
elif isinstance(dtype, torch.dtype):
|
|
2074
|
+
torch_dtype = dtype
|
|
2075
|
+
else:
|
|
2076
|
+
raise ValueError(f"Unknown dtype: {dtype}")
|
|
2077
|
+
|
|
2078
|
+
_check_valid_dtype(model_type, torch_dtype)
|
|
2079
|
+
|
|
2080
|
+
if torch_dtype != config_dtype:
|
|
2081
|
+
if torch_dtype == torch.float32:
|
|
2082
|
+
# Upcasting to float32 is allowed.
|
|
2083
|
+
logger.info("Upcasting %s to %s.", config_dtype, torch_dtype)
|
|
2084
|
+
elif config_dtype == torch.float32:
|
|
2085
|
+
# Downcasting from float32 to float16 or bfloat16 is allowed.
|
|
2086
|
+
logger.info("Downcasting %s to %s.", config_dtype, torch_dtype)
|
|
2087
|
+
else:
|
|
2088
|
+
# Casting between float16 and bfloat16 is allowed with a warning.
|
|
2089
|
+
logger.warning("Casting %s to %s.", config_dtype, torch_dtype)
|
|
2090
|
+
|
|
2091
|
+
return torch_dtype
|
|
2092
|
+
|
|
2093
|
+
|
|
2094
|
+
def _get_head_dtype(
|
|
2095
|
+
config: PretrainedConfig, dtype: torch.dtype, runner_type: str
|
|
2096
|
+
) -> torch.dtype:
|
|
2097
|
+
head_dtype: str | torch.dtype | None = getattr(config, "head_dtype", None)
|
|
2098
|
+
|
|
2099
|
+
if head_dtype == "model":
|
|
2100
|
+
return dtype
|
|
2101
|
+
elif isinstance(head_dtype, str):
|
|
2102
|
+
head_dtype = head_dtype.lower()
|
|
2103
|
+
if head_dtype not in _STR_DTYPE_TO_TORCH_DTYPE:
|
|
2104
|
+
raise ValueError(f"Unknown dtype: {head_dtype!r}")
|
|
2105
|
+
return _STR_DTYPE_TO_TORCH_DTYPE[head_dtype]
|
|
2106
|
+
elif isinstance(head_dtype, torch.dtype):
|
|
2107
|
+
return head_dtype
|
|
2108
|
+
elif head_dtype is None:
|
|
2109
|
+
if torch.float32 not in current_platform.supported_dtypes:
|
|
2110
|
+
return dtype
|
|
2111
|
+
if runner_type == "pooling":
|
|
2112
|
+
return torch.float32
|
|
2113
|
+
return dtype
|
|
2114
|
+
else:
|
|
2115
|
+
raise ValueError(f"Unknown dtype: {head_dtype}")
|
|
2116
|
+
|
|
2117
|
+
|
|
2118
|
+
def _get_and_verify_max_len(
|
|
2119
|
+
hf_config: PretrainedConfig,
|
|
2120
|
+
tokenizer_config: dict | None,
|
|
2121
|
+
max_model_len: int | None,
|
|
2122
|
+
disable_sliding_window: bool,
|
|
2123
|
+
sliding_window: int | None,
|
|
2124
|
+
spec_target_max_model_len: int | None = None,
|
|
2125
|
+
encoder_config: Any | None = None,
|
|
2126
|
+
) -> int:
|
|
2127
|
+
"""Get and verify the model's maximum length."""
|
|
2128
|
+
derived_max_model_len = float("inf")
|
|
2129
|
+
possible_keys = [
|
|
2130
|
+
# OPT
|
|
2131
|
+
"max_position_embeddings",
|
|
2132
|
+
# GPT-2
|
|
2133
|
+
"n_positions",
|
|
2134
|
+
# MPT
|
|
2135
|
+
"max_seq_len",
|
|
2136
|
+
# ChatGLM2
|
|
2137
|
+
"seq_length",
|
|
2138
|
+
# Command-R
|
|
2139
|
+
"model_max_length",
|
|
2140
|
+
# Whisper
|
|
2141
|
+
"max_target_positions",
|
|
2142
|
+
# Others
|
|
2143
|
+
"max_sequence_length",
|
|
2144
|
+
"max_seq_length",
|
|
2145
|
+
"seq_len",
|
|
2146
|
+
]
|
|
2147
|
+
# Choose the smallest "max_length" from the possible keys
|
|
2148
|
+
max_len_key = None
|
|
2149
|
+
for key in possible_keys:
|
|
2150
|
+
max_len = getattr(hf_config, key, None)
|
|
2151
|
+
if max_len is not None:
|
|
2152
|
+
max_len_key = key if max_len < derived_max_model_len else max_len_key
|
|
2153
|
+
derived_max_model_len = min(derived_max_model_len, max_len)
|
|
2154
|
+
# For Command-R / Cohere, Cohere2 / Aya Vision models
|
|
2155
|
+
if tmp_max_len := getattr(hf_config, "model_max_length", None):
|
|
2156
|
+
max_len_key = "model_max_length"
|
|
2157
|
+
derived_max_model_len = tmp_max_len
|
|
2158
|
+
|
|
2159
|
+
# If sliding window is manually disabled, max_length should be less
|
|
2160
|
+
# than the sliding window length in the model config.
|
|
2161
|
+
if (
|
|
2162
|
+
disable_sliding_window
|
|
2163
|
+
and sliding_window is not None
|
|
2164
|
+
and sliding_window < derived_max_model_len
|
|
2165
|
+
):
|
|
2166
|
+
max_len_key = "sliding_window"
|
|
2167
|
+
derived_max_model_len = sliding_window
|
|
2168
|
+
|
|
2169
|
+
# Consider model_max_length in tokenizer_config
|
|
2170
|
+
if tokenizer_config:
|
|
2171
|
+
tokenizer_model_max_length = tokenizer_config.get(
|
|
2172
|
+
"model_max_length", derived_max_model_len
|
|
2173
|
+
)
|
|
2174
|
+
derived_max_model_len = min(derived_max_model_len, tokenizer_model_max_length)
|
|
2175
|
+
|
|
2176
|
+
# If none of the keys were found in the config, use a default and
|
|
2177
|
+
# log a warning.
|
|
2178
|
+
if derived_max_model_len == float("inf"):
|
|
2179
|
+
if max_model_len is not None:
|
|
2180
|
+
# If max_model_len is specified, we use it.
|
|
2181
|
+
return max_model_len
|
|
2182
|
+
|
|
2183
|
+
if spec_target_max_model_len is not None:
|
|
2184
|
+
# If this is a speculative draft model, we use the max model len
|
|
2185
|
+
# from the target model.
|
|
2186
|
+
return spec_target_max_model_len
|
|
2187
|
+
|
|
2188
|
+
default_max_len = 2048
|
|
2189
|
+
logger.warning(
|
|
2190
|
+
"The model's config.json does not contain any of the following "
|
|
2191
|
+
"keys to determine the original maximum length of the model: "
|
|
2192
|
+
"%s. Assuming the model's maximum length is %d.",
|
|
2193
|
+
possible_keys,
|
|
2194
|
+
default_max_len,
|
|
2195
|
+
)
|
|
2196
|
+
derived_max_model_len = default_max_len
|
|
2197
|
+
|
|
2198
|
+
# In Transformers v5 rope_parameters could be TypedDict or dict[str, TypedDict].
|
|
2199
|
+
# To simplify the verification, we convert it to dict[str, TypedDict].
|
|
2200
|
+
rope_parameters = getattr(hf_config, "rope_parameters", None)
|
|
2201
|
+
if rope_parameters and not set(rope_parameters.keys()).issubset(
|
|
2202
|
+
ALLOWED_LAYER_TYPES
|
|
2203
|
+
):
|
|
2204
|
+
rope_parameters = {"": rope_parameters}
|
|
2205
|
+
|
|
2206
|
+
# NOTE(woosuk): Gemma3's max_model_len (128K) is already scaled by RoPE
|
|
2207
|
+
# scaling, so we skip applying the scaling factor again.
|
|
2208
|
+
if rope_parameters is not None and "gemma3" not in hf_config.model_type:
|
|
2209
|
+
scaling_factor = 1.0
|
|
2210
|
+
for rp in rope_parameters.values():
|
|
2211
|
+
# No need to consider "type" key because of patch_rope_parameters when
|
|
2212
|
+
# loading HF config
|
|
2213
|
+
rope_type = rp["rope_type"]
|
|
2214
|
+
|
|
2215
|
+
if rope_type not in ("su", "longrope", "llama3"):
|
|
2216
|
+
# NOTE: rope_type == "default" does not define factor https://github.com/huggingface/transformers/blob/v4.45.2/src/transformers/modeling_rope_utils.py
|
|
2217
|
+
# NOTE: This assumes all layer types have the same scaling factor.
|
|
2218
|
+
scaling_factor = rp.get("factor", scaling_factor)
|
|
2219
|
+
|
|
2220
|
+
if rope_type == "yarn":
|
|
2221
|
+
derived_max_model_len = rp["original_max_position_embeddings"]
|
|
2222
|
+
# Do this outside loop since all layer types should have the same scaling
|
|
2223
|
+
derived_max_model_len *= scaling_factor
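Worked example of the scaling above for a YaRN-scaled config (the numbers are illustrative):

rope_parameters = {"": {"rope_type": "yarn", "factor": 4.0,
                        "original_max_position_embeddings": 32768}}

derived_max_model_len = 131072.0   # e.g. max_position_embeddings from the config
scaling_factor = 1.0
for rp in rope_parameters.values():
    if rp["rope_type"] not in ("su", "longrope", "llama3"):
        scaling_factor = rp.get("factor", scaling_factor)
    if rp["rope_type"] == "yarn":
        derived_max_model_len = rp["original_max_position_embeddings"]
derived_max_model_len *= scaling_factor
print(derived_max_model_len)       # 32768 * 4.0 = 131072.0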
|
|
2224
|
+
|
|
2225
|
+
if encoder_config and "max_seq_length" in encoder_config:
|
|
2226
|
+
derived_max_model_len = encoder_config["max_seq_length"]
|
|
2227
|
+
|
|
2228
|
+
# If the user didn't specify `max_model_len`, then use that derived from
|
|
2229
|
+
# the model config as a default value.
|
|
2230
|
+
if max_model_len is None:
|
|
2231
|
+
# For LongRoPE, default to original_max_position_embeddings to avoid
|
|
2232
|
+
# performance degradation for shorter sequences
|
|
2233
|
+
if rope_parameters is not None and any(
|
|
2234
|
+
rp["rope_type"] == "longrope" for rp in rope_parameters.values()
|
|
2235
|
+
):
|
|
2236
|
+
max_model_len = int(
|
|
2237
|
+
getattr(
|
|
2238
|
+
hf_config, "original_max_position_embeddings", derived_max_model_len
|
|
2239
|
+
)
|
|
2240
|
+
)
|
|
2241
|
+
else:
|
|
2242
|
+
max_model_len = int(derived_max_model_len)
|
|
2243
|
+
max_model_len = current_platform.check_max_model_len(max_model_len)
|
|
2244
|
+
|
|
2245
|
+
# If the user specified a max length, make sure it is smaller than the
|
|
2246
|
+
# derived length from the HF model config.
|
|
2247
|
+
elif max_model_len > derived_max_model_len:
|
|
2248
|
+
# Some models might have a separate key for specifying model_max_length
|
|
2249
|
+
# that will be bigger than derived_max_model_len. We compare user input
|
|
2250
|
+
# with model_max_length and allow this override when it's smaller.
|
|
2251
|
+
model_max_length = getattr(hf_config, "model_max_length", None)
|
|
2252
|
+
if model_max_length is None or max_model_len > model_max_length:
|
|
2253
|
+
msg = (
|
|
2254
|
+
f"User-specified max_model_len ({max_model_len}) is greater "
|
|
2255
|
+
f"than the derived max_model_len ({max_len_key}="
|
|
2256
|
+
f"{derived_max_model_len} or model_max_length="
|
|
2257
|
+
f"{model_max_length} in model's config.json)."
|
|
2258
|
+
)
|
|
2259
|
+
warning = (
|
|
2260
|
+
"VLLM_ALLOW_LONG_MAX_MODEL_LEN must be used with extreme "
|
|
2261
|
+
"caution. If the model uses relative position encoding (RoPE), "
|
|
2262
|
+
"positions exceeding derived_max_model_len lead to nan. If the "
|
|
2263
|
+
"model uses absolute position encoding, positions exceeding "
|
|
2264
|
+
"derived_max_model_len will cause a CUDA array out-of-bounds "
|
|
2265
|
+
"error."
|
|
2266
|
+
)
|
|
2267
|
+
if envs.VLLM_ALLOW_LONG_MAX_MODEL_LEN:
|
|
2268
|
+
logger.warning_once("%s %s", msg, warning)
|
|
2269
|
+
else:
|
|
2270
|
+
raise ValueError(
|
|
2271
|
+
f"{msg} To allow overriding this maximum, set "
|
|
2272
|
+
f"the env var VLLM_ALLOW_LONG_MAX_MODEL_LEN=1. {warning}"
|
|
2273
|
+
)
|
|
2274
|
+
return int(max_model_len)
|