vllm-cpu 0.8.5.post2 (vllm_cpu-0.8.5.post2-cp310-cp310-manylinux_2_17_x86_64.whl)
This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.
Potentially problematic release: this version of vllm-cpu has been flagged as possibly problematic.
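Since a wheel is just a zip archive, a file inventory like the one below can be reproduced locally with nothing but the Python standard library. The sketch below is an illustration, not the registry's own diff tooling; it assumes the wheel has already been downloaded under its canonical filename and simply prints each member with its size.

```python
# Minimal sketch: list the files inside a downloaded wheel.
# Assumption: the wheel sits in the current directory under this filename.
from zipfile import ZipFile

WHEEL = "vllm_cpu-0.8.5.post2-cp310-cp310-manylinux_2_17_x86_64.whl"

with ZipFile(WHEEL) as wheel:
    for info in wheel.infolist():
        if not info.is_dir():
            # Each entry corresponds to one line of the inventory below.
            print(f"{info.filename}  ({info.file_size} bytes)")
```

A registry diff additionally compares two such inventories (and file contents) between versions; here the previous version had no corresponding files, which is why every entry shows only added lines.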
- vllm/_C.abi3.so +0 -0
- vllm/__init__.py +170 -0
- vllm/_custom_ops.py +1536 -0
- vllm/_ipex_ops.py +241 -0
- vllm/_version.py +34 -0
- vllm/adapter_commons/__init__.py +0 -0
- vllm/adapter_commons/layers.py +16 -0
- vllm/adapter_commons/models.py +105 -0
- vllm/adapter_commons/request.py +25 -0
- vllm/adapter_commons/utils.py +92 -0
- vllm/adapter_commons/worker_manager.py +38 -0
- vllm/assets/__init__.py +0 -0
- vllm/assets/audio.py +38 -0
- vllm/assets/base.py +40 -0
- vllm/assets/image.py +31 -0
- vllm/assets/video.py +103 -0
- vllm/attention/__init__.py +19 -0
- vllm/attention/backends/__init__.py +0 -0
- vllm/attention/backends/abstract.py +306 -0
- vllm/attention/backends/blocksparse_attn.py +457 -0
- vllm/attention/backends/cpu_mla.py +303 -0
- vllm/attention/backends/flash_attn.py +999 -0
- vllm/attention/backends/flashinfer.py +1092 -0
- vllm/attention/backends/flashmla.py +242 -0
- vllm/attention/backends/hpu_attn.py +301 -0
- vllm/attention/backends/ipex_attn.py +396 -0
- vllm/attention/backends/mla/__init__.py +0 -0
- vllm/attention/backends/mla/common.py +1444 -0
- vllm/attention/backends/pallas.py +346 -0
- vllm/attention/backends/placeholder_attn.py +399 -0
- vllm/attention/backends/rocm_aiter_mla.py +412 -0
- vllm/attention/backends/rocm_flash_attn.py +969 -0
- vllm/attention/backends/torch_sdpa.py +691 -0
- vllm/attention/backends/triton_mla.py +113 -0
- vllm/attention/backends/utils.py +609 -0
- vllm/attention/backends/xformers.py +798 -0
- vllm/attention/layer.py +443 -0
- vllm/attention/ops/__init__.py +0 -0
- vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
- vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +432 -0
- vllm/attention/ops/blocksparse_attention/interface.py +238 -0
- vllm/attention/ops/blocksparse_attention/utils.py +244 -0
- vllm/attention/ops/chunked_prefill_paged_decode.py +366 -0
- vllm/attention/ops/flashmla.py +115 -0
- vllm/attention/ops/hpu_paged_attn.py +105 -0
- vllm/attention/ops/ipex_attn.py +193 -0
- vllm/attention/ops/merge_attn_states.py +42 -0
- vllm/attention/ops/nki_flash_attn.py +905 -0
- vllm/attention/ops/paged_attn.py +255 -0
- vllm/attention/ops/prefix_prefill.py +902 -0
- vllm/attention/ops/rocm_aiter_mla.py +42 -0
- vllm/attention/ops/rocm_aiter_paged_attn.py +101 -0
- vllm/attention/ops/triton_decode_attention.py +675 -0
- vllm/attention/ops/triton_flash_attention.py +1375 -0
- vllm/attention/ops/triton_merge_attn_states.py +96 -0
- vllm/attention/selector.py +186 -0
- vllm/attention/utils/fa_utils.py +54 -0
- vllm/beam_search.py +82 -0
- vllm/benchmarks/__init__.py +0 -0
- vllm/benchmarks/datasets.py +831 -0
- vllm/benchmarks/endpoint_request_func.py +160 -0
- vllm/benchmarks/latency.py +181 -0
- vllm/benchmarks/serve.py +925 -0
- vllm/benchmarks/throughput.py +608 -0
- vllm/benchmarks/utils.py +69 -0
- vllm/collect_env.py +795 -0
- vllm/compilation/__init__.py +0 -0
- vllm/compilation/backends.py +715 -0
- vllm/compilation/compiler_interface.py +437 -0
- vllm/compilation/counter.py +33 -0
- vllm/compilation/decorators.py +249 -0
- vllm/compilation/fix_functionalization.py +182 -0
- vllm/compilation/fusion.py +617 -0
- vllm/compilation/fx_utils.py +60 -0
- vllm/compilation/inductor_pass.py +114 -0
- vllm/compilation/monitor.py +38 -0
- vllm/compilation/multi_output_match.py +108 -0
- vllm/compilation/noop_elimination.py +135 -0
- vllm/compilation/pass_manager.py +74 -0
- vllm/compilation/sequence_parallelism.py +266 -0
- vllm/compilation/torch25_custom_graph_pass.py +41 -0
- vllm/compilation/vllm_inductor_pass.py +68 -0
- vllm/compilation/wrapper.py +129 -0
- vllm/config.py +4179 -0
- vllm/connections.py +170 -0
- vllm/core/__init__.py +0 -0
- vllm/core/block/__init__.py +0 -0
- vllm/core/block/block_table.py +398 -0
- vllm/core/block/common.py +370 -0
- vllm/core/block/cpu_gpu_block_allocator.py +440 -0
- vllm/core/block/interfaces.py +318 -0
- vllm/core/block/naive_block.py +465 -0
- vllm/core/block/prefix_caching_block.py +1134 -0
- vllm/core/block/utils.py +27 -0
- vllm/core/block_manager.py +520 -0
- vllm/core/evictor.py +156 -0
- vllm/core/interfaces.py +134 -0
- vllm/core/placeholder_block_space_manager.py +99 -0
- vllm/core/scheduler.py +2060 -0
- vllm/device_allocator/__init__.py +0 -0
- vllm/device_allocator/cumem.py +280 -0
- vllm/distributed/__init__.py +5 -0
- vllm/distributed/communication_op.py +40 -0
- vllm/distributed/device_communicators/__init__.py +0 -0
- vllm/distributed/device_communicators/base_device_communicator.py +151 -0
- vllm/distributed/device_communicators/cpu_communicator.py +139 -0
- vllm/distributed/device_communicators/cuda_communicator.py +131 -0
- vllm/distributed/device_communicators/cuda_wrapper.py +179 -0
- vllm/distributed/device_communicators/custom_all_reduce.py +301 -0
- vllm/distributed/device_communicators/custom_all_reduce_utils.py +257 -0
- vllm/distributed/device_communicators/hpu_communicator.py +45 -0
- vllm/distributed/device_communicators/neuron_communicator.py +19 -0
- vllm/distributed/device_communicators/pynccl.py +217 -0
- vllm/distributed/device_communicators/pynccl_wrapper.py +340 -0
- vllm/distributed/device_communicators/shm_broadcast.py +557 -0
- vllm/distributed/device_communicators/tpu_communicator.py +93 -0
- vllm/distributed/device_communicators/xpu_communicator.py +54 -0
- vllm/distributed/kv_transfer/README.md +29 -0
- vllm/distributed/kv_transfer/__init__.py +11 -0
- vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
- vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/base.py +127 -0
- vllm/distributed/kv_transfer/kv_connector/factory.py +107 -0
- vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +98 -0
- vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +201 -0
- vllm/distributed/kv_transfer/kv_connector/simple_connector.py +328 -0
- vllm/distributed/kv_transfer/kv_connector/utils.py +90 -0
- vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +8 -0
- vllm/distributed/kv_transfer/kv_connector/v1/base.py +209 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +131 -0
- vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +383 -0
- vllm/distributed/kv_transfer/kv_connector_agent.py +76 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +174 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +160 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +236 -0
- vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_pipe/base.py +66 -0
- vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +279 -0
- vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +279 -0
- vllm/distributed/kv_transfer/kv_transfer_state.py +70 -0
- vllm/distributed/parallel_state.py +1209 -0
- vllm/distributed/utils.py +366 -0
- vllm/engine/__init__.py +0 -0
- vllm/engine/arg_utils.py +1724 -0
- vllm/engine/async_llm_engine.py +1261 -0
- vllm/engine/async_timeout.py +191 -0
- vllm/engine/llm_engine.py +2150 -0
- vllm/engine/metrics.py +717 -0
- vllm/engine/metrics_types.py +96 -0
- vllm/engine/multiprocessing/__init__.py +183 -0
- vllm/engine/multiprocessing/client.py +745 -0
- vllm/engine/multiprocessing/engine.py +450 -0
- vllm/engine/output_processor/__init__.py +0 -0
- vllm/engine/output_processor/interfaces.py +74 -0
- vllm/engine/output_processor/multi_step.py +210 -0
- vllm/engine/output_processor/single_step.py +136 -0
- vllm/engine/output_processor/stop_checker.py +130 -0
- vllm/engine/output_processor/util.py +27 -0
- vllm/engine/protocol.py +302 -0
- vllm/entrypoints/__init__.py +0 -0
- vllm/entrypoints/api_server.py +177 -0
- vllm/entrypoints/chat_utils.py +1259 -0
- vllm/entrypoints/cli/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/base.py +38 -0
- vllm/entrypoints/cli/benchmark/latency.py +29 -0
- vllm/entrypoints/cli/benchmark/main.py +53 -0
- vllm/entrypoints/cli/benchmark/serve.py +29 -0
- vllm/entrypoints/cli/benchmark/throughput.py +29 -0
- vllm/entrypoints/cli/collect_env.py +35 -0
- vllm/entrypoints/cli/main.py +59 -0
- vllm/entrypoints/cli/openai.py +175 -0
- vllm/entrypoints/cli/serve.py +59 -0
- vllm/entrypoints/cli/types.py +24 -0
- vllm/entrypoints/launcher.py +146 -0
- vllm/entrypoints/llm.py +1450 -0
- vllm/entrypoints/logger.py +44 -0
- vllm/entrypoints/openai/__init__.py +0 -0
- vllm/entrypoints/openai/api_server.py +1130 -0
- vllm/entrypoints/openai/cli_args.py +296 -0
- vllm/entrypoints/openai/logits_processors.py +89 -0
- vllm/entrypoints/openai/protocol.py +1806 -0
- vllm/entrypoints/openai/run_batch.py +439 -0
- vllm/entrypoints/openai/serving_chat.py +1210 -0
- vllm/entrypoints/openai/serving_completion.py +557 -0
- vllm/entrypoints/openai/serving_embedding.py +245 -0
- vllm/entrypoints/openai/serving_engine.py +569 -0
- vllm/entrypoints/openai/serving_models.py +314 -0
- vllm/entrypoints/openai/serving_pooling.py +237 -0
- vllm/entrypoints/openai/serving_score.py +439 -0
- vllm/entrypoints/openai/serving_tokenization.py +147 -0
- vllm/entrypoints/openai/serving_transcription.py +421 -0
- vllm/entrypoints/openai/tool_parsers/__init__.py +19 -0
- vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +163 -0
- vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +254 -0
- vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +232 -0
- vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +370 -0
- vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +211 -0
- vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +303 -0
- vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +262 -0
- vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +342 -0
- vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +110 -0
- vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +292 -0
- vllm/entrypoints/openai/tool_parsers/utils.py +123 -0
- vllm/entrypoints/score_utils.py +49 -0
- vllm/entrypoints/ssl.py +74 -0
- vllm/entrypoints/utils.py +136 -0
- vllm/env_override.py +34 -0
- vllm/envs.py +800 -0
- vllm/executor/__init__.py +0 -0
- vllm/executor/executor_base.py +400 -0
- vllm/executor/mp_distributed_executor.py +243 -0
- vllm/executor/msgspec_utils.py +29 -0
- vllm/executor/multiproc_worker_utils.py +312 -0
- vllm/executor/ray_distributed_executor.py +700 -0
- vllm/executor/ray_utils.py +400 -0
- vllm/executor/uniproc_executor.py +141 -0
- vllm/forward_context.py +159 -0
- vllm/inputs/__init__.py +37 -0
- vllm/inputs/data.py +248 -0
- vllm/inputs/parse.py +121 -0
- vllm/inputs/preprocess.py +745 -0
- vllm/inputs/registry.py +212 -0
- vllm/jsontree.py +79 -0
- vllm/logger.py +210 -0
- vllm/logging_utils/__init__.py +7 -0
- vllm/logging_utils/formatter.py +17 -0
- vllm/logits_process.py +121 -0
- vllm/lora/__init__.py +0 -0
- vllm/lora/fully_sharded_layers.py +335 -0
- vllm/lora/layers.py +1263 -0
- vllm/lora/lora.py +198 -0
- vllm/lora/models.py +802 -0
- vllm/lora/ops/__init__.py +0 -0
- vllm/lora/ops/torch_ops/__init__.py +15 -0
- vllm/lora/ops/torch_ops/lora_ops.py +115 -0
- vllm/lora/ops/triton_ops/__init__.py +11 -0
- vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
- vllm/lora/ops/triton_ops/lora_expand.py +293 -0
- vllm/lora/ops/triton_ops/lora_kernel_metadata.py +147 -0
- vllm/lora/ops/triton_ops/lora_shrink.py +247 -0
- vllm/lora/ops/triton_ops/utils.py +121 -0
- vllm/lora/peft_helper.py +115 -0
- vllm/lora/punica_wrapper/__init__.py +9 -0
- vllm/lora/punica_wrapper/punica_base.py +483 -0
- vllm/lora/punica_wrapper/punica_cpu.py +348 -0
- vllm/lora/punica_wrapper/punica_gpu.py +289 -0
- vllm/lora/punica_wrapper/punica_hpu.py +144 -0
- vllm/lora/punica_wrapper/punica_selector.py +20 -0
- vllm/lora/punica_wrapper/utils.py +161 -0
- vllm/lora/request.py +97 -0
- vllm/lora/resolver.py +83 -0
- vllm/lora/utils.py +237 -0
- vllm/lora/worker_manager.py +251 -0
- vllm/model_executor/__init__.py +15 -0
- vllm/model_executor/custom_op.py +153 -0
- vllm/model_executor/guided_decoding/__init__.py +180 -0
- vllm/model_executor/guided_decoding/guidance_decoding.py +63 -0
- vllm/model_executor/guided_decoding/guidance_logits_processors.py +85 -0
- vllm/model_executor/guided_decoding/guided_fields.py +42 -0
- vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +66 -0
- vllm/model_executor/guided_decoding/outlines_decoding.py +154 -0
- vllm/model_executor/guided_decoding/outlines_logits_processors.py +271 -0
- vllm/model_executor/guided_decoding/reasoner/__init__.py +35 -0
- vllm/model_executor/guided_decoding/utils.py +241 -0
- vllm/model_executor/guided_decoding/xgrammar_decoding.py +425 -0
- vllm/model_executor/layers/__init__.py +0 -0
- vllm/model_executor/layers/activation.py +368 -0
- vllm/model_executor/layers/fused_moe/__init__.py +51 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/README +12 -0
- vllm/model_executor/layers/fused_moe/cutlass_moe.py +180 -0
- vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +294 -0
- vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +374 -0
- vllm/model_executor/layers/fused_moe/fused_moe.py +1539 -0
- vllm/model_executor/layers/fused_moe/layer.py +949 -0
- vllm/model_executor/layers/fused_moe/moe_align_block_size.py +243 -0
- vllm/model_executor/layers/fused_moe/moe_pallas.py +64 -0
- vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +59 -0
- vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +416 -0
- vllm/model_executor/layers/fused_moe/utils.py +48 -0
- vllm/model_executor/layers/layernorm.py +277 -0
- vllm/model_executor/layers/lightning_attn.py +651 -0
- vllm/model_executor/layers/linear.py +1518 -0
- vllm/model_executor/layers/logits_processor.py +196 -0
- vllm/model_executor/layers/mamba/__init__.py +0 -0
- vllm/model_executor/layers/mamba/mamba2_metadata.py +109 -0
- vllm/model_executor/layers/mamba/mamba_mixer.py +244 -0
- vllm/model_executor/layers/mamba/mamba_mixer2.py +538 -0
- vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
- vllm/model_executor/layers/mamba/ops/causal_conv1d.py +104 -0
- vllm/model_executor/layers/mamba/ops/mamba_ssm.py +415 -0
- vllm/model_executor/layers/mamba/ops/ssd_bmm.py +261 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +588 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +750 -0
- vllm/model_executor/layers/mamba/ops/ssd_combined.py +231 -0
- vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +205 -0
- vllm/model_executor/layers/pooler.py +336 -0
- vllm/model_executor/layers/quantization/__init__.py +153 -0
- vllm/model_executor/layers/quantization/aqlm.py +374 -0
- vllm/model_executor/layers/quantization/awq.py +184 -0
- vllm/model_executor/layers/quantization/awq_marlin.py +518 -0
- vllm/model_executor/layers/quantization/awq_triton.py +319 -0
- vllm/model_executor/layers/quantization/base_config.py +145 -0
- vllm/model_executor/layers/quantization/bitblas.py +459 -0
- vllm/model_executor/layers/quantization/bitsandbytes.py +396 -0
- vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +624 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1100 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +20 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +357 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +54 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +159 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +119 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +149 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +110 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +200 -0
- vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +205 -0
- vllm/model_executor/layers/quantization/compressed_tensors/utils.py +213 -0
- vllm/model_executor/layers/quantization/deepspeedfp.py +193 -0
- vllm/model_executor/layers/quantization/experts_int8.py +194 -0
- vllm/model_executor/layers/quantization/fbgemm_fp8.py +168 -0
- vllm/model_executor/layers/quantization/fp8.py +832 -0
- vllm/model_executor/layers/quantization/gguf.py +408 -0
- vllm/model_executor/layers/quantization/gptq.py +276 -0
- vllm/model_executor/layers/quantization/gptq_bitblas.py +438 -0
- vllm/model_executor/layers/quantization/gptq_marlin.py +643 -0
- vllm/model_executor/layers/quantization/gptq_marlin_24.py +295 -0
- vllm/model_executor/layers/quantization/hqq_marlin.py +328 -0
- vllm/model_executor/layers/quantization/ipex_quant.py +250 -0
- vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +89 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +82 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +299 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +142 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +119 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +132 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +66 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +86 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +119 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +136 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +40 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +104 -0
- vllm/model_executor/layers/quantization/kv_cache.py +137 -0
- vllm/model_executor/layers/quantization/marlin.py +259 -0
- vllm/model_executor/layers/quantization/modelopt.py +410 -0
- vllm/model_executor/layers/quantization/moe_wna16.py +447 -0
- vllm/model_executor/layers/quantization/neuron_quant.py +67 -0
- vllm/model_executor/layers/quantization/ptpc_fp8.py +125 -0
- vllm/model_executor/layers/quantization/qqq.py +273 -0
- vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
- vllm/model_executor/layers/quantization/quark/quark.py +385 -0
- vllm/model_executor/layers/quantization/quark/quark_moe.py +236 -0
- vllm/model_executor/layers/quantization/quark/schemes/__init__.py +7 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +54 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +142 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +121 -0
- vllm/model_executor/layers/quantization/quark/utils.py +102 -0
- vllm/model_executor/layers/quantization/schema.py +85 -0
- vllm/model_executor/layers/quantization/torchao.py +127 -0
- vllm/model_executor/layers/quantization/tpu_int8.py +119 -0
- vllm/model_executor/layers/quantization/utils/__init__.py +5 -0
- vllm/model_executor/layers/quantization/utils/allspark_utils.py +51 -0
- vllm/model_executor/layers/quantization/utils/bitblas_utils.py +198 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/fp8_utils.py +523 -0
- vllm/model_executor/layers/quantization/utils/gptq_utils.py +94 -0
- vllm/model_executor/layers/quantization/utils/int8_utils.py +459 -0
- vllm/model_executor/layers/quantization/utils/layer_utils.py +39 -0
- vllm/model_executor/layers/quantization/utils/machete_utils.py +32 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils.py +413 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +110 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +164 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +127 -0
- vllm/model_executor/layers/quantization/utils/quant_utils.py +571 -0
- vllm/model_executor/layers/quantization/utils/w8a8_utils.py +404 -0
- vllm/model_executor/layers/rejection_sampler.py +400 -0
- vllm/model_executor/layers/resampler.py +269 -0
- vllm/model_executor/layers/rotary_embedding.py +1598 -0
- vllm/model_executor/layers/sampler.py +1221 -0
- vllm/model_executor/layers/spec_decode_base_sampler.py +258 -0
- vllm/model_executor/layers/typical_acceptance_sampler.py +172 -0
- vllm/model_executor/layers/utils.py +99 -0
- vllm/model_executor/layers/vocab_parallel_embedding.py +485 -0
- vllm/model_executor/model_loader/__init__.py +20 -0
- vllm/model_executor/model_loader/loader.py +1542 -0
- vllm/model_executor/model_loader/neuron.py +243 -0
- vllm/model_executor/model_loader/tensorizer.py +468 -0
- vllm/model_executor/model_loader/utils.py +171 -0
- vllm/model_executor/model_loader/weight_utils.py +749 -0
- vllm/model_executor/models/__init__.py +27 -0
- vllm/model_executor/models/adapters.py +247 -0
- vllm/model_executor/models/arctic.py +559 -0
- vllm/model_executor/models/aria.py +656 -0
- vllm/model_executor/models/aya_vision.py +461 -0
- vllm/model_executor/models/baichuan.py +469 -0
- vllm/model_executor/models/bamba.py +542 -0
- vllm/model_executor/models/bart.py +936 -0
- vllm/model_executor/models/bert.py +725 -0
- vllm/model_executor/models/blip.py +337 -0
- vllm/model_executor/models/blip2.py +717 -0
- vllm/model_executor/models/bloom.py +358 -0
- vllm/model_executor/models/chameleon.py +1135 -0
- vllm/model_executor/models/chatglm.py +476 -0
- vllm/model_executor/models/clip.py +410 -0
- vllm/model_executor/models/commandr.py +466 -0
- vllm/model_executor/models/constant_size_cache.py +136 -0
- vllm/model_executor/models/dbrx.py +469 -0
- vllm/model_executor/models/deepseek.py +484 -0
- vllm/model_executor/models/deepseek_mtp.py +266 -0
- vllm/model_executor/models/deepseek_v2.py +830 -0
- vllm/model_executor/models/deepseek_vl2.py +647 -0
- vllm/model_executor/models/eagle.py +247 -0
- vllm/model_executor/models/exaone.py +548 -0
- vllm/model_executor/models/fairseq2_llama.py +153 -0
- vllm/model_executor/models/falcon.py +508 -0
- vllm/model_executor/models/florence2.py +1102 -0
- vllm/model_executor/models/fuyu.py +388 -0
- vllm/model_executor/models/gemma.py +423 -0
- vllm/model_executor/models/gemma2.py +423 -0
- vllm/model_executor/models/gemma3.py +531 -0
- vllm/model_executor/models/gemma3_mm.py +716 -0
- vllm/model_executor/models/glm.py +22 -0
- vllm/model_executor/models/glm4.py +303 -0
- vllm/model_executor/models/glm4v.py +647 -0
- vllm/model_executor/models/gpt2.py +313 -0
- vllm/model_executor/models/gpt_bigcode.py +336 -0
- vllm/model_executor/models/gpt_j.py +337 -0
- vllm/model_executor/models/gpt_neox.py +330 -0
- vllm/model_executor/models/granite.py +494 -0
- vllm/model_executor/models/granite_speech.py +777 -0
- vllm/model_executor/models/granitemoe.py +435 -0
- vllm/model_executor/models/granitemoeshared.py +339 -0
- vllm/model_executor/models/gritlm.py +245 -0
- vllm/model_executor/models/grok1.py +560 -0
- vllm/model_executor/models/h2ovl.py +542 -0
- vllm/model_executor/models/idefics2_vision_model.py +387 -0
- vllm/model_executor/models/idefics3.py +767 -0
- vllm/model_executor/models/interfaces.py +569 -0
- vllm/model_executor/models/interfaces_base.py +163 -0
- vllm/model_executor/models/intern_vit.py +476 -0
- vllm/model_executor/models/internlm2.py +453 -0
- vllm/model_executor/models/internlm2_ve.py +146 -0
- vllm/model_executor/models/internvl.py +945 -0
- vllm/model_executor/models/jais.py +371 -0
- vllm/model_executor/models/jamba.py +590 -0
- vllm/model_executor/models/kimi_vl.py +577 -0
- vllm/model_executor/models/llama.py +619 -0
- vllm/model_executor/models/llama4.py +530 -0
- vllm/model_executor/models/llama_eagle.py +152 -0
- vllm/model_executor/models/llama_eagle3.py +232 -0
- vllm/model_executor/models/llava.py +869 -0
- vllm/model_executor/models/llava_next.py +582 -0
- vllm/model_executor/models/llava_next_video.py +470 -0
- vllm/model_executor/models/llava_onevision.py +954 -0
- vllm/model_executor/models/mamba.py +271 -0
- vllm/model_executor/models/mamba2.py +302 -0
- vllm/model_executor/models/mamba_cache.py +76 -0
- vllm/model_executor/models/medusa.py +210 -0
- vllm/model_executor/models/minicpm.py +592 -0
- vllm/model_executor/models/minicpm3.py +229 -0
- vllm/model_executor/models/minicpmo.py +725 -0
- vllm/model_executor/models/minicpmv.py +1287 -0
- vllm/model_executor/models/minimax_cache.py +35 -0
- vllm/model_executor/models/minimax_text_01.py +1261 -0
- vllm/model_executor/models/mistral3.py +598 -0
- vllm/model_executor/models/mixtral.py +485 -0
- vllm/model_executor/models/mixtral_quant.py +447 -0
- vllm/model_executor/models/mllama.py +1623 -0
- vllm/model_executor/models/mllama4.py +838 -0
- vllm/model_executor/models/mlp_speculator.py +205 -0
- vllm/model_executor/models/modernbert.py +325 -0
- vllm/model_executor/models/module_mapping.py +71 -0
- vllm/model_executor/models/molmo.py +1567 -0
- vllm/model_executor/models/moonvit.py +628 -0
- vllm/model_executor/models/mpt.py +329 -0
- vllm/model_executor/models/nemotron.py +506 -0
- vllm/model_executor/models/nemotron_nas.py +446 -0
- vllm/model_executor/models/nvlm_d.py +212 -0
- vllm/model_executor/models/olmo.py +390 -0
- vllm/model_executor/models/olmo2.py +412 -0
- vllm/model_executor/models/olmoe.py +449 -0
- vllm/model_executor/models/opt.py +410 -0
- vllm/model_executor/models/orion.py +356 -0
- vllm/model_executor/models/paligemma.py +397 -0
- vllm/model_executor/models/persimmon.py +342 -0
- vllm/model_executor/models/phi.py +354 -0
- vllm/model_executor/models/phi3.py +18 -0
- vllm/model_executor/models/phi3_small.py +463 -0
- vllm/model_executor/models/phi3v.py +722 -0
- vllm/model_executor/models/phi4mm.py +1263 -0
- vllm/model_executor/models/phi4mm_audio.py +1232 -0
- vllm/model_executor/models/phi4mm_utils.py +1883 -0
- vllm/model_executor/models/phimoe.py +666 -0
- vllm/model_executor/models/pixtral.py +1281 -0
- vllm/model_executor/models/plamo2.py +736 -0
- vllm/model_executor/models/prithvi_geospatial_mae.py +231 -0
- vllm/model_executor/models/qwen.py +360 -0
- vllm/model_executor/models/qwen2.py +552 -0
- vllm/model_executor/models/qwen2_5_omni_thinker.py +901 -0
- vllm/model_executor/models/qwen2_5_vl.py +1136 -0
- vllm/model_executor/models/qwen2_audio.py +402 -0
- vllm/model_executor/models/qwen2_moe.py +531 -0
- vllm/model_executor/models/qwen2_rm.py +130 -0
- vllm/model_executor/models/qwen2_vl.py +1409 -0
- vllm/model_executor/models/qwen3.py +319 -0
- vllm/model_executor/models/qwen3_moe.py +528 -0
- vllm/model_executor/models/qwen_vl.py +784 -0
- vllm/model_executor/models/registry.py +611 -0
- vllm/model_executor/models/roberta.py +332 -0
- vllm/model_executor/models/siglip.py +522 -0
- vllm/model_executor/models/skyworkr1v.py +949 -0
- vllm/model_executor/models/smolvlm.py +51 -0
- vllm/model_executor/models/solar.py +504 -0
- vllm/model_executor/models/stablelm.py +349 -0
- vllm/model_executor/models/starcoder2.py +355 -0
- vllm/model_executor/models/telechat2.py +139 -0
- vllm/model_executor/models/teleflm.py +78 -0
- vllm/model_executor/models/transformers.py +442 -0
- vllm/model_executor/models/ultravox.py +655 -0
- vllm/model_executor/models/utils.py +714 -0
- vllm/model_executor/models/vision.py +149 -0
- vllm/model_executor/models/whisper.py +746 -0
- vllm/model_executor/models/zamba2.py +1008 -0
- vllm/model_executor/parameter.py +458 -0
- vllm/model_executor/pooling_metadata.py +71 -0
- vllm/model_executor/sampling_metadata.py +596 -0
- vllm/model_executor/utils.py +53 -0
- vllm/multimodal/__init__.py +31 -0
- vllm/multimodal/audio.py +105 -0
- vllm/multimodal/base.py +218 -0
- vllm/multimodal/hasher.py +103 -0
- vllm/multimodal/image.py +77 -0
- vllm/multimodal/inputs.py +843 -0
- vllm/multimodal/parse.py +454 -0
- vllm/multimodal/processing.py +1760 -0
- vllm/multimodal/profiling.py +274 -0
- vllm/multimodal/registry.py +321 -0
- vllm/multimodal/utils.py +386 -0
- vllm/multimodal/video.py +166 -0
- vllm/outputs.py +521 -0
- vllm/platforms/__init__.py +286 -0
- vllm/platforms/cpu.py +182 -0
- vllm/platforms/cuda.py +463 -0
- vllm/platforms/hpu.py +94 -0
- vllm/platforms/interface.py +427 -0
- vllm/platforms/neuron.py +69 -0
- vllm/platforms/rocm.py +346 -0
- vllm/platforms/tpu.py +174 -0
- vllm/platforms/xpu.py +142 -0
- vllm/plugins/__init__.py +82 -0
- vllm/pooling_params.py +53 -0
- vllm/profiler/__init__.py +7 -0
- vllm/profiler/layerwise_profile.py +374 -0
- vllm/profiler/utils.py +147 -0
- vllm/prompt_adapter/__init__.py +0 -0
- vllm/prompt_adapter/layers.py +82 -0
- vllm/prompt_adapter/models.py +357 -0
- vllm/prompt_adapter/request.py +36 -0
- vllm/prompt_adapter/utils.py +97 -0
- vllm/prompt_adapter/worker_manager.py +178 -0
- vllm/py.typed +2 -0
- vllm/reasoning/__init__.py +12 -0
- vllm/reasoning/abs_reasoning_parsers.py +189 -0
- vllm/reasoning/deepseek_r1_reasoning_parser.py +172 -0
- vllm/reasoning/granite_reasoning_parser.py +362 -0
- vllm/sampling_params.py +598 -0
- vllm/scalar_type.py +335 -0
- vllm/scripts.py +14 -0
- vllm/sequence.py +1486 -0
- vllm/spec_decode/__init__.py +0 -0
- vllm/spec_decode/batch_expansion.py +505 -0
- vllm/spec_decode/draft_model_runner.py +335 -0
- vllm/spec_decode/interfaces.py +98 -0
- vllm/spec_decode/medusa_worker.py +137 -0
- vllm/spec_decode/metrics.py +212 -0
- vllm/spec_decode/mlp_speculator_worker.py +93 -0
- vllm/spec_decode/mqa_scorer.py +159 -0
- vllm/spec_decode/multi_step_worker.py +416 -0
- vllm/spec_decode/ngram_worker.py +195 -0
- vllm/spec_decode/proposer_worker_base.py +58 -0
- vllm/spec_decode/smaller_tp_proposer_worker.py +194 -0
- vllm/spec_decode/spec_decode_worker.py +1324 -0
- vllm/spec_decode/target_model_runner.py +44 -0
- vllm/spec_decode/top1_proposer.py +274 -0
- vllm/spec_decode/util.py +276 -0
- vllm/test_utils.py +129 -0
- vllm/third_party/__init__.py +0 -0
- vllm/third_party/pynvml.py +6139 -0
- vllm/tracing.py +130 -0
- vllm/transformers_utils/__init__.py +19 -0
- vllm/transformers_utils/config.py +813 -0
- vllm/transformers_utils/configs/__init__.py +52 -0
- vllm/transformers_utils/configs/arctic.py +206 -0
- vllm/transformers_utils/configs/chatglm.py +71 -0
- vllm/transformers_utils/configs/cohere2.py +194 -0
- vllm/transformers_utils/configs/dbrx.py +280 -0
- vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
- vllm/transformers_utils/configs/eagle.py +65 -0
- vllm/transformers_utils/configs/exaone.py +191 -0
- vllm/transformers_utils/configs/falcon.py +89 -0
- vllm/transformers_utils/configs/h2ovl.py +15 -0
- vllm/transformers_utils/configs/internvl.py +53 -0
- vllm/transformers_utils/configs/jais.py +237 -0
- vllm/transformers_utils/configs/kimi_vl.py +36 -0
- vllm/transformers_utils/configs/medusa.py +62 -0
- vllm/transformers_utils/configs/mllama.py +30 -0
- vllm/transformers_utils/configs/mlp_speculator.py +67 -0
- vllm/transformers_utils/configs/moonvit.py +32 -0
- vllm/transformers_utils/configs/mpt.py +179 -0
- vllm/transformers_utils/configs/nemotron.py +204 -0
- vllm/transformers_utils/configs/nvlm_d.py +14 -0
- vllm/transformers_utils/configs/skyworkr1v.py +53 -0
- vllm/transformers_utils/configs/solar.py +246 -0
- vllm/transformers_utils/configs/telechat2.py +63 -0
- vllm/transformers_utils/configs/ultravox.py +107 -0
- vllm/transformers_utils/detokenizer.py +167 -0
- vllm/transformers_utils/detokenizer_utils.py +188 -0
- vllm/transformers_utils/processor.py +210 -0
- vllm/transformers_utils/processors/__init__.py +6 -0
- vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
- vllm/transformers_utils/s3_utils.py +161 -0
- vllm/transformers_utils/tokenizer.py +291 -0
- vllm/transformers_utils/tokenizer_base.py +146 -0
- vllm/transformers_utils/tokenizer_group.py +110 -0
- vllm/transformers_utils/tokenizers/__init__.py +9 -0
- vllm/transformers_utils/tokenizers/mistral.py +483 -0
- vllm/transformers_utils/utils.py +98 -0
- vllm/triton_utils/__init__.py +5 -0
- vllm/triton_utils/importing.py +53 -0
- vllm/usage/__init__.py +0 -0
- vllm/usage/usage_lib.py +255 -0
- vllm/utils.py +2692 -0
- vllm/v1/__init__.py +0 -0
- vllm/v1/attention/__init__.py +0 -0
- vllm/v1/attention/backends/__init__.py +0 -0
- vllm/v1/attention/backends/flash_attn.py +783 -0
- vllm/v1/attention/backends/flashinfer.py +638 -0
- vllm/v1/attention/backends/mla/__init__.py +0 -0
- vllm/v1/attention/backends/mla/common.py +974 -0
- vllm/v1/attention/backends/mla/flashmla.py +149 -0
- vllm/v1/attention/backends/mla/triton_mla.py +118 -0
- vllm/v1/attention/backends/pallas.py +221 -0
- vllm/v1/attention/backends/triton_attn.py +198 -0
- vllm/v1/core/__init__.py +0 -0
- vllm/v1/core/block_pool.py +281 -0
- vllm/v1/core/encoder_cache_manager.py +149 -0
- vllm/v1/core/kv_cache_manager.py +385 -0
- vllm/v1/core/kv_cache_utils.py +744 -0
- vllm/v1/core/sched/__init__.py +0 -0
- vllm/v1/core/sched/interface.py +134 -0
- vllm/v1/core/sched/output.py +126 -0
- vllm/v1/core/sched/scheduler.py +838 -0
- vllm/v1/core/sched/utils.py +22 -0
- vllm/v1/core/specialized_manager.py +161 -0
- vllm/v1/engine/__init__.py +166 -0
- vllm/v1/engine/async_llm.py +532 -0
- vllm/v1/engine/core.py +701 -0
- vllm/v1/engine/core_client.py +942 -0
- vllm/v1/engine/detokenizer.py +260 -0
- vllm/v1/engine/exceptions.py +16 -0
- vllm/v1/engine/llm_engine.py +285 -0
- vllm/v1/engine/logprobs.py +198 -0
- vllm/v1/engine/mm_input_cache.py +82 -0
- vllm/v1/engine/output_processor.py +420 -0
- vllm/v1/engine/parallel_sampling.py +132 -0
- vllm/v1/engine/processor.py +387 -0
- vllm/v1/executor/__init__.py +0 -0
- vllm/v1/executor/abstract.py +112 -0
- vllm/v1/executor/multiproc_executor.py +480 -0
- vllm/v1/executor/ray_distributed_executor.py +61 -0
- vllm/v1/kv_cache_interface.py +166 -0
- vllm/v1/metrics/__init__.py +0 -0
- vllm/v1/metrics/loggers.py +498 -0
- vllm/v1/metrics/stats.py +238 -0
- vllm/v1/outputs.py +111 -0
- vllm/v1/request.py +178 -0
- vllm/v1/sample/__init__.py +0 -0
- vllm/v1/sample/metadata.py +43 -0
- vllm/v1/sample/ops/__init__.py +0 -0
- vllm/v1/sample/ops/bad_words.py +38 -0
- vllm/v1/sample/ops/penalties.py +58 -0
- vllm/v1/sample/ops/topk_topp_sampler.py +315 -0
- vllm/v1/sample/rejection_sampler.py +631 -0
- vllm/v1/sample/sampler.py +270 -0
- vllm/v1/sample/tpu/__init__.py +0 -0
- vllm/v1/sample/tpu/metadata.py +118 -0
- vllm/v1/sample/tpu/sampler.py +154 -0
- vllm/v1/serial_utils.py +274 -0
- vllm/v1/spec_decode/__init__.py +0 -0
- vllm/v1/spec_decode/eagle.py +318 -0
- vllm/v1/spec_decode/metadata.py +61 -0
- vllm/v1/spec_decode/metrics.py +164 -0
- vllm/v1/spec_decode/ngram_proposer.py +131 -0
- vllm/v1/spec_decode/utils.py +18 -0
- vllm/v1/stats/__init__.py +0 -0
- vllm/v1/stats/common.py +453 -0
- vllm/v1/structured_output/__init__.py +113 -0
- vllm/v1/structured_output/backend_guidance.py +215 -0
- vllm/v1/structured_output/backend_types.py +96 -0
- vllm/v1/structured_output/backend_xgrammar.py +299 -0
- vllm/v1/structured_output/request.py +84 -0
- vllm/v1/structured_output/utils.py +174 -0
- vllm/v1/utils.py +249 -0
- vllm/v1/worker/__init__.py +0 -0
- vllm/v1/worker/block_table.py +87 -0
- vllm/v1/worker/gpu_input_batch.py +677 -0
- vllm/v1/worker/gpu_model_runner.py +1776 -0
- vllm/v1/worker/gpu_worker.py +349 -0
- vllm/v1/worker/lora_model_runner_mixin.py +145 -0
- vllm/v1/worker/tpu_model_runner.py +1419 -0
- vllm/v1/worker/tpu_worker.py +260 -0
- vllm/v1/worker/utils.py +74 -0
- vllm/v1/worker/worker_base.py +64 -0
- vllm/version.py +40 -0
- vllm/vllm_flash_attn/.gitkeep +0 -0
- vllm/worker/__init__.py +0 -0
- vllm/worker/cache_engine.py +144 -0
- vllm/worker/cpu_enc_dec_model_runner.py +323 -0
- vllm/worker/cpu_model_runner.py +668 -0
- vllm/worker/cpu_pooling_model_runner.py +122 -0
- vllm/worker/cpu_worker.py +400 -0
- vllm/worker/enc_dec_model_runner.py +542 -0
- vllm/worker/hpu_model_runner.py +2221 -0
- vllm/worker/hpu_worker.py +483 -0
- vllm/worker/model_runner.py +2056 -0
- vllm/worker/model_runner_base.py +281 -0
- vllm/worker/multi_step_hpu_worker.py +122 -0
- vllm/worker/multi_step_model_runner.py +908 -0
- vllm/worker/multi_step_tpu_worker.py +107 -0
- vllm/worker/multi_step_worker.py +196 -0
- vllm/worker/neuron_model_runner.py +336 -0
- vllm/worker/neuron_worker.py +138 -0
- vllm/worker/pooling_model_runner.py +200 -0
- vllm/worker/tpu_model_runner.py +908 -0
- vllm/worker/tpu_worker.py +332 -0
- vllm/worker/utils.py +52 -0
- vllm/worker/worker.py +570 -0
- vllm/worker/worker_base.py +644 -0
- vllm/worker/xpu_model_runner.py +603 -0
- vllm/worker/xpu_worker.py +185 -0
- vllm_cpu-0.8.5.post2.dist-info/METADATA +309 -0
- vllm_cpu-0.8.5.post2.dist-info/RECORD +1103 -0
- vllm_cpu-0.8.5.post2.dist-info/WHEEL +5 -0
- vllm_cpu-0.8.5.post2.dist-info/entry_points.txt +2 -0
- vllm_cpu-0.8.5.post2.dist-info/top_level.txt +1 -0
vllm/model_executor/model_loader/loader.py
@@ -0,0 +1,1542 @@
|
|
|
1
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
2
|
+
|
|
3
|
+
# ruff: noqa: SIM117
|
|
4
|
+
import collections
|
|
5
|
+
import copy
|
|
6
|
+
import dataclasses
|
|
7
|
+
import fnmatch
|
|
8
|
+
import glob
|
|
9
|
+
import inspect
|
|
10
|
+
import itertools
|
|
11
|
+
import math
|
|
12
|
+
import os
|
|
13
|
+
import time
|
|
14
|
+
import warnings
|
|
15
|
+
from abc import ABC, abstractmethod
|
|
16
|
+
from contextlib import contextmanager
|
|
17
|
+
from typing import (Any, Callable, Dict, Generator, Iterable, List, Optional,
|
|
18
|
+
Tuple, cast)
|
|
19
|
+
|
|
20
|
+
import gguf
|
|
21
|
+
import huggingface_hub
|
|
22
|
+
import numpy as np
|
|
23
|
+
import torch
|
|
24
|
+
from huggingface_hub import HfApi
|
|
25
|
+
from torch import nn
|
|
26
|
+
from transformers import AutoModelForCausalLM
|
|
27
|
+
from transformers.utils import SAFE_WEIGHTS_INDEX_NAME
|
|
28
|
+
|
|
29
|
+
from vllm.attention import Attention
|
|
30
|
+
from vllm.config import (LoadConfig, LoadFormat, ModelConfig, ParallelConfig,
|
|
31
|
+
VllmConfig, set_current_vllm_config)
|
|
32
|
+
from vllm.distributed import (get_tensor_model_parallel_rank,
|
|
33
|
+
get_tensor_model_parallel_world_size)
|
|
34
|
+
from vllm.envs import VLLM_USE_MODELSCOPE
|
|
35
|
+
from vllm.logger import init_logger
|
|
36
|
+
# yapf conflicts with isort for this block
|
|
37
|
+
# yapf: disable
|
|
38
|
+
from vllm.model_executor.layers.linear import (LinearBase,
|
|
39
|
+
MergedColumnParallelLinear,
|
|
40
|
+
QKVCrossParallelLinear,
|
|
41
|
+
QKVParallelLinear,
|
|
42
|
+
ReplicatedLinear,
|
|
43
|
+
RowParallelLinear)
|
|
44
|
+
# yapf: enable
|
|
45
|
+
from vllm.model_executor.layers.quantization.base_config import (
|
|
46
|
+
QuantizeMethodBase)
|
|
47
|
+
from vllm.model_executor.model_loader.tensorizer import (
|
|
48
|
+
TensorizerConfig, is_vllm_tensorized, load_with_tensorizer,
|
|
49
|
+
serialize_vllm_model, tensorizer_weights_iterator)
|
|
50
|
+
from vllm.model_executor.model_loader.utils import (ParamMapping,
|
|
51
|
+
configure_quant_config,
|
|
52
|
+
get_model_architecture,
|
|
53
|
+
set_default_torch_dtype)
|
|
54
|
+
from vllm.model_executor.model_loader.weight_utils import (
|
|
55
|
+
download_safetensors_index_file_from_hf, download_weights_from_hf,
|
|
56
|
+
fastsafetensors_weights_iterator, filter_duplicate_safetensors_files,
|
|
57
|
+
filter_files_not_needed_for_inference, get_gguf_extra_tensor_names,
|
|
58
|
+
get_lock, gguf_quant_weights_iterator, initialize_dummy_weights,
|
|
59
|
+
np_cache_weights_iterator, pt_weights_iterator,
|
|
60
|
+
runai_safetensors_weights_iterator, safetensors_weights_iterator)
|
|
61
|
+
from vllm.model_executor.utils import set_weight_attrs
|
|
62
|
+
from vllm.platforms import current_platform
|
|
63
|
+
from vllm.transformers_utils.s3_utils import glob as s3_glob
|
|
64
|
+
from vllm.transformers_utils.utils import is_s3
|
|
65
|
+
from vllm.utils import is_pin_memory_available
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
@contextmanager
|
|
69
|
+
def device_loading_context(module: torch.nn.Module,
|
|
70
|
+
target_device: torch.device):
|
|
71
|
+
if target_device.type == "cpu":
|
|
72
|
+
# If target is CPU, no need to move anything
|
|
73
|
+
yield module
|
|
74
|
+
return
|
|
75
|
+
|
|
76
|
+
original_device_states: Dict[str, torch.device] = {}
|
|
77
|
+
|
|
78
|
+
# Store original device states and move parameters to GPU if they're on CPU
|
|
79
|
+
for name, p in module.named_parameters():
|
|
80
|
+
if p.device.type == "cpu":
|
|
81
|
+
original_device_states[name] = p.device
|
|
82
|
+
p.data = p.data.to(target_device)
|
|
83
|
+
# Parameters already on target device are not touched
|
|
84
|
+
|
|
85
|
+
try:
|
|
86
|
+
yield module
|
|
87
|
+
|
|
88
|
+
finally:
|
|
89
|
+
# Restore parameters to their original devices, ignoring new parameters
|
|
90
|
+
pin_memory = is_pin_memory_available()
|
|
91
|
+
for name, p in module.named_parameters():
|
|
92
|
+
if name in original_device_states:
|
|
93
|
+
original_device: torch.device = original_device_states[name]
|
|
94
|
+
if original_device.type == "cpu":
|
|
95
|
+
# `torch.empty_like` does not support `pin_memory` argument
|
|
96
|
+
cpu_data = torch.empty_strided(
|
|
97
|
+
size=p.data.size(),
|
|
98
|
+
stride=p.data.stride(),
|
|
99
|
+
dtype=p.data.dtype,
|
|
100
|
+
layout=p.data.layout,
|
|
101
|
+
device="cpu",
|
|
102
|
+
pin_memory=pin_memory,
|
|
103
|
+
)
|
|
104
|
+
cpu_data.copy_(p.data)
|
|
105
|
+
p.data = cpu_data
|
|
106
|
+
else:
|
|
107
|
+
p.data = p.data.to(original_device)
|
|
108
|
+
# New parameters or parameters already on target device are untouched
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
logger = init_logger(__name__)
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
def _initialize_model(
|
|
115
|
+
vllm_config: VllmConfig,
|
|
116
|
+
*,
|
|
117
|
+
prefix: str = "",
|
|
118
|
+
model_class: Optional[type[nn.Module]] = None,
|
|
119
|
+
) -> nn.Module:
|
|
120
|
+
"""Initialize a model with the given configurations."""
|
|
121
|
+
model_config = vllm_config.model_config
|
|
122
|
+
if model_class is None:
|
|
123
|
+
model_class, _ = get_model_architecture(model_config)
|
|
124
|
+
|
|
125
|
+
if vllm_config.quant_config is not None:
|
|
126
|
+
configure_quant_config(vllm_config.quant_config, model_class)
|
|
127
|
+
|
|
128
|
+
signatures = inspect.signature(model_class.__init__)
|
|
129
|
+
all_params = [param.name for param in signatures.parameters.values()]
|
|
130
|
+
if "vllm_config" in all_params and "prefix" in all_params:
|
|
131
|
+
# new-style model class
|
|
132
|
+
with set_current_vllm_config(vllm_config, check_compile=True):
|
|
133
|
+
return model_class(vllm_config=vllm_config, prefix=prefix)
|
|
134
|
+
|
|
135
|
+
msg = ("vLLM model class should accept `vllm_config` and `prefix` as "
|
|
136
|
+
"input arguments. Possibly you have an old-style model class"
|
|
137
|
+
" registered from out of tree and it is used for new vLLM version. "
|
|
138
|
+
"Check https://docs.vllm.ai/en/latest/design/arch_overview.html "
|
|
139
|
+
"for the design and update the model class accordingly.")
|
|
140
|
+
warnings.warn(msg, DeprecationWarning, stacklevel=2)
|
|
141
|
+
|
|
142
|
+
logger.warning(
|
|
143
|
+
"Trying to guess the arguments for old-style model class %s",
|
|
144
|
+
model_class,
|
|
145
|
+
)
|
|
146
|
+
# try to be compatible with old-style model class
|
|
147
|
+
kwargs = {}
|
|
148
|
+
if "prefix" in all_params:
|
|
149
|
+
kwargs["prefix"] = prefix
|
|
150
|
+
if "config" in all_params:
|
|
151
|
+
kwargs["config"] = model_config.hf_config
|
|
152
|
+
if "cache_config" in all_params:
|
|
153
|
+
kwargs["cache_config"] = vllm_config.cache_config
|
|
154
|
+
if "quant_config" in all_params:
|
|
155
|
+
kwargs["quant_config"] = vllm_config.quant_config
|
|
156
|
+
if "lora_config" in all_params:
|
|
157
|
+
kwargs["lora_config"] = vllm_config.lora_config
|
|
158
|
+
if "scheduler_config" in all_params:
|
|
159
|
+
kwargs["scheduler_config"] = vllm_config.scheduler_config
|
|
160
|
+
with set_current_vllm_config(vllm_config, check_compile=True):
|
|
161
|
+
return model_class(**kwargs)
|
|
162
|
+
|
|
163
|
+
|
|
164
|
+
def _process_weights_after_loading(model: nn.Module, model_config: ModelConfig,
|
|
165
|
+
target_device: torch.device) -> None:
|
|
166
|
+
for _, module in model.named_modules():
|
|
167
|
+
if isinstance(module, QKVCrossParallelLinear):
|
|
168
|
+
# NOTE(Isotr0py): special case for cross QKV layer because
|
|
169
|
+
# q and kv proj aren't registered as submodules intentionally
|
|
170
|
+
module.process_weights_after_loading()
|
|
171
|
+
continue
|
|
172
|
+
quant_method = getattr(module, "quant_method", None)
|
|
173
|
+
if isinstance(quant_method, QuantizeMethodBase):
|
|
174
|
+
# When quant methods need to process weights after loading
|
|
175
|
+
# (for repacking, quantizing, etc), they expect parameters
|
|
176
|
+
# to be on the global target device. This scope is for the
|
|
177
|
+
# case where cpu offloading is used, where we will move the
|
|
178
|
+
# parameters onto device for processing and back off after.
|
|
179
|
+
with device_loading_context(module, target_device):
|
|
180
|
+
quant_method.process_weights_after_loading(module)
|
|
181
|
+
|
|
182
|
+
# Currently only used by MLA.
|
|
183
|
+
# NOTE: This intentionally happens after other modules so we can easily
|
|
184
|
+
# decompress the weights for MLA.
|
|
185
|
+
for _, module in model.named_modules():
|
|
186
|
+
if isinstance(module, Attention) and \
|
|
187
|
+
hasattr(module, "process_weights_after_loading"):
|
|
188
|
+
# TODO(lucas): see if there is a way to unify the signatures
|
|
189
|
+
# of process_weights_after_loading
|
|
190
|
+
module.process_weights_after_loading(model_config.dtype)
|
|
191
|
+
|
|
192
|
+
|
|
193
|
+
class BaseModelLoader(ABC):
|
|
194
|
+
"""Base class for model loaders."""
|
|
195
|
+
|
|
196
|
+
def __init__(self, load_config: LoadConfig):
|
|
197
|
+
self.load_config = load_config
|
|
198
|
+
|
|
199
|
+
@abstractmethod
|
|
200
|
+
def download_model(self, model_config: ModelConfig) -> None:
|
|
201
|
+
"""Download a model so that it can be immediately loaded."""
|
|
202
|
+
raise NotImplementedError
|
|
203
|
+
|
|
204
|
+
@abstractmethod
|
|
205
|
+
def load_model(self, *, vllm_config: VllmConfig) -> nn.Module:
|
|
206
|
+
"""Load a model with the given configurations."""
|
|
207
|
+
raise NotImplementedError
|
|
208
|
+
|
|
209
|
+
|
|
210
|
+
class DefaultModelLoader(BaseModelLoader):
|
|
211
|
+
"""Model loader that can load different file types from disk."""
|
|
212
|
+
|
|
213
|
+
@dataclasses.dataclass
|
|
214
|
+
class Source:
|
|
215
|
+
"""A source for weights."""
|
|
216
|
+
|
|
217
|
+
model_or_path: str
|
|
218
|
+
"""The model ID or path."""
|
|
219
|
+
|
|
220
|
+
revision: Optional[str]
|
|
221
|
+
"""The optional model revision."""
|
|
222
|
+
|
|
223
|
+
prefix: str = ""
|
|
224
|
+
"""A prefix to prepend to all weights."""
|
|
225
|
+
|
|
226
|
+
fall_back_to_pt: bool = True
|
|
227
|
+
"""Whether .pt weights can be used."""
|
|
228
|
+
|
|
229
|
+
allow_patterns_overrides: Optional[list[str]] = None
|
|
230
|
+
"""If defined, weights will load exclusively using these patterns."""
|
|
231
|
+
|
|
232
|
+
counter_before_loading_weights: float = 0.0
|
|
233
|
+
counter_after_loading_weights: float = 0.0
|
|
234
|
+
|
|
235
|
+
def __init__(self, load_config: LoadConfig):
|
|
236
|
+
super().__init__(load_config)
|
|
237
|
+
if load_config.model_loader_extra_config:
|
|
238
|
+
raise ValueError(f"Model loader extra config is not supported for "
|
|
239
|
+
f"load format {load_config.load_format}")
|
|
240
|
+
|
|
241
|
+
def _maybe_download_from_modelscope(
|
|
242
|
+
self, model: str, revision: Optional[str]) -> Optional[str]:
|
|
243
|
+
"""Download model from ModelScope hub if VLLM_USE_MODELSCOPE is True.
|
|
244
|
+
|
|
245
|
+
Returns the path to the downloaded model, or None if the model is not
|
|
246
|
+
downloaded from ModelScope."""
|
|
247
|
+
if VLLM_USE_MODELSCOPE:
|
|
248
|
+
# download model from ModelScope hub,
|
|
249
|
+
# lazy import so that modelscope is not required for normal use.
|
|
250
|
+
# pylint: disable=C.
|
|
251
|
+
from modelscope.hub.snapshot_download import snapshot_download
|
|
252
|
+
|
|
253
|
+
if not os.path.exists(model):
|
|
254
|
+
# Use file lock to prevent multiple processes from
|
|
255
|
+
# downloading the same model weights at the same time.
|
|
256
|
+
with get_lock(model, self.load_config.download_dir):
|
|
257
|
+
model_path = snapshot_download(
|
|
258
|
+
model_id=model,
|
|
259
|
+
cache_dir=self.load_config.download_dir,
|
|
260
|
+
local_files_only=huggingface_hub.constants.
|
|
261
|
+
HF_HUB_OFFLINE,
|
|
262
|
+
revision=revision,
|
|
263
|
+
ignore_file_pattern=self.load_config.ignore_patterns,
|
|
264
|
+
)
|
|
265
|
+
else:
|
|
266
|
+
model_path = model
|
|
267
|
+
return model_path
|
|
268
|
+
return None
|
|
269
|
+
|
|
270
|
+
def _prepare_weights(
|
|
271
|
+
self,
|
|
272
|
+
model_name_or_path: str,
|
|
273
|
+
revision: Optional[str],
|
|
274
|
+
fall_back_to_pt: bool,
|
|
275
|
+
allow_patterns_overrides: Optional[list[str]],
|
|
276
|
+
) -> Tuple[str, List[str], bool]:
|
|
277
|
+
"""Prepare weights for the model.
|
|
278
|
+
|
|
279
|
+
If the model is not local, it will be downloaded."""
|
|
280
|
+
model_name_or_path = (self._maybe_download_from_modelscope(
|
|
281
|
+
model_name_or_path, revision) or model_name_or_path)
|
|
282
|
+
|
|
283
|
+
is_local = os.path.isdir(model_name_or_path)
|
|
284
|
+
load_format = self.load_config.load_format
|
|
285
|
+
use_safetensors = False
|
|
286
|
+
index_file = SAFE_WEIGHTS_INDEX_NAME
|
|
287
|
+
# Some quantized models use .pt files for storing the weights.
|
|
288
|
+
if load_format == LoadFormat.AUTO:
|
|
289
|
+
allow_patterns = ["*.safetensors", "*.bin"]
|
|
290
|
+
elif (load_format == LoadFormat.SAFETENSORS
|
|
291
|
+
or load_format == LoadFormat.FASTSAFETENSORS):
|
|
292
|
+
use_safetensors = True
|
|
293
|
+
allow_patterns = ["*.safetensors"]
|
|
294
|
+
elif load_format == LoadFormat.MISTRAL:
|
|
295
|
+
use_safetensors = True
|
|
296
|
+
allow_patterns = ["consolidated*.safetensors"]
|
|
297
|
+
index_file = "consolidated.safetensors.index.json"
|
|
298
|
+
elif load_format == LoadFormat.PT:
|
|
299
|
+
allow_patterns = ["*.pt"]
|
|
300
|
+
elif load_format == LoadFormat.NPCACHE:
|
|
301
|
+
allow_patterns = ["*.bin"]
|
|
302
|
+
else:
|
|
303
|
+
raise ValueError(f"Unknown load_format: {load_format}")
|
|
304
|
+
|
|
305
|
+
if fall_back_to_pt:
|
|
306
|
+
allow_patterns += ["*.pt"]
|
|
307
|
+
|
|
308
|
+
if allow_patterns_overrides is not None:
|
|
309
|
+
allow_patterns = allow_patterns_overrides
|
|
310
|
+
|
|
311
|
+
if not is_local:
|
|
312
|
+
hf_folder = download_weights_from_hf(
|
|
313
|
+
model_name_or_path,
|
|
314
|
+
self.load_config.download_dir,
|
|
315
|
+
allow_patterns,
|
|
316
|
+
revision,
|
|
317
|
+
ignore_patterns=self.load_config.ignore_patterns,
|
|
318
|
+
)
|
|
319
|
+
else:
|
|
320
|
+
hf_folder = model_name_or_path
|
|
321
|
+
|
|
322
|
+
hf_weights_files: List[str] = []
|
|
323
|
+
for pattern in allow_patterns:
|
|
324
|
+
hf_weights_files += glob.glob(os.path.join(hf_folder, pattern))
|
|
325
|
+
if len(hf_weights_files) > 0:
|
|
326
|
+
if pattern == "*.safetensors":
|
|
327
|
+
use_safetensors = True
|
|
328
|
+
break
|
|
329
|
+
|
|
330
|
+
if use_safetensors:
|
|
331
|
+
# For models like Mistral-7B-Instruct-v0.3
|
|
332
|
+
# there are both sharded safetensors files and a consolidated
|
|
333
|
+
# safetensors file. Using both breaks.
|
|
334
|
+
# Here, we download the `model.safetensors.index.json` and filter
|
|
335
|
+
# any files not found in the index.
|
|
336
|
+
if not is_local:
|
|
337
|
+
download_safetensors_index_file_from_hf(
|
|
338
|
+
model_name_or_path,
|
|
339
|
+
index_file,
|
|
340
|
+
self.load_config.download_dir,
|
|
341
|
+
revision,
|
|
342
|
+
)
|
|
343
|
+
hf_weights_files = filter_duplicate_safetensors_files(
|
|
344
|
+
hf_weights_files, hf_folder, index_file)
|
|
345
|
+
else:
|
|
346
|
+
hf_weights_files = filter_files_not_needed_for_inference(
|
|
347
|
+
hf_weights_files)
|
|
348
|
+
|
|
349
|
+
if len(hf_weights_files) == 0:
|
|
350
|
+
raise RuntimeError(
|
|
351
|
+
f"Cannot find any model weights with `{model_name_or_path}`")
|
|
352
|
+
|
|
353
|
+
return hf_folder, hf_weights_files, use_safetensors
|
|
354
|
+
|
|
355
|
+
def _get_weights_iterator(
|
|
356
|
+
self, source: "Source"
|
|
357
|
+
) -> Generator[Tuple[str, torch.Tensor], None, None]:
|
|
358
|
+
"""Get an iterator for the model weights based on the load format."""
|
|
359
|
+
hf_folder, hf_weights_files, use_safetensors = self._prepare_weights(
|
|
360
|
+
source.model_or_path, source.revision, source.fall_back_to_pt,
|
|
361
|
+
source.allow_patterns_overrides)
|
|
362
|
+
if self.load_config.load_format == LoadFormat.NPCACHE:
|
|
363
|
+
# Currently np_cache only support *.bin checkpoints
|
|
364
|
+
assert use_safetensors is False
|
|
365
|
+
weights_iterator = np_cache_weights_iterator(
|
|
366
|
+
source.model_or_path,
|
|
367
|
+
self.load_config.download_dir,
|
|
368
|
+
hf_folder,
|
|
369
|
+
hf_weights_files,
|
|
370
|
+
self.load_config.use_tqdm_on_load,
|
|
371
|
+
)
|
|
372
|
+
elif use_safetensors:
|
|
373
|
+
if self.load_config.load_format == LoadFormat.FASTSAFETENSORS:
|
|
374
|
+
weights_iterator = fastsafetensors_weights_iterator(
|
|
375
|
+
hf_weights_files,
|
|
376
|
+
self.load_config.use_tqdm_on_load,
|
|
377
|
+
)
|
|
378
|
+
else:
|
|
379
|
+
weights_iterator = safetensors_weights_iterator(
|
|
380
|
+
hf_weights_files,
|
|
381
|
+
self.load_config.use_tqdm_on_load,
|
|
382
|
+
)
|
|
383
|
+
else:
|
|
384
|
+
weights_iterator = pt_weights_iterator(
|
|
385
|
+
hf_weights_files,
|
|
386
|
+
self.load_config.use_tqdm_on_load,
|
|
387
|
+
)
|
|
388
|
+
|
|
389
|
+
if current_platform.is_tpu():
|
|
390
|
+
# In PyTorch XLA, we should call `xm.mark_step` frequently so that
|
|
391
|
+
# not too many ops are accumulated in the XLA program.
|
|
392
|
+
import torch_xla.core.xla_model as xm
|
|
393
|
+
|
|
394
|
+
def _xla_weights_iterator(iterator: Generator):
|
|
395
|
+
for weights in iterator:
|
|
396
|
+
yield weights
|
|
397
|
+
xm.mark_step()
|
|
398
|
+
|
|
399
|
+
weights_iterator = _xla_weights_iterator(weights_iterator)
|
|
400
|
+
|
|
401
|
+
elif current_platform.is_hpu():
|
|
402
|
+
import habana_frameworks.torch.core as htcore
|
|
403
|
+
|
|
404
|
+
def _hpu_weights_iterator(iterator: Generator):
|
|
405
|
+
for weights in iterator:
|
|
406
|
+
yield weights
|
|
407
|
+
htcore.mark_step()
|
|
408
|
+
|
|
409
|
+
weights_iterator = _hpu_weights_iterator(weights_iterator)
|
|
410
|
+
|
|
411
|
+
if self.counter_before_loading_weights == 0.0:
|
|
412
|
+
self.counter_before_loading_weights = time.perf_counter()
|
|
413
|
+
# Apply the prefix.
|
|
414
|
+
return ((source.prefix + name, tensor)
|
|
415
|
+
for (name, tensor) in weights_iterator)
|
|
416
|
+
|
|
417
|
+
def get_all_weights(
|
|
418
|
+
self,
|
|
419
|
+
model_config: ModelConfig,
|
|
420
|
+
model: nn.Module,
|
|
421
|
+
) -> Generator[Tuple[str, torch.Tensor], None, None]:
|
|
422
|
+
primary_weights = DefaultModelLoader.Source(
|
|
423
|
+
model_config.model,
|
|
424
|
+
model_config.revision,
|
|
425
|
+
prefix="",
|
|
426
|
+
fall_back_to_pt=getattr(model, "fall_back_to_pt_during_load",
|
|
427
|
+
True),
|
|
428
|
+
allow_patterns_overrides=getattr(model, "allow_patterns_overrides",
|
|
429
|
+
None),
|
|
430
|
+
)
|
|
431
|
+
yield from self._get_weights_iterator(primary_weights)
|
|
432
|
+
|
|
433
|
+
secondary_weights = cast(
|
|
434
|
+
Iterable[DefaultModelLoader.Source],
|
|
435
|
+
getattr(model, "secondary_weights", ()),
|
|
436
|
+
)
|
|
437
|
+
for source in secondary_weights:
|
|
438
|
+
yield from self._get_weights_iterator(source)
|
|
439
|
+
|
|
440
|
+
def download_model(self, model_config: ModelConfig) -> None:
|
|
441
|
+
self._prepare_weights(model_config.model,
|
|
442
|
+
model_config.revision,
|
|
443
|
+
fall_back_to_pt=True,
|
|
444
|
+
allow_patterns_overrides=None)
|
|
445
|
+
|
|
446
|
+
def load_model(self, vllm_config: VllmConfig) -> nn.Module:
|
|
447
|
+
device_config = vllm_config.device_config
|
|
448
|
+
model_config = vllm_config.model_config
|
|
449
|
+
target_device = torch.device(device_config.device)
|
|
450
|
+
with set_default_torch_dtype(model_config.dtype):
|
|
451
|
+
with target_device:
|
|
452
|
+
model = _initialize_model(vllm_config=vllm_config)
|
|
453
|
+
|
|
454
|
+
weights_to_load = {name for name, _ in model.named_parameters()}
|
|
455
|
+
loaded_weights = model.load_weights(
|
|
456
|
+
self.get_all_weights(model_config, model))
|
|
457
|
+
self.counter_after_loading_weights = time.perf_counter()
|
|
458
|
+
logger.info(
|
|
459
|
+
"Loading weights took %.2f seconds",
|
|
460
|
+
self.counter_after_loading_weights -
|
|
461
|
+
self.counter_before_loading_weights)
|
|
462
|
+
# We only enable strict check for non-quantized models
|
|
463
|
+
# that have loaded weights tracking currently.
|
|
464
|
+
if model_config.quantization is None and loaded_weights is not None:
|
|
465
|
+
weights_not_loaded = weights_to_load - loaded_weights
|
|
466
|
+
if weights_not_loaded:
|
|
467
|
+
raise ValueError(
|
|
468
|
+
"Following weights were not initialized from "
|
|
469
|
+
f"checkpoint: {weights_not_loaded}")
|
|
470
|
+
|
|
471
|
+
_process_weights_after_loading(model, model_config, target_device)
|
|
472
|
+
|
|
473
|
+
return model.eval()
|
|
474
|
+
|
|
475
|
+
|
|
476
|
+
class DummyModelLoader(BaseModelLoader):
|
|
477
|
+
"""Model loader that will set model weights to random values."""
|
|
478
|
+
|
|
479
|
+
def __init__(self, load_config: LoadConfig):
|
|
480
|
+
super().__init__(load_config)
|
|
481
|
+
if load_config.model_loader_extra_config:
|
|
482
|
+
raise ValueError(f"Model loader extra config is not supported for "
|
|
483
|
+
f"load format {load_config.load_format}")
|
|
484
|
+
|
|
485
|
+
def download_model(self, model_config: ModelConfig) -> None:
|
|
486
|
+
pass # Nothing to download
|
|
487
|
+
|
|
488
|
+
def load_model(self, vllm_config: VllmConfig) -> nn.Module:
|
|
489
|
+
device_config = vllm_config.device_config
|
|
490
|
+
model_config = vllm_config.model_config
|
|
491
|
+
target_device = torch.device(device_config.device)
|
|
492
|
+
with set_default_torch_dtype(model_config.dtype):
|
|
493
|
+
with target_device:
|
|
494
|
+
model = _initialize_model(vllm_config=vllm_config)
|
|
495
|
+
# NOTE(woosuk): For accurate performance evaluation, we assign
|
|
496
|
+
# random values to the weights.
|
|
497
|
+
initialize_dummy_weights(model)
|
|
498
|
+
|
|
499
|
+
_process_weights_after_loading(model, model_config, target_device)
|
|
500
|
+
return model.eval()
|
|
501
|
+
|
|
502
|
+
|
|
503
|
+
class TensorizerLoader(BaseModelLoader):
|
|
504
|
+
"""Model loader using CoreWeave's tensorizer library."""
|
|
505
|
+
|
|
506
|
+
def __init__(self, load_config: LoadConfig):
|
|
507
|
+
super().__init__(load_config)
|
|
508
|
+
if isinstance(load_config.model_loader_extra_config, TensorizerConfig):
|
|
509
|
+
self.tensorizer_config = load_config.model_loader_extra_config
|
|
510
|
+
else:
|
|
511
|
+
self.tensorizer_config = TensorizerConfig(
|
|
512
|
+
**load_config.model_loader_extra_config)
|
|
513
|
+
|
|
514
|
+
def _verify_config(self, model_config: ModelConfig,
|
|
515
|
+
parallel_config: ParallelConfig):
|
|
516
|
+
self.tensorizer_config.verify_with_model_config(model_config)
|
|
517
|
+
self.tensorizer_config.verify_with_parallel_config(parallel_config)
|
|
518
|
+
|
|
519
|
+
def _get_weights_iterator(
|
|
520
|
+
self, ) -> Generator[Tuple[str, torch.Tensor], None, None]:
|
|
521
|
+
tensorizer_args = self.tensorizer_config._construct_tensorizer_args()
|
|
522
|
+
return tensorizer_weights_iterator(tensorizer_args)
|
|
523
|
+
|
|
524
|
+
def _load_model_serialized_cpu(
|
|
525
|
+
self,
|
|
526
|
+
vllm_config: VllmConfig,
|
|
527
|
+
) -> nn.Module:
|
|
528
|
+
"""Load a serialized model with tensorizer to the CPU.
|
|
529
|
+
|
|
530
|
+
This is only necessary when the model isn't vLLM-tensorized (see
|
|
531
|
+
examples/other/tensorize_vllm_model.py) This should still
|
|
532
|
+
be faster than default HuggingFace loading, but will be slower than
|
|
533
|
+
loading a vLLM-tensorized model.
|
|
534
|
+
"""
|
|
535
|
+
device_config = vllm_config.device_config
|
|
536
|
+
model_config = vllm_config.model_config
|
|
537
|
+
with set_default_torch_dtype(model_config.dtype):
|
|
538
|
+
with torch.device(device_config.device):
|
|
539
|
+
model = _initialize_model(vllm_config=vllm_config)
|
|
540
|
+
|
|
541
|
+
model.load_weights(self._get_weights_iterator())
|
|
542
|
+
return model.eval()
|
|
543
|
+
|
|
544
|
+
def _load_model_serialized(
|
|
545
|
+
self,
|
|
546
|
+
vllm_config: VllmConfig,
|
|
547
|
+
) -> nn.Module:
|
|
548
|
+
"""Load a serialized model with tensorizer.
|
|
549
|
+
|
|
550
|
+
Expects a vLLM-tensorized model. See the
|
|
551
|
+
examples/other/tensorize_vllm_model.py example script
|
|
552
|
+
for serializing vLLM models."""
|
|
553
|
+
|
|
554
|
+
device_config = vllm_config.device_config
|
|
555
|
+
model_config = vllm_config.model_config
|
|
556
|
+
|
|
557
|
+
with set_default_torch_dtype(model_config.dtype):
|
|
558
|
+
with torch.device(device_config.device):
|
|
559
|
+
model_class = get_model_architecture(model_config)[0]
|
|
560
|
+
|
|
561
|
+
tensorizer_config = copy.copy(self.tensorizer_config)
|
|
562
|
+
tensorizer_config.model_class = model_class
|
|
563
|
+
tensorizer_config.hf_config = model_config.hf_config
|
|
564
|
+
tensorizer_config.dtype = model_config.dtype
|
|
565
|
+
|
|
566
|
+
model = load_with_tensorizer(tensorizer_config,
|
|
567
|
+
vllm_config=vllm_config)
|
|
568
|
+
return model.eval()
|
|
569
|
+
|
|
570
|
+
def download_model(self, model_config: ModelConfig) -> None:
|
|
571
|
+
self.tensorizer_config.verify_with_model_config(model_config)
|
|
572
|
+
|
|
573
|
+
with self.tensorizer_config.open_stream():
|
|
574
|
+
pass
|
|
575
|
+
|
|
576
|
+
def load_model(self, vllm_config: VllmConfig) -> nn.Module:
|
|
577
|
+
model_config = vllm_config.model_config
|
|
578
|
+
parallel_config = vllm_config.parallel_config
|
|
579
|
+
self._verify_config(model_config, parallel_config)
|
|
580
|
+
|
|
581
|
+
if parallel_config.tensor_parallel_size > 1:
|
|
582
|
+
from vllm.distributed import get_tensor_model_parallel_rank
|
|
583
|
+
|
|
584
|
+
self.tensorizer_config.tensorizer_uri = (
|
|
585
|
+
self.tensorizer_config.tensorizer_uri %
|
|
586
|
+
get_tensor_model_parallel_rank())
|
|
587
|
+
|
|
588
|
+
if is_vllm_tensorized(self.tensorizer_config):
|
|
589
|
+
return self._load_model_serialized(vllm_config=vllm_config)
|
|
590
|
+
return self._load_model_serialized_cpu(vllm_config=vllm_config)
|
|
591
|
+
|
|
592
|
+
@staticmethod
|
|
593
|
+
def save_model(
|
|
594
|
+
model: torch.nn.Module,
|
|
595
|
+
tensorizer_config: TensorizerConfig,
|
|
596
|
+
) -> None:
|
|
597
|
+
serialize_vllm_model(
|
|
598
|
+
model=model,
|
|
599
|
+
tensorizer_config=tensorizer_config,
|
|
600
|
+
)
|
|
601
|
+
|
|
602
|
+
|
|
603
|
+
class ShardedStateLoader(BaseModelLoader):
|
|
604
|
+
"""
|
|
605
|
+
Model loader that directly loads each worker's model state dict, which
|
|
606
|
+
enables a fast load path for large tensor-parallel models where each worker
|
|
607
|
+
only needs to read its own shard rather than the entire checkpoint. See
|
|
608
|
+
`examples/offline_inference/save_sharded_state.py` for creating a sharded
|
|
609
|
+
checkpoint.
|
|
610
|
+
"""
|
|
611
|
+
|
|
612
|
+
DEFAULT_PATTERN = "model-rank-{rank}-part-{part}.safetensors"
|
|
613
|
+
|
|
614
|
+
def __init__(self,
|
|
615
|
+
load_config: LoadConfig,
|
|
616
|
+
runai_model_streamer: bool = False):
|
|
617
|
+
super().__init__(load_config)
|
|
618
|
+
|
|
619
|
+
self.runai_model_streamer = runai_model_streamer
|
|
620
|
+
extra_config = ({} if load_config.model_loader_extra_config is None
|
|
621
|
+
else load_config.model_loader_extra_config.copy())
|
|
622
|
+
self.pattern = extra_config.pop("pattern", self.DEFAULT_PATTERN)
|
|
623
|
+
if extra_config:
|
|
624
|
+
raise ValueError(f"Unexpected extra config keys for load format "
|
|
625
|
+
f"{load_config.load_format}: "
|
|
626
|
+
f"{load_config.model_loader_extra_config.keys()}")
|
|
627
|
+
|
|
628
|
+

    @staticmethod
    def _filter_subtensors(
            tensors: Dict[str, torch.Tensor], ) -> Dict[str, torch.Tensor]:
        """
        Filter out all tensors that share the same memory or a subset of the
        memory of another tensor.
        """
        same_storage_groups: Dict[Any, List[Tuple[str, torch.Tensor]]] = (
            collections.defaultdict(list))
        for key, tensor in tensors.items():
            if tensor.numel():
                ptr = tensor.untyped_storage().data_ptr()
                same_storage_groups[tensor.device, ptr].append((key, tensor))

        def get_end_ptr(tensor: torch.Tensor) -> int:
            return tensor.view(-1)[-1].data_ptr() + tensor.element_size()

        result: Dict[str, torch.Tensor] = {}
        for group in same_storage_groups.values():
            for k, t in group:
                a, b = t.data_ptr(), get_end_ptr(t)
                for k2, t2 in group:
                    if not t2.is_contiguous():
                        continue
                    a2, b2 = t2.data_ptr(), get_end_ptr(t2)
                    if a < a2 or b2 < b:
                        continue
                    if a2 < a or b < b2 or not t.is_contiguous():
                        break  # t2 covers strictly more memory than t.
                    if k2 < k:
                        # Same tensors, keep the one with the smaller key.
                        break
                else:
                    result[k] = t
        return result
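
    # Illustrative note (not part of the original file): a common case this
    # filter handles is tied weights, e.g. an lm_head weight that aliases the
    # input embedding storage; only one of the aliased keys is kept so the
    # shared storage is not serialized twice.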

    def _prepare_weights(self, model_name_or_path: str,
                         revision: Optional[str]):
        if is_s3(model_name_or_path) or os.path.isdir(model_name_or_path):
            return model_name_or_path
        else:
            allow_patterns = ["*.safetensors"]
            return download_weights_from_hf(
                model_name_or_path,
                self.load_config.download_dir,
                allow_patterns,
                revision,
                ignore_patterns=self.load_config.ignore_patterns,
            )

    def download_model(self, model_config: ModelConfig) -> None:
        self._prepare_weights(model_config.model, model_config.revision)

    def load_model(self, vllm_config: VllmConfig) -> nn.Module:
        device_config = vllm_config.device_config
        model_config = vllm_config.model_config
        target_device = torch.device(device_config.device)

        from vllm.distributed import get_tensor_model_parallel_rank

        model_weights = model_config.model
        if hasattr(model_config, "model_weights"):
            model_weights = model_config.model_weights
        local_model_path = model_weights

        with set_default_torch_dtype(model_config.dtype):
            with target_device:
                model = _initialize_model(vllm_config=vllm_config)
                _process_weights_after_loading(model, model_config,
                                               target_device)
            rank = get_tensor_model_parallel_rank()
            pattern = os.path.join(
                local_model_path,
                self.pattern.format(rank=rank, part="*"),
            )

            filepaths = []
            if is_s3(local_model_path):
                file_pattern = f"*{self.pattern.format(rank=rank, part=' * ')}"
                filepaths = s3_glob(path=local_model_path,
                                    allow_pattern=[file_pattern])
            else:
                filepaths = glob.glob(pattern)
            if not filepaths:
                # TODO: support un-sharded checkpoints too
                raise ValueError(
                    f"Could not find checkpoint files '{pattern}', only "
                    f"pre-sharded checkpoints are currently supported!")
            state_dict = self._filter_subtensors(model.state_dict())
            for key, tensor in self.iterate_over_files(filepaths):
                # If loading with LoRA enabled, additional padding may
                # be added to certain parameters. We only load into a
                # narrowed view of the parameter data.
                param_data = state_dict[key].data
                param_shape = state_dict[key].shape
                for dim, size in enumerate(tensor.shape):
                    if size < param_shape[dim]:
                        param_data = param_data.narrow(dim, 0, size)
                if tensor.shape != param_shape:
                    logger.warning(
                        "loading tensor of shape %s into "
                        "parameter '%s' of shape %s",
                        tensor.shape,
                        key,
                        param_shape,
                    )
                param_data.copy_(tensor)
                state_dict.pop(key)
            if state_dict:
                raise ValueError(
                    f"Missing keys {tuple(state_dict)} in loaded state!")
            return model.eval()
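
    # Illustrative note (not part of the original file): with the default
    # pattern, rank 0 globs for "model-rank-0-part-*.safetensors" inside the
    # checkpoint directory, so each worker only reads its own shard files.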

    def iterate_over_files(
            self, paths) -> Generator[Tuple[str, torch.Tensor], None, None]:
        if self.runai_model_streamer:
            yield from runai_safetensors_weights_iterator(paths, True)
        else:
            from safetensors.torch import safe_open
            for path in paths:
                with safe_open(path, framework="pt") as f:
                    for key in f.keys():  # noqa: SIM118
                        tensor = f.get_tensor(key)
                        yield key, tensor

    @staticmethod
    def save_model(
        model: torch.nn.Module,
        path: str,
        pattern: Optional[str] = None,
        max_size: Optional[int] = None,
    ) -> None:
        from safetensors.torch import save_file

        from vllm.distributed import get_tensor_model_parallel_rank

        if pattern is None:
            pattern = ShardedStateLoader.DEFAULT_PATTERN
        rank = get_tensor_model_parallel_rank()
        part_idx = 0
        total_size = 0
        state_dict = ShardedStateLoader._filter_subtensors(model.state_dict())
        state_dict_part: Dict[str, torch.Tensor] = {}
        for key, tensor in state_dict.items():
            param_size = tensor.nelement() * tensor.element_size()
            if max_size is not None and total_size + param_size > max_size:
                filename = pattern.format(rank=rank, part=part_idx)
                save_file(
                    state_dict_part,
                    os.path.join(path, filename),
                )
                part_idx += 1
                total_size = 0
                state_dict_part = {}
            state_dict_part[key] = tensor
            total_size += param_size
        if len(state_dict_part) > 0:
            filename = pattern.format(rank=rank, part=part_idx)
            save_file(
                state_dict_part,
                os.path.join(path, filename),
            )
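
    # Illustrative note (not part of the original file): save_model starts a
    # new part file whenever adding the next tensor would push the current
    # part past max_size bytes; e.g. max_size=4 * 1024**3 (hypothetical) caps
    # each "model-rank-R-part-N.safetensors" file at roughly 4 GiB.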


class BitsAndBytesModelLoader(BaseModelLoader):
    """Model loader to load model weights with BitsAndBytes quantization."""

    possible_config_file_names = ["adapter_config.json"]

    def __init__(self, load_config: LoadConfig):
        super().__init__(load_config)

        # Save the module names without sharding.
        self.unsharded_weights_modules: List[str] = []
        # Save the module names that are sharded by column.
        self.column_sharded_weights_modules: List[str] = []
        # Store all module names (from transformers) that support
        # BNB quantization.
        self.target_modules: List[str] = []
        # Mapping of weight names from transformers to vllm.
        self.weight_mapper: Callable = lambda name: name

    def _get_weight_files(
        self,
        model_name_or_path: str,
        allowed_patterns: List[str],
        revision: Optional[str] = None,
    ) -> Tuple[str, List[str], str]:
        """Retrieve weight files. Download the files if necessary.

        Return the weight files and the file pattern."""
        is_local = os.path.isdir(model_name_or_path)

        if is_local:
            for pattern in allowed_patterns:
                weight_files = glob.glob(
                    os.path.join(model_name_or_path, pattern))
                if weight_files:
                    return model_name_or_path, weight_files, pattern
        else:
            hf_api = HfApi()
            repo_files = hf_api.list_repo_files(repo_id=model_name_or_path)
            for pattern in allowed_patterns:
                matching_files = fnmatch.filter(repo_files, pattern)
                if matching_files:
                    hf_folder = download_weights_from_hf(
                        model_name_or_path,
                        self.load_config.download_dir,
                        [pattern],
                        revision,
                        ignore_patterns=self.load_config.ignore_patterns,
                    )
                    return hf_folder, glob.glob(
                        os.path.join(hf_folder, pattern)), pattern

        raise RuntimeError(
            f"No model weights found in: `{model_name_or_path}`")

    def _prepare_weights(self, model_name_or_path: str,
                         revision: Optional[str]) -> Tuple[List[str], bool]:
        """Prepare weight files for the model."""

        allowed_patterns = ["*.safetensors", "*.bin", "*.pt"]

        hf_folder, hf_weights_files, matched_pattern = self._get_weight_files(
            model_name_or_path, allowed_patterns, revision)

        use_safetensors = matched_pattern == "*.safetensors"
        is_local = os.path.isdir(model_name_or_path)
        index_file = SAFE_WEIGHTS_INDEX_NAME
        if use_safetensors:
            # For models like Mistral-7B-Instruct-v0.3
            # there are both sharded safetensors files and a consolidated
            # safetensors file. Using both breaks.
            # Here, we download the `model.safetensors.index.json` and filter
            # any files not found in the index.
            if not is_local:
                download_safetensors_index_file_from_hf(
                    model_name_or_path,
                    index_file,
                    self.load_config.download_dir,
                    revision,
                )
            hf_weights_files = filter_duplicate_safetensors_files(
                hf_weights_files, hf_folder, index_file)
        else:
            hf_weights_files = filter_files_not_needed_for_inference(
                hf_weights_files)

        if len(hf_weights_files) == 0:
            raise RuntimeError(
                f"Cannot find any model weights with `{model_name_or_path}`")

        return hf_weights_files, use_safetensors

    def _hf_weight_iter(self, hf_weights_files, use_safetensors: bool):
        if use_safetensors:
            iterator = safetensors_weights_iterator(
                hf_weights_files,
                self.load_config.use_tqdm_on_load,
            )
        else:
            iterator = pt_weights_iterator(
                hf_weights_files,
                self.load_config.use_tqdm_on_load,
            )
        for org_name, param in iterator:
            # Map weight names from transformers to vllm while preserving
            # the original names.
            mapped_name = self.weight_mapper(org_name)
            yield org_name, mapped_name, param

    def _get_quantized_weights_iterator(
        self,
        model_name_or_path: str,
        revision: Optional[str],
        pre_quant: bool,
        load_8bit: bool,
    ) -> Tuple[Generator[Tuple[str, torch.Tensor], None, None], Dict[str,
                                                                     Any]]:
        """Get an iterator to the model weights with bitsandbytes quantization,
        as well as the quantization state dictionary."""

        # only load the bitsandbytes module when needed
        try:
            import bitsandbytes

            if bitsandbytes.__version__ < "0.45.3":
                raise ImportError("bitsandbytes version is wrong. Please "
                                  "install bitsandbytes>=0.45.3.")
        except ImportError as err:
            raise ImportError("Please install bitsandbytes>=0.45.3 via "
                              "`pip install bitsandbytes>=0.45.3` to use "
                              "bitsandbytes quantizer.") from err

        hf_weights_files, use_safetensors = self._prepare_weights(
            model_name_or_path, revision)

        quant_state_dict: Dict[str, Any] = {}

        if pre_quant:
            if load_8bit:
                return self._quantized_8bit_generator(
                    hf_weights_files, use_safetensors,
                    quant_state_dict), quant_state_dict
            else:
                return self._quantized_4bit_generator(
                    hf_weights_files, use_safetensors,
                    quant_state_dict), quant_state_dict

        return self._unquantized_generator(hf_weights_files, use_safetensors,
                                           quant_state_dict), quant_state_dict

    def _is_8bit_weight_name(self, weight_name: str):
        quantized_suffix = {".scb", ".weight_format"}
        return any(weight_name.lower().endswith(suffix)
                   for suffix in quantized_suffix)

    def _is_4bit_weight_name(self, weight_name: str):
        quantized_suffix = {
            "absmax",
            "quant_map",
            "nested_absmax",
            "nested_quant_map",
            "bitsandbytes",
        }
        suffix = weight_name.split(".")[-1]
        return any(q_suffix in suffix for q_suffix in quantized_suffix)
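
    # Illustrative note (not part of the original file): in a pre-quantized
    # 4-bit checkpoint, auxiliary tensors sit next to each weight, so a
    # hypothetical name like "model.layers.0.mlp.down_proj.weight.absmax"
    # matches _is_4bit_weight_name, while the weight itself
    # ("...down_proj.weight") does not.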

    def _quantized_8bit_generator(self, hf_weights_files, use_safetensors,
                                  quant_state_dict) -> Generator:
        for (
                org_weight_name,
                mapped_weight_name,
                weight_tensor,
        ) in self._hf_weight_iter(hf_weights_files, use_safetensors):
            if not mapped_weight_name.lower().endswith(".scb"):
                continue

            weight_key = mapped_weight_name.lower().replace(".scb", ".weight")
            quant_state_dict[weight_key] = weight_tensor

        for (
                org_weight_name,
                mapped_weight_name,
                weight_tensor,
        ) in self._hf_weight_iter(hf_weights_files, use_safetensors):
            if self._is_8bit_weight_name(mapped_weight_name):
                continue

            if mapped_weight_name in quant_state_dict:
                set_weight_attrs(weight_tensor, {"load_in_8bit": True})
                yield org_weight_name, weight_tensor
            else:
                yield org_weight_name, weight_tensor

    def _quantized_4bit_generator(self, hf_weights_files, use_safetensors,
                                  quant_state_dict) -> Generator:
        from bitsandbytes.functional import QuantState

        # First iterate over all quant state weights.
        weight_iterator = self._hf_weight_iter(hf_weights_files,
                                               use_safetensors)
        temp_state_dict = {}
        for (
                org_weight_name,
                mapped_weight_name,
                weight_tensor,
        ) in weight_iterator:
            if not self._is_4bit_weight_name(mapped_weight_name):
                continue
            # The bitsandbytes library requires
            # weight.quant_state.bitsandbytes__* tensors on the CPU.
            if "quant_state.bitsandbytes" in mapped_weight_name:
                temp_state_dict[mapped_weight_name] = weight_tensor.cpu().data
            else:
                temp_state_dict[mapped_weight_name] = weight_tensor

        # Closure to parse quant_state for each pre-quantized weight.
        def _parse_quant_state(param_name: str,
                               temp_state_dict: Dict) -> QuantState:
            quant_state = {}
            for k in temp_state_dict:
                if param_name + "." in k:
                    quant_state[k] = temp_state_dict[k]

            return QuantState.from_dict(quant_state,
                                        device=current_platform.device_type)

        # Second, iterate over all pre-quantized and normal weights;
        # pre-quantized weights carry a quant_state.
        for (
                org_weight_name,
                mapped_weight_name,
                weight_tensor,
        ) in self._hf_weight_iter(hf_weights_files, use_safetensors):
            if self._is_4bit_weight_name(mapped_weight_name):
                continue

            if (f"{mapped_weight_name}.quant_state.bitsandbytes__nf4"
                    in temp_state_dict) or (
                        f"{mapped_weight_name}.quant_state.bitsandbytes__fp4"
                        in temp_state_dict):
                quant_state = _parse_quant_state(mapped_weight_name,
                                                 temp_state_dict)
                quant_state_dict[mapped_weight_name] = quant_state
                yield org_weight_name, weight_tensor
            else:
                yield org_weight_name, weight_tensor
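
    # Illustrative note (not part of the original file): for a pre-quantized
    # NF4 checkpoint, a weight named "...down_proj.weight" (hypothetical) is
    # accompanied by keys such as
    # "...down_proj.weight.quant_state.bitsandbytes__nf4" and
    # "...down_proj.weight.absmax"; _parse_quant_state gathers these into a
    # single bitsandbytes QuantState for that weight.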

    def _unquantized_generator(self, hf_weights_files, use_safetensors,
                               quant_state_dict) -> Generator:
        from bitsandbytes.functional import quantize_4bit

        tp_size = get_tensor_model_parallel_world_size()
        tp_rank = get_tensor_model_parallel_rank()

        for (
                org_weight_name,
                mapped_weight_name,
                weight_tensor,
        ) in self._hf_weight_iter(hf_weights_files, use_safetensors):
            if any(target_module in mapped_weight_name
                   for target_module in self.target_modules
                   ) and mapped_weight_name.endswith(".weight"):
                # Without sharding
                if any(
                        mapped_weight_name.startswith(module)
                        for module in self.unsharded_weights_modules):
                    weight_sub_tensor = weight_tensor
                # Shard by column
                elif any(
                        mapped_weight_name.startswith(module)
                        for module in self.column_sharded_weights_modules):
                    total_size = weight_tensor.size(-1)
                    start_index = total_size // tp_size * tp_rank
                    end_index = total_size // tp_size * (tp_rank + 1)
                    weight_sub_tensor = weight_tensor[...,
                                                      start_index:end_index]
                # Weights may have been fused on disk. In this case, we assume
                # that the weight and module use the same name.
                elif any(
                        mapped_weight_name.startswith(module)
                        for module in self.maybe_fused_weights_modules):
                    # Special case for fused weights:
                    # get the size of each shard weight tensor.
                    total_shard_sizes = next(
                        (sizes for module, sizes in
                         self.maybe_fused_weights_modules.items()
                         if mapped_weight_name.startswith(module)))
                    total_size = weight_tensor.size(0)
                    assert total_size == sum(total_shard_sizes)
                    # Get the start/end index of each shard weight tensor.
                    total_start_index = list(
                        itertools.accumulate([0] + total_shard_sizes))[:-1]
                    shard_weights_index = [(
                        idx + size // tp_size * tp_rank,
                        idx + size // tp_size * (tp_rank + 1),
                    ) for idx, size in zip(total_start_index,
                                           total_shard_sizes)]
                    # Slice and reorder the weight tensor.
                    weight_tensor = [
                        weight_tensor[start_index:end_index, ...]
                        for start_index, end_index in shard_weights_index
                    ]
                    weight_sub_tensor = torch.cat(weight_tensor, dim=0)
                # Shard by row
                else:
                    total_size = weight_tensor.size(0)
                    start_index = total_size // tp_size * tp_rank
                    end_index = total_size // tp_size * (tp_rank + 1)
                    weight_sub_tensor = weight_tensor[start_index:end_index,
                                                      ...]

                # bitsandbytes requires the data to be on the GPU.
                if weight_sub_tensor.is_cuda:
                    loaded_weight = weight_sub_tensor
                else:
                    loaded_weight = weight_sub_tensor.cuda()

                # Remove the following after the issue is fixed:
                # https://github.com/bitsandbytes-foundation/bitsandbytes/issues/1342
                if loaded_weight.is_contiguous() is False:
                    loaded_weight = loaded_weight.contiguous()

                with set_default_torch_dtype(torch.float32):
                    processed_weight, quant_state = quantize_4bit(
                        loaded_weight,
                        compress_statistics=True,
                        quant_type="nf4",
                    )

                quant_state_dict[mapped_weight_name] = quant_state
            else:
                processed_weight = weight_tensor
            yield org_weight_name, processed_weight
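
    # Illustrative arithmetic (not part of the original file): for a
    # column-sharded weight with 4096 columns, tp_size=4 and tp_rank=1, the
    # slice above keeps columns 1024:2048, so each rank quantizes only its
    # 1/tp_size slice before handing it to bitsandbytes.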

    def _get_bnb_target_modules(self, model: nn.Module) -> None:

        for name, module in model.named_modules():
            if isinstance(module, (LinearBase, )):
                if modules_info := self.modules_mapping.get_sub_modules(name):
                    # Map vllm's names to transformers's names.
                    rep_name, sub_modules = modules_info
                    for sub_name in sub_modules:
                        self.target_modules.append(
                            name.replace(rep_name, sub_name))
                # Add original module name even if the module has stacked map,
                # in case model has a mixture of disk-merged and disk-split
                # weights with same last name.
                self.target_modules.append(name)

        assert self.target_modules, (
            "vllm currently does not support BNB quantization for "
            f"{type(model).__name__}")

    def _load_weights(self, model_config: ModelConfig,
                      model: nn.Module) -> None:
        if not hasattr(model, "load_weights"):
            raise AttributeError(
                "The required method 'load_weights' is not defined in class"
                f" {type(model).__name__}.")

        if not hasattr(model, "packed_modules_mapping"):
            raise AttributeError(
                f"Model {type(model).__name__} does not support BitsAndBytes "
                "quantization yet. No 'packed_modules_mapping' found.")

        self.modules_mapping = ParamMapping(
            copy.deepcopy(model.packed_modules_mapping))

        # For some models like Molmo, we need to use hf_to_vllm_mapper
        # to ensure correct loading of weights.
        if hf_to_vllm_mapper := getattr(model, "hf_to_vllm_mapper", None):
            self.weight_mapper = lambda name: hf_to_vllm_mapper._map_name(name)

        # Modules whose weights might have been fused on disk; we need their
        # output_sizes to shard them in flight correctly with TP.
        self.maybe_fused_weights_modules: Dict[str, List[int]] = {}
        self._get_bnb_target_modules(model)
        for name, module in model.named_modules():
            # Some modules like `ReplicatedLinear` should not have their
            # weights sharded. The reason for implementing it this way is to
            # avoid new static variables in the model implementation.
            if isinstance(module, (ReplicatedLinear, )):
                self.unsharded_weights_modules.append(name)
            # `QKVParallelLinear` and `MergedColumnParallelLinear` might have
            # fused weights on disk. We need to use the output sizes of these
            # modules to shard the weights correctly.
            elif isinstance(module,
                            (QKVParallelLinear, MergedColumnParallelLinear)):
                self.maybe_fused_weights_modules[name] = module.output_sizes
            # In TP, these weights are partitioned along the column
            # dimension (dim=-1).
            elif isinstance(module, (RowParallelLinear, )):
                self.column_sharded_weights_modules.append(name)

        self.model_type = type(model).__name__

        logger.info("Loading weights with BitsAndBytes quantization. "
                    "May take a while ...")

        quant_config = getattr(model_config.hf_config, "quantization_config",
                               None)

        pre_quant = False
        if quant_config is not None:
            quant_method = quant_config.get("quant_method")
            if quant_method == "bitsandbytes":
                pre_quant = True
            else:
                raise ValueError(
                    f"BitsAndBytes loader does not support {quant_method} "
                    "quantization")

        # The quant_states in pre-quantized models cannot work with a split
        # weight tensor, so TP does not work with pre-quantized bnb models.
        if pre_quant and get_tensor_model_parallel_world_size() > 1:
            raise ValueError(
                "Prequant BitsAndBytes models with tensor parallelism is not "
                "supported. Please try with pipeline parallelism.")

        load_8bit = False
        if pre_quant:
            load_8bit = quant_config.get("load_in_8bit", False)

        qweight_iterator, quant_state_dict = (
            self._get_quantized_weights_iterator(model_config.model,
                                                 model_config.revision,
                                                 pre_quant, load_8bit))

        weights_to_load = {name for name, _ in model.named_parameters()}
        loaded_weights = model.load_weights(qweight_iterator)
        # Some models may not implement the weight-loading tracker.
        if loaded_weights is not None:
            weights_not_loaded = weights_to_load - loaded_weights
            if weights_not_loaded:
                raise ValueError("Following weights were not initialized from "
                                 f"checkpoint: {weights_not_loaded}")

        torch.cuda.empty_cache()

        param_dict = dict(model.named_parameters())
        stacked_quant_state_dict: Dict[str, Dict[int, Any]] = {}
        # TODO: Change this lazy import to a normal import
        # after the checks are updated to run on a new version.
        from vllm.model_executor.models.utils import is_pp_missing_parameter

        for quant_param_name in quant_state_dict:
            if is_pp_missing_parameter(quant_param_name, model):
                continue

            non_stacked_param_name = quant_param_name

            shard_index = 0
            for shard_name, (
                    weight_name,
                    index,
            ) in self.modules_mapping.inverse_packed_mapping.items():
                # Some models, such as MiniCPM V2.5/2.6, contain both
                # module names 'kv_proj' and 'qkv_proj'. To prevent 'kv_proj'
                # from being incorrectly identified as being present in
                # 'vpm.encoder.layers.0.self_attn.qkv_proj.weight'.
                shard_pos = quant_param_name.find(shard_name)
                can_correct_rename = (shard_pos
                                      > 0) and (quant_param_name[shard_pos - 1]
                                                == ".")
                # If the quant_param_name is packed, it won't occur in the
                # param_dict before renaming.
                new_quant_param_name = quant_param_name.replace(
                    shard_name, weight_name)
                need_rename = (quant_param_name not in param_dict) \
                    and (new_quant_param_name in param_dict)
                if can_correct_rename and need_rename:
                    shard_index = index
                    quant_param_name = new_quant_param_name
                    break

            # Models like Clip/Siglip may skip some layers in initialization,
            # causing unused quant_param_name in state_dict.
            if quant_param_name not in param_dict:
                continue

            if quant_param_name not in stacked_quant_state_dict:
                stacked_quant_state_dict[quant_param_name] = {}

            stacked_quant_state_dict[quant_param_name][shard_index] = (
                quant_state_dict[non_stacked_param_name])

        # Save quant_states and offsets as attributes of the parameters.
        for param_name, param in param_dict.items():
            if param_name in stacked_quant_state_dict:
                quant_states = stacked_quant_state_dict[param_name]
                set_weight_attrs(param, {"bnb_quant_state": quant_states})

                pack_ratio = getattr(param, "pack_factor", -1)
                if pack_ratio == -1:
                    raise ValueError(
                        f"pack_factor not set for parameter {param_name}.")

                num_elements = [0] * len(quant_states)
                for seq, quant_state in quant_states.items():
                    num_elements[seq] = (math.prod(quant_state.shape) //
                                         pack_ratio)

                offsets = np.concatenate(([0], np.cumsum(num_elements)))
                # Make torch infer_schema happy.
                offsets = torch.tensor(offsets).cpu()
                set_weight_attrs(param, {"bnb_shard_offsets": offsets})

                if load_8bit:
                    set_weight_attrs(
                        param, {"matmul_state": [None] * len(quant_states)})
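
        # Illustrative arithmetic (not part of the original file): for a fused
        # parameter packed from three shards whose quant states cover 1024,
        # 1024 and 4096 packed elements, num_elements is [1024, 1024, 4096]
        # and bnb_shard_offsets becomes [0, 1024, 2048, 6144], marking where
        # each shard's quant state applies within the packed parameter.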

    def download_model(self, model_config: ModelConfig) -> None:
        self._prepare_weights(model_config.model, model_config.revision)

    def load_model(self, vllm_config: VllmConfig) -> nn.Module:
        device_config = vllm_config.device_config
        model_config = vllm_config.model_config
        with set_default_torch_dtype(model_config.dtype):
            with torch.device(device_config.device):
                model = _initialize_model(vllm_config=vllm_config)

                self._load_weights(model_config, model)

        return model.eval()


class GGUFModelLoader(BaseModelLoader):
    """
    Model loader that can load GGUF files. This is useful for loading models
    that are quantized with GGUF and saved in the GGUF format. This loader
    supports loading both full models and sharded models.
    """

    def __init__(self, load_config: LoadConfig):
        super().__init__(load_config)
        if load_config.model_loader_extra_config:
            raise ValueError(f"Model loader extra config is not supported for "
                             f"load format {load_config.load_format}")

    def _prepare_weights(self, model_name_or_path: str):
        if os.path.isfile(model_name_or_path):
            return model_name_or_path
        else:
            raise ValueError(f"{model_name_or_path} is not a file.")

    def _get_gguf_weights_map(self, model_config: ModelConfig):
        """
        GGUF uses this naming convention for their tensors from HF checkpoint:
        `blk.N.BB.weight` and `blk.N.BB.bias`
        where N signifies the block number of a layer, and BB signifies the
        attention/mlp layer components.
        See "Standardized tensor names" in
        https://github.com/ggerganov/ggml/blob/master/docs/gguf.md for details.
        """
        config = model_config.hf_config
        model_type = config.model_type
        gguf_to_hf_name_map = {}
        # Hack: GGUF uses different model-type names than transformers.
        if model_type == "cohere":
            model_type = "command-r"
        if model_type in ("deepseek_v3", "deepseek_v2"):
            model_type = "deepseek2"
            # The GGUF layer map assumes merged expert weights, so we need to
            # map them manually.
            for idx in range(config.num_hidden_layers):
                gguf_to_hf_name_map[f"blk.{idx}.exp_probs_b.bias"] = \
                    f"model.layers.{idx}.mlp.gate.e_score_correction_bias"
                gguf_to_hf_name_map[f"blk.{idx}.ffn_down_exps.weight"] = \
                    f"model.layers.{idx}.mlp.experts.0.down_proj.weight"
                gguf_to_hf_name_map[f"blk.{idx}.ffn_gate_exps.weight"] = \
                    f"model.layers.{idx}.mlp.experts.0.gate_proj.weight"
                gguf_to_hf_name_map[f"blk.{idx}.ffn_up_exps.weight"] = \
                    f"model.layers.{idx}.mlp.experts.0.up_proj.weight"

        arch = None
        for key, value in gguf.MODEL_ARCH_NAMES.items():
            if value == model_type:
                arch = key
                break
        if arch is None:
            raise RuntimeError(f"Unknown gguf model_type: {model_type}")
        num_layers = config.num_hidden_layers
        name_map = gguf.get_tensor_name_map(arch, num_layers)
        with torch.device("meta"):
            dummy_model = AutoModelForCausalLM.from_config(
                config, trust_remote_code=model_config.trust_remote_code)
        state_dict = dummy_model.state_dict()

        for hf_name in state_dict:
            name, suffix = hf_name.rsplit(".", 1)
            gguf_name = name_map.get_name(name)
            gguf_to_hf_name_map[f"{gguf_name}.{suffix}"] = hf_name
        return gguf_to_hf_name_map
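
    # Illustrative note (not part of the original file): for a llama-style
    # checkpoint the resulting map contains pairs such as
    # "blk.0.attn_q.weight" -> "model.layers.0.self_attn.q_proj.weight"
    # (shown for orientation only): GGUF tensor names on the left, HF/vLLM
    # parameter names on the right.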

    def _get_weights_iterator(
        self, model_name_or_path: str, gguf_to_hf_name_map: Dict[str, str]
    ) -> Generator[Tuple[str, torch.Tensor], None, None]:
        return gguf_quant_weights_iterator(model_name_or_path,
                                           gguf_to_hf_name_map)

    def download_model(self, model_config: ModelConfig) -> None:
        self._prepare_weights(model_config.model)

    def load_model(self, vllm_config: VllmConfig) -> nn.Module:
        device_config = vllm_config.device_config
        model_config = vllm_config.model_config
        local_model_path = self._prepare_weights(model_config.model)
        gguf_weights_map = self._get_gguf_weights_map(model_config)
        # We can only know whether the word embeddings are tied after mapping
        # the weights.
        if "lm_head.weight" in get_gguf_extra_tensor_names(
                local_model_path, gguf_weights_map):
            model_config.hf_config.update({"tie_word_embeddings": True})

        target_device = torch.device(device_config.device)
        with set_default_torch_dtype(model_config.dtype):
            with target_device:
                model = _initialize_model(vllm_config=vllm_config)
            model.load_weights(
                self._get_weights_iterator(local_model_path, gguf_weights_map))

        _process_weights_after_loading(model, model_config, target_device)
        return model


class RunaiModelStreamerLoader(BaseModelLoader):
    """
    Model loader that can load safetensors
    files from local FS or S3 bucket.
    """

    def __init__(self, load_config: LoadConfig):
        super().__init__(load_config)
        if load_config.model_loader_extra_config:
            extra_config = load_config.model_loader_extra_config

            if ("concurrency" in extra_config
                    and isinstance(extra_config.get("concurrency"), int)):
                os.environ["RUNAI_STREAMER_CONCURRENCY"] = str(
                    extra_config.get("concurrency"))

            if ("memory_limit" in extra_config
                    and isinstance(extra_config.get("memory_limit"), int)):
                os.environ["RUNAI_STREAMER_MEMORY_LIMIT"] = str(
                    extra_config.get("memory_limit"))

            runai_streamer_s3_endpoint = os.getenv(
                'RUNAI_STREAMER_S3_ENDPOINT')
            aws_endpoint_url = os.getenv('AWS_ENDPOINT_URL')
            if (runai_streamer_s3_endpoint is None
                    and aws_endpoint_url is not None):
                os.environ["RUNAI_STREAMER_S3_ENDPOINT"] = aws_endpoint_url
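
    # Illustrative note (not part of the original file): passing
    # model_loader_extra_config={"concurrency": 16} (hypothetical value)
    # exports RUNAI_STREAMER_CONCURRENCY=16 before streaming starts; likewise
    # "memory_limit" maps to RUNAI_STREAMER_MEMORY_LIMIT.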

    def _prepare_weights(self, model_name_or_path: str,
                         revision: Optional[str]) -> List[str]:
        """Prepare weights for the model.

        If the model is not local, it will be downloaded."""

        is_s3_path = is_s3(model_name_or_path)
        is_local = os.path.isdir(model_name_or_path)
        safetensors_pattern = "*.safetensors"
        index_file = SAFE_WEIGHTS_INDEX_NAME

        hf_folder = (model_name_or_path if
                     (is_local or is_s3_path) else download_weights_from_hf(
                         model_name_or_path,
                         self.load_config.download_dir,
                         [safetensors_pattern],
                         revision,
                         ignore_patterns=self.load_config.ignore_patterns,
                     ))
        if is_s3_path:
            hf_weights_files = s3_glob(path=hf_folder,
                                       allow_pattern=[safetensors_pattern])
        else:
            hf_weights_files = glob.glob(
                os.path.join(hf_folder, safetensors_pattern))

        if not is_local and not is_s3_path:
            download_safetensors_index_file_from_hf(
                model_name_or_path, index_file, self.load_config.download_dir,
                revision)

        if not hf_weights_files:
            raise RuntimeError(
                f"Cannot find any safetensors model weights with "
                f"`{model_name_or_path}`")

        return hf_weights_files

    def _get_weights_iterator(
            self, model_or_path: str,
            revision: str) -> Generator[Tuple[str, torch.Tensor], None, None]:
        """Get an iterator for the model weights based on the load format."""
        hf_weights_files = self._prepare_weights(model_or_path, revision)
        return runai_safetensors_weights_iterator(
            hf_weights_files,
            self.load_config.use_tqdm_on_load,
        )

    def download_model(self, model_config: ModelConfig) -> None:
        """Download the model if necessary."""
        self._prepare_weights(model_config.model, model_config.revision)

    def load_model(self, vllm_config: VllmConfig) -> nn.Module:
        """Perform streaming of the model to the destination device."""
        device_config = vllm_config.device_config
        model_config = vllm_config.model_config

        target_device = torch.device(device_config.device)
        with set_default_torch_dtype(model_config.dtype):
            with target_device:
                model = _initialize_model(vllm_config=vllm_config)

            model_weights = model_config.model
            if hasattr(model_config, "model_weights"):
                model_weights = model_config.model_weights
            model.load_weights(
                self._get_weights_iterator(model_weights,
                                           model_config.revision))

            _process_weights_after_loading(model, model_config, target_device)
        return model.eval()


def get_model_loader(load_config: LoadConfig) -> BaseModelLoader:
    """Get a model loader based on the load format."""
    if isinstance(load_config.load_format, type):
        return load_config.load_format(load_config)

    if load_config.load_format == LoadFormat.DUMMY:
        return DummyModelLoader(load_config)

    if load_config.load_format == LoadFormat.TENSORIZER:
        return TensorizerLoader(load_config)

    if load_config.load_format == LoadFormat.SHARDED_STATE:
        return ShardedStateLoader(load_config)

    if load_config.load_format == LoadFormat.BITSANDBYTES:
        return BitsAndBytesModelLoader(load_config)

    if load_config.load_format == LoadFormat.GGUF:
        return GGUFModelLoader(load_config)

    if load_config.load_format == LoadFormat.RUNAI_STREAMER:
        return RunaiModelStreamerLoader(load_config)

    if load_config.load_format == LoadFormat.RUNAI_STREAMER_SHARDED:
        return ShardedStateLoader(load_config, runai_model_streamer=True)

    return DefaultModelLoader(load_config)
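

# Illustrative usage (not part of the original file): the loader is selected
# from LoadConfig.load_format, e.g.
#   get_model_loader(LoadConfig(load_format=LoadFormat.GGUF))
# returns a GGUFModelLoader, and any unrecognized format falls back to
# DefaultModelLoader.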