vllm-cpu 0.8.5.post2 (cp310-cp310-manylinux_2_17_x86_64.whl)
This diff shows the contents of publicly available package versions released to one of the supported registries. The information it contains is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
Potentially problematic release: this version of vllm-cpu might be problematic. See the advisory details for more information.
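Each entry below lists a file in the wheel together with the number of lines added and removed; every entry shown here reads `+N -0`, meaning each file appears as newly added relative to the comparison baseline. A wheel is an ordinary ZIP archive, so a file-level comparison like this one can be reproduced locally with the standard library. The sketch below is illustrative only: the baseline wheel filename is hypothetical, and both files are assumed to have been downloaded beforehand.

```python
# Minimal sketch: compare the file sets of two downloaded wheels, the way a
# registry diff page lists added and removed files. Filenames are illustrative;
# the baseline wheel here is hypothetical.
import zipfile

OLD = "vllm_cpu-0.8.5-cp310-cp310-manylinux_2_17_x86_64.whl"        # hypothetical baseline
NEW = "vllm_cpu-0.8.5.post2-cp310-cp310-manylinux_2_17_x86_64.whl"  # this release

def members(path: str) -> set[str]:
    """Return the set of file paths stored inside a wheel (a ZIP archive)."""
    with zipfile.ZipFile(path) as wheel:
        return set(wheel.namelist())

old, new = members(OLD), members(NEW)
for name in sorted(new - old):
    print(f"added:   {name}")
for name in sorted(old - new):
    print(f"removed: {name}")
```

Note that a full registry diff also compares file contents line by line to produce the `+N -M` counts; the sketch above only reports which files were added or removed between the two archives.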
- vllm/_C.abi3.so +0 -0
- vllm/__init__.py +170 -0
- vllm/_custom_ops.py +1536 -0
- vllm/_ipex_ops.py +241 -0
- vllm/_version.py +34 -0
- vllm/adapter_commons/__init__.py +0 -0
- vllm/adapter_commons/layers.py +16 -0
- vllm/adapter_commons/models.py +105 -0
- vllm/adapter_commons/request.py +25 -0
- vllm/adapter_commons/utils.py +92 -0
- vllm/adapter_commons/worker_manager.py +38 -0
- vllm/assets/__init__.py +0 -0
- vllm/assets/audio.py +38 -0
- vllm/assets/base.py +40 -0
- vllm/assets/image.py +31 -0
- vllm/assets/video.py +103 -0
- vllm/attention/__init__.py +19 -0
- vllm/attention/backends/__init__.py +0 -0
- vllm/attention/backends/abstract.py +306 -0
- vllm/attention/backends/blocksparse_attn.py +457 -0
- vllm/attention/backends/cpu_mla.py +303 -0
- vllm/attention/backends/flash_attn.py +999 -0
- vllm/attention/backends/flashinfer.py +1092 -0
- vllm/attention/backends/flashmla.py +242 -0
- vllm/attention/backends/hpu_attn.py +301 -0
- vllm/attention/backends/ipex_attn.py +396 -0
- vllm/attention/backends/mla/__init__.py +0 -0
- vllm/attention/backends/mla/common.py +1444 -0
- vllm/attention/backends/pallas.py +346 -0
- vllm/attention/backends/placeholder_attn.py +399 -0
- vllm/attention/backends/rocm_aiter_mla.py +412 -0
- vllm/attention/backends/rocm_flash_attn.py +969 -0
- vllm/attention/backends/torch_sdpa.py +691 -0
- vllm/attention/backends/triton_mla.py +113 -0
- vllm/attention/backends/utils.py +609 -0
- vllm/attention/backends/xformers.py +798 -0
- vllm/attention/layer.py +443 -0
- vllm/attention/ops/__init__.py +0 -0
- vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
- vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +432 -0
- vllm/attention/ops/blocksparse_attention/interface.py +238 -0
- vllm/attention/ops/blocksparse_attention/utils.py +244 -0
- vllm/attention/ops/chunked_prefill_paged_decode.py +366 -0
- vllm/attention/ops/flashmla.py +115 -0
- vllm/attention/ops/hpu_paged_attn.py +105 -0
- vllm/attention/ops/ipex_attn.py +193 -0
- vllm/attention/ops/merge_attn_states.py +42 -0
- vllm/attention/ops/nki_flash_attn.py +905 -0
- vllm/attention/ops/paged_attn.py +255 -0
- vllm/attention/ops/prefix_prefill.py +902 -0
- vllm/attention/ops/rocm_aiter_mla.py +42 -0
- vllm/attention/ops/rocm_aiter_paged_attn.py +101 -0
- vllm/attention/ops/triton_decode_attention.py +675 -0
- vllm/attention/ops/triton_flash_attention.py +1375 -0
- vllm/attention/ops/triton_merge_attn_states.py +96 -0
- vllm/attention/selector.py +186 -0
- vllm/attention/utils/fa_utils.py +54 -0
- vllm/beam_search.py +82 -0
- vllm/benchmarks/__init__.py +0 -0
- vllm/benchmarks/datasets.py +831 -0
- vllm/benchmarks/endpoint_request_func.py +160 -0
- vllm/benchmarks/latency.py +181 -0
- vllm/benchmarks/serve.py +925 -0
- vllm/benchmarks/throughput.py +608 -0
- vllm/benchmarks/utils.py +69 -0
- vllm/collect_env.py +795 -0
- vllm/compilation/__init__.py +0 -0
- vllm/compilation/backends.py +715 -0
- vllm/compilation/compiler_interface.py +437 -0
- vllm/compilation/counter.py +33 -0
- vllm/compilation/decorators.py +249 -0
- vllm/compilation/fix_functionalization.py +182 -0
- vllm/compilation/fusion.py +617 -0
- vllm/compilation/fx_utils.py +60 -0
- vllm/compilation/inductor_pass.py +114 -0
- vllm/compilation/monitor.py +38 -0
- vllm/compilation/multi_output_match.py +108 -0
- vllm/compilation/noop_elimination.py +135 -0
- vllm/compilation/pass_manager.py +74 -0
- vllm/compilation/sequence_parallelism.py +266 -0
- vllm/compilation/torch25_custom_graph_pass.py +41 -0
- vllm/compilation/vllm_inductor_pass.py +68 -0
- vllm/compilation/wrapper.py +129 -0
- vllm/config.py +4179 -0
- vllm/connections.py +170 -0
- vllm/core/__init__.py +0 -0
- vllm/core/block/__init__.py +0 -0
- vllm/core/block/block_table.py +398 -0
- vllm/core/block/common.py +370 -0
- vllm/core/block/cpu_gpu_block_allocator.py +440 -0
- vllm/core/block/interfaces.py +318 -0
- vllm/core/block/naive_block.py +465 -0
- vllm/core/block/prefix_caching_block.py +1134 -0
- vllm/core/block/utils.py +27 -0
- vllm/core/block_manager.py +520 -0
- vllm/core/evictor.py +156 -0
- vllm/core/interfaces.py +134 -0
- vllm/core/placeholder_block_space_manager.py +99 -0
- vllm/core/scheduler.py +2060 -0
- vllm/device_allocator/__init__.py +0 -0
- vllm/device_allocator/cumem.py +280 -0
- vllm/distributed/__init__.py +5 -0
- vllm/distributed/communication_op.py +40 -0
- vllm/distributed/device_communicators/__init__.py +0 -0
- vllm/distributed/device_communicators/base_device_communicator.py +151 -0
- vllm/distributed/device_communicators/cpu_communicator.py +139 -0
- vllm/distributed/device_communicators/cuda_communicator.py +131 -0
- vllm/distributed/device_communicators/cuda_wrapper.py +179 -0
- vllm/distributed/device_communicators/custom_all_reduce.py +301 -0
- vllm/distributed/device_communicators/custom_all_reduce_utils.py +257 -0
- vllm/distributed/device_communicators/hpu_communicator.py +45 -0
- vllm/distributed/device_communicators/neuron_communicator.py +19 -0
- vllm/distributed/device_communicators/pynccl.py +217 -0
- vllm/distributed/device_communicators/pynccl_wrapper.py +340 -0
- vllm/distributed/device_communicators/shm_broadcast.py +557 -0
- vllm/distributed/device_communicators/tpu_communicator.py +93 -0
- vllm/distributed/device_communicators/xpu_communicator.py +54 -0
- vllm/distributed/kv_transfer/README.md +29 -0
- vllm/distributed/kv_transfer/__init__.py +11 -0
- vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
- vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/base.py +127 -0
- vllm/distributed/kv_transfer/kv_connector/factory.py +107 -0
- vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +98 -0
- vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +201 -0
- vllm/distributed/kv_transfer/kv_connector/simple_connector.py +328 -0
- vllm/distributed/kv_transfer/kv_connector/utils.py +90 -0
- vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +8 -0
- vllm/distributed/kv_transfer/kv_connector/v1/base.py +209 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +131 -0
- vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +383 -0
- vllm/distributed/kv_transfer/kv_connector_agent.py +76 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +174 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +160 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +236 -0
- vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_pipe/base.py +66 -0
- vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +279 -0
- vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +279 -0
- vllm/distributed/kv_transfer/kv_transfer_state.py +70 -0
- vllm/distributed/parallel_state.py +1209 -0
- vllm/distributed/utils.py +366 -0
- vllm/engine/__init__.py +0 -0
- vllm/engine/arg_utils.py +1724 -0
- vllm/engine/async_llm_engine.py +1261 -0
- vllm/engine/async_timeout.py +191 -0
- vllm/engine/llm_engine.py +2150 -0
- vllm/engine/metrics.py +717 -0
- vllm/engine/metrics_types.py +96 -0
- vllm/engine/multiprocessing/__init__.py +183 -0
- vllm/engine/multiprocessing/client.py +745 -0
- vllm/engine/multiprocessing/engine.py +450 -0
- vllm/engine/output_processor/__init__.py +0 -0
- vllm/engine/output_processor/interfaces.py +74 -0
- vllm/engine/output_processor/multi_step.py +210 -0
- vllm/engine/output_processor/single_step.py +136 -0
- vllm/engine/output_processor/stop_checker.py +130 -0
- vllm/engine/output_processor/util.py +27 -0
- vllm/engine/protocol.py +302 -0
- vllm/entrypoints/__init__.py +0 -0
- vllm/entrypoints/api_server.py +177 -0
- vllm/entrypoints/chat_utils.py +1259 -0
- vllm/entrypoints/cli/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/base.py +38 -0
- vllm/entrypoints/cli/benchmark/latency.py +29 -0
- vllm/entrypoints/cli/benchmark/main.py +53 -0
- vllm/entrypoints/cli/benchmark/serve.py +29 -0
- vllm/entrypoints/cli/benchmark/throughput.py +29 -0
- vllm/entrypoints/cli/collect_env.py +35 -0
- vllm/entrypoints/cli/main.py +59 -0
- vllm/entrypoints/cli/openai.py +175 -0
- vllm/entrypoints/cli/serve.py +59 -0
- vllm/entrypoints/cli/types.py +24 -0
- vllm/entrypoints/launcher.py +146 -0
- vllm/entrypoints/llm.py +1450 -0
- vllm/entrypoints/logger.py +44 -0
- vllm/entrypoints/openai/__init__.py +0 -0
- vllm/entrypoints/openai/api_server.py +1130 -0
- vllm/entrypoints/openai/cli_args.py +296 -0
- vllm/entrypoints/openai/logits_processors.py +89 -0
- vllm/entrypoints/openai/protocol.py +1806 -0
- vllm/entrypoints/openai/run_batch.py +439 -0
- vllm/entrypoints/openai/serving_chat.py +1210 -0
- vllm/entrypoints/openai/serving_completion.py +557 -0
- vllm/entrypoints/openai/serving_embedding.py +245 -0
- vllm/entrypoints/openai/serving_engine.py +569 -0
- vllm/entrypoints/openai/serving_models.py +314 -0
- vllm/entrypoints/openai/serving_pooling.py +237 -0
- vllm/entrypoints/openai/serving_score.py +439 -0
- vllm/entrypoints/openai/serving_tokenization.py +147 -0
- vllm/entrypoints/openai/serving_transcription.py +421 -0
- vllm/entrypoints/openai/tool_parsers/__init__.py +19 -0
- vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +163 -0
- vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +254 -0
- vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +232 -0
- vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +370 -0
- vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +211 -0
- vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +303 -0
- vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +262 -0
- vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +342 -0
- vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +110 -0
- vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +292 -0
- vllm/entrypoints/openai/tool_parsers/utils.py +123 -0
- vllm/entrypoints/score_utils.py +49 -0
- vllm/entrypoints/ssl.py +74 -0
- vllm/entrypoints/utils.py +136 -0
- vllm/env_override.py +34 -0
- vllm/envs.py +800 -0
- vllm/executor/__init__.py +0 -0
- vllm/executor/executor_base.py +400 -0
- vllm/executor/mp_distributed_executor.py +243 -0
- vllm/executor/msgspec_utils.py +29 -0
- vllm/executor/multiproc_worker_utils.py +312 -0
- vllm/executor/ray_distributed_executor.py +700 -0
- vllm/executor/ray_utils.py +400 -0
- vllm/executor/uniproc_executor.py +141 -0
- vllm/forward_context.py +159 -0
- vllm/inputs/__init__.py +37 -0
- vllm/inputs/data.py +248 -0
- vllm/inputs/parse.py +121 -0
- vllm/inputs/preprocess.py +745 -0
- vllm/inputs/registry.py +212 -0
- vllm/jsontree.py +79 -0
- vllm/logger.py +210 -0
- vllm/logging_utils/__init__.py +7 -0
- vllm/logging_utils/formatter.py +17 -0
- vllm/logits_process.py +121 -0
- vllm/lora/__init__.py +0 -0
- vllm/lora/fully_sharded_layers.py +335 -0
- vllm/lora/layers.py +1263 -0
- vllm/lora/lora.py +198 -0
- vllm/lora/models.py +802 -0
- vllm/lora/ops/__init__.py +0 -0
- vllm/lora/ops/torch_ops/__init__.py +15 -0
- vllm/lora/ops/torch_ops/lora_ops.py +115 -0
- vllm/lora/ops/triton_ops/__init__.py +11 -0
- vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
- vllm/lora/ops/triton_ops/lora_expand.py +293 -0
- vllm/lora/ops/triton_ops/lora_kernel_metadata.py +147 -0
- vllm/lora/ops/triton_ops/lora_shrink.py +247 -0
- vllm/lora/ops/triton_ops/utils.py +121 -0
- vllm/lora/peft_helper.py +115 -0
- vllm/lora/punica_wrapper/__init__.py +9 -0
- vllm/lora/punica_wrapper/punica_base.py +483 -0
- vllm/lora/punica_wrapper/punica_cpu.py +348 -0
- vllm/lora/punica_wrapper/punica_gpu.py +289 -0
- vllm/lora/punica_wrapper/punica_hpu.py +144 -0
- vllm/lora/punica_wrapper/punica_selector.py +20 -0
- vllm/lora/punica_wrapper/utils.py +161 -0
- vllm/lora/request.py +97 -0
- vllm/lora/resolver.py +83 -0
- vllm/lora/utils.py +237 -0
- vllm/lora/worker_manager.py +251 -0
- vllm/model_executor/__init__.py +15 -0
- vllm/model_executor/custom_op.py +153 -0
- vllm/model_executor/guided_decoding/__init__.py +180 -0
- vllm/model_executor/guided_decoding/guidance_decoding.py +63 -0
- vllm/model_executor/guided_decoding/guidance_logits_processors.py +85 -0
- vllm/model_executor/guided_decoding/guided_fields.py +42 -0
- vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +66 -0
- vllm/model_executor/guided_decoding/outlines_decoding.py +154 -0
- vllm/model_executor/guided_decoding/outlines_logits_processors.py +271 -0
- vllm/model_executor/guided_decoding/reasoner/__init__.py +35 -0
- vllm/model_executor/guided_decoding/utils.py +241 -0
- vllm/model_executor/guided_decoding/xgrammar_decoding.py +425 -0
- vllm/model_executor/layers/__init__.py +0 -0
- vllm/model_executor/layers/activation.py +368 -0
- vllm/model_executor/layers/fused_moe/__init__.py +51 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/README +12 -0
- vllm/model_executor/layers/fused_moe/cutlass_moe.py +180 -0
- vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +294 -0
- vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +374 -0
- vllm/model_executor/layers/fused_moe/fused_moe.py +1539 -0
- vllm/model_executor/layers/fused_moe/layer.py +949 -0
- vllm/model_executor/layers/fused_moe/moe_align_block_size.py +243 -0
- vllm/model_executor/layers/fused_moe/moe_pallas.py +64 -0
- vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +59 -0
- vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +416 -0
- vllm/model_executor/layers/fused_moe/utils.py +48 -0
- vllm/model_executor/layers/layernorm.py +277 -0
- vllm/model_executor/layers/lightning_attn.py +651 -0
- vllm/model_executor/layers/linear.py +1518 -0
- vllm/model_executor/layers/logits_processor.py +196 -0
- vllm/model_executor/layers/mamba/__init__.py +0 -0
- vllm/model_executor/layers/mamba/mamba2_metadata.py +109 -0
- vllm/model_executor/layers/mamba/mamba_mixer.py +244 -0
- vllm/model_executor/layers/mamba/mamba_mixer2.py +538 -0
- vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
- vllm/model_executor/layers/mamba/ops/causal_conv1d.py +104 -0
- vllm/model_executor/layers/mamba/ops/mamba_ssm.py +415 -0
- vllm/model_executor/layers/mamba/ops/ssd_bmm.py +261 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +588 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +750 -0
- vllm/model_executor/layers/mamba/ops/ssd_combined.py +231 -0
- vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +205 -0
- vllm/model_executor/layers/pooler.py +336 -0
- vllm/model_executor/layers/quantization/__init__.py +153 -0
- vllm/model_executor/layers/quantization/aqlm.py +374 -0
- vllm/model_executor/layers/quantization/awq.py +184 -0
- vllm/model_executor/layers/quantization/awq_marlin.py +518 -0
- vllm/model_executor/layers/quantization/awq_triton.py +319 -0
- vllm/model_executor/layers/quantization/base_config.py +145 -0
- vllm/model_executor/layers/quantization/bitblas.py +459 -0
- vllm/model_executor/layers/quantization/bitsandbytes.py +396 -0
- vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +624 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1100 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +20 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +357 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +54 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +159 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +119 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +149 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +110 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +200 -0
- vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +205 -0
- vllm/model_executor/layers/quantization/compressed_tensors/utils.py +213 -0
- vllm/model_executor/layers/quantization/deepspeedfp.py +193 -0
- vllm/model_executor/layers/quantization/experts_int8.py +194 -0
- vllm/model_executor/layers/quantization/fbgemm_fp8.py +168 -0
- vllm/model_executor/layers/quantization/fp8.py +832 -0
- vllm/model_executor/layers/quantization/gguf.py +408 -0
- vllm/model_executor/layers/quantization/gptq.py +276 -0
- vllm/model_executor/layers/quantization/gptq_bitblas.py +438 -0
- vllm/model_executor/layers/quantization/gptq_marlin.py +643 -0
- vllm/model_executor/layers/quantization/gptq_marlin_24.py +295 -0
- vllm/model_executor/layers/quantization/hqq_marlin.py +328 -0
- vllm/model_executor/layers/quantization/ipex_quant.py +250 -0
- vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +89 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +82 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +299 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +142 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +119 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +132 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +66 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +86 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +119 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +136 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +40 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +104 -0
- vllm/model_executor/layers/quantization/kv_cache.py +137 -0
- vllm/model_executor/layers/quantization/marlin.py +259 -0
- vllm/model_executor/layers/quantization/modelopt.py +410 -0
- vllm/model_executor/layers/quantization/moe_wna16.py +447 -0
- vllm/model_executor/layers/quantization/neuron_quant.py +67 -0
- vllm/model_executor/layers/quantization/ptpc_fp8.py +125 -0
- vllm/model_executor/layers/quantization/qqq.py +273 -0
- vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
- vllm/model_executor/layers/quantization/quark/quark.py +385 -0
- vllm/model_executor/layers/quantization/quark/quark_moe.py +236 -0
- vllm/model_executor/layers/quantization/quark/schemes/__init__.py +7 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +54 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +142 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +121 -0
- vllm/model_executor/layers/quantization/quark/utils.py +102 -0
- vllm/model_executor/layers/quantization/schema.py +85 -0
- vllm/model_executor/layers/quantization/torchao.py +127 -0
- vllm/model_executor/layers/quantization/tpu_int8.py +119 -0
- vllm/model_executor/layers/quantization/utils/__init__.py +5 -0
- vllm/model_executor/layers/quantization/utils/allspark_utils.py +51 -0
- vllm/model_executor/layers/quantization/utils/bitblas_utils.py +198 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/fp8_utils.py +523 -0
- vllm/model_executor/layers/quantization/utils/gptq_utils.py +94 -0
- vllm/model_executor/layers/quantization/utils/int8_utils.py +459 -0
- vllm/model_executor/layers/quantization/utils/layer_utils.py +39 -0
- vllm/model_executor/layers/quantization/utils/machete_utils.py +32 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils.py +413 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +110 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +164 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +127 -0
- vllm/model_executor/layers/quantization/utils/quant_utils.py +571 -0
- vllm/model_executor/layers/quantization/utils/w8a8_utils.py +404 -0
- vllm/model_executor/layers/rejection_sampler.py +400 -0
- vllm/model_executor/layers/resampler.py +269 -0
- vllm/model_executor/layers/rotary_embedding.py +1598 -0
- vllm/model_executor/layers/sampler.py +1221 -0
- vllm/model_executor/layers/spec_decode_base_sampler.py +258 -0
- vllm/model_executor/layers/typical_acceptance_sampler.py +172 -0
- vllm/model_executor/layers/utils.py +99 -0
- vllm/model_executor/layers/vocab_parallel_embedding.py +485 -0
- vllm/model_executor/model_loader/__init__.py +20 -0
- vllm/model_executor/model_loader/loader.py +1542 -0
- vllm/model_executor/model_loader/neuron.py +243 -0
- vllm/model_executor/model_loader/tensorizer.py +468 -0
- vllm/model_executor/model_loader/utils.py +171 -0
- vllm/model_executor/model_loader/weight_utils.py +749 -0
- vllm/model_executor/models/__init__.py +27 -0
- vllm/model_executor/models/adapters.py +247 -0
- vllm/model_executor/models/arctic.py +559 -0
- vllm/model_executor/models/aria.py +656 -0
- vllm/model_executor/models/aya_vision.py +461 -0
- vllm/model_executor/models/baichuan.py +469 -0
- vllm/model_executor/models/bamba.py +542 -0
- vllm/model_executor/models/bart.py +936 -0
- vllm/model_executor/models/bert.py +725 -0
- vllm/model_executor/models/blip.py +337 -0
- vllm/model_executor/models/blip2.py +717 -0
- vllm/model_executor/models/bloom.py +358 -0
- vllm/model_executor/models/chameleon.py +1135 -0
- vllm/model_executor/models/chatglm.py +476 -0
- vllm/model_executor/models/clip.py +410 -0
- vllm/model_executor/models/commandr.py +466 -0
- vllm/model_executor/models/constant_size_cache.py +136 -0
- vllm/model_executor/models/dbrx.py +469 -0
- vllm/model_executor/models/deepseek.py +484 -0
- vllm/model_executor/models/deepseek_mtp.py +266 -0
- vllm/model_executor/models/deepseek_v2.py +830 -0
- vllm/model_executor/models/deepseek_vl2.py +647 -0
- vllm/model_executor/models/eagle.py +247 -0
- vllm/model_executor/models/exaone.py +548 -0
- vllm/model_executor/models/fairseq2_llama.py +153 -0
- vllm/model_executor/models/falcon.py +508 -0
- vllm/model_executor/models/florence2.py +1102 -0
- vllm/model_executor/models/fuyu.py +388 -0
- vllm/model_executor/models/gemma.py +423 -0
- vllm/model_executor/models/gemma2.py +423 -0
- vllm/model_executor/models/gemma3.py +531 -0
- vllm/model_executor/models/gemma3_mm.py +716 -0
- vllm/model_executor/models/glm.py +22 -0
- vllm/model_executor/models/glm4.py +303 -0
- vllm/model_executor/models/glm4v.py +647 -0
- vllm/model_executor/models/gpt2.py +313 -0
- vllm/model_executor/models/gpt_bigcode.py +336 -0
- vllm/model_executor/models/gpt_j.py +337 -0
- vllm/model_executor/models/gpt_neox.py +330 -0
- vllm/model_executor/models/granite.py +494 -0
- vllm/model_executor/models/granite_speech.py +777 -0
- vllm/model_executor/models/granitemoe.py +435 -0
- vllm/model_executor/models/granitemoeshared.py +339 -0
- vllm/model_executor/models/gritlm.py +245 -0
- vllm/model_executor/models/grok1.py +560 -0
- vllm/model_executor/models/h2ovl.py +542 -0
- vllm/model_executor/models/idefics2_vision_model.py +387 -0
- vllm/model_executor/models/idefics3.py +767 -0
- vllm/model_executor/models/interfaces.py +569 -0
- vllm/model_executor/models/interfaces_base.py +163 -0
- vllm/model_executor/models/intern_vit.py +476 -0
- vllm/model_executor/models/internlm2.py +453 -0
- vllm/model_executor/models/internlm2_ve.py +146 -0
- vllm/model_executor/models/internvl.py +945 -0
- vllm/model_executor/models/jais.py +371 -0
- vllm/model_executor/models/jamba.py +590 -0
- vllm/model_executor/models/kimi_vl.py +577 -0
- vllm/model_executor/models/llama.py +619 -0
- vllm/model_executor/models/llama4.py +530 -0
- vllm/model_executor/models/llama_eagle.py +152 -0
- vllm/model_executor/models/llama_eagle3.py +232 -0
- vllm/model_executor/models/llava.py +869 -0
- vllm/model_executor/models/llava_next.py +582 -0
- vllm/model_executor/models/llava_next_video.py +470 -0
- vllm/model_executor/models/llava_onevision.py +954 -0
- vllm/model_executor/models/mamba.py +271 -0
- vllm/model_executor/models/mamba2.py +302 -0
- vllm/model_executor/models/mamba_cache.py +76 -0
- vllm/model_executor/models/medusa.py +210 -0
- vllm/model_executor/models/minicpm.py +592 -0
- vllm/model_executor/models/minicpm3.py +229 -0
- vllm/model_executor/models/minicpmo.py +725 -0
- vllm/model_executor/models/minicpmv.py +1287 -0
- vllm/model_executor/models/minimax_cache.py +35 -0
- vllm/model_executor/models/minimax_text_01.py +1261 -0
- vllm/model_executor/models/mistral3.py +598 -0
- vllm/model_executor/models/mixtral.py +485 -0
- vllm/model_executor/models/mixtral_quant.py +447 -0
- vllm/model_executor/models/mllama.py +1623 -0
- vllm/model_executor/models/mllama4.py +838 -0
- vllm/model_executor/models/mlp_speculator.py +205 -0
- vllm/model_executor/models/modernbert.py +325 -0
- vllm/model_executor/models/module_mapping.py +71 -0
- vllm/model_executor/models/molmo.py +1567 -0
- vllm/model_executor/models/moonvit.py +628 -0
- vllm/model_executor/models/mpt.py +329 -0
- vllm/model_executor/models/nemotron.py +506 -0
- vllm/model_executor/models/nemotron_nas.py +446 -0
- vllm/model_executor/models/nvlm_d.py +212 -0
- vllm/model_executor/models/olmo.py +390 -0
- vllm/model_executor/models/olmo2.py +412 -0
- vllm/model_executor/models/olmoe.py +449 -0
- vllm/model_executor/models/opt.py +410 -0
- vllm/model_executor/models/orion.py +356 -0
- vllm/model_executor/models/paligemma.py +397 -0
- vllm/model_executor/models/persimmon.py +342 -0
- vllm/model_executor/models/phi.py +354 -0
- vllm/model_executor/models/phi3.py +18 -0
- vllm/model_executor/models/phi3_small.py +463 -0
- vllm/model_executor/models/phi3v.py +722 -0
- vllm/model_executor/models/phi4mm.py +1263 -0
- vllm/model_executor/models/phi4mm_audio.py +1232 -0
- vllm/model_executor/models/phi4mm_utils.py +1883 -0
- vllm/model_executor/models/phimoe.py +666 -0
- vllm/model_executor/models/pixtral.py +1281 -0
- vllm/model_executor/models/plamo2.py +736 -0
- vllm/model_executor/models/prithvi_geospatial_mae.py +231 -0
- vllm/model_executor/models/qwen.py +360 -0
- vllm/model_executor/models/qwen2.py +552 -0
- vllm/model_executor/models/qwen2_5_omni_thinker.py +901 -0
- vllm/model_executor/models/qwen2_5_vl.py +1136 -0
- vllm/model_executor/models/qwen2_audio.py +402 -0
- vllm/model_executor/models/qwen2_moe.py +531 -0
- vllm/model_executor/models/qwen2_rm.py +130 -0
- vllm/model_executor/models/qwen2_vl.py +1409 -0
- vllm/model_executor/models/qwen3.py +319 -0
- vllm/model_executor/models/qwen3_moe.py +528 -0
- vllm/model_executor/models/qwen_vl.py +784 -0
- vllm/model_executor/models/registry.py +611 -0
- vllm/model_executor/models/roberta.py +332 -0
- vllm/model_executor/models/siglip.py +522 -0
- vllm/model_executor/models/skyworkr1v.py +949 -0
- vllm/model_executor/models/smolvlm.py +51 -0
- vllm/model_executor/models/solar.py +504 -0
- vllm/model_executor/models/stablelm.py +349 -0
- vllm/model_executor/models/starcoder2.py +355 -0
- vllm/model_executor/models/telechat2.py +139 -0
- vllm/model_executor/models/teleflm.py +78 -0
- vllm/model_executor/models/transformers.py +442 -0
- vllm/model_executor/models/ultravox.py +655 -0
- vllm/model_executor/models/utils.py +714 -0
- vllm/model_executor/models/vision.py +149 -0
- vllm/model_executor/models/whisper.py +746 -0
- vllm/model_executor/models/zamba2.py +1008 -0
- vllm/model_executor/parameter.py +458 -0
- vllm/model_executor/pooling_metadata.py +71 -0
- vllm/model_executor/sampling_metadata.py +596 -0
- vllm/model_executor/utils.py +53 -0
- vllm/multimodal/__init__.py +31 -0
- vllm/multimodal/audio.py +105 -0
- vllm/multimodal/base.py +218 -0
- vllm/multimodal/hasher.py +103 -0
- vllm/multimodal/image.py +77 -0
- vllm/multimodal/inputs.py +843 -0
- vllm/multimodal/parse.py +454 -0
- vllm/multimodal/processing.py +1760 -0
- vllm/multimodal/profiling.py +274 -0
- vllm/multimodal/registry.py +321 -0
- vllm/multimodal/utils.py +386 -0
- vllm/multimodal/video.py +166 -0
- vllm/outputs.py +521 -0
- vllm/platforms/__init__.py +286 -0
- vllm/platforms/cpu.py +182 -0
- vllm/platforms/cuda.py +463 -0
- vllm/platforms/hpu.py +94 -0
- vllm/platforms/interface.py +427 -0
- vllm/platforms/neuron.py +69 -0
- vllm/platforms/rocm.py +346 -0
- vllm/platforms/tpu.py +174 -0
- vllm/platforms/xpu.py +142 -0
- vllm/plugins/__init__.py +82 -0
- vllm/pooling_params.py +53 -0
- vllm/profiler/__init__.py +7 -0
- vllm/profiler/layerwise_profile.py +374 -0
- vllm/profiler/utils.py +147 -0
- vllm/prompt_adapter/__init__.py +0 -0
- vllm/prompt_adapter/layers.py +82 -0
- vllm/prompt_adapter/models.py +357 -0
- vllm/prompt_adapter/request.py +36 -0
- vllm/prompt_adapter/utils.py +97 -0
- vllm/prompt_adapter/worker_manager.py +178 -0
- vllm/py.typed +2 -0
- vllm/reasoning/__init__.py +12 -0
- vllm/reasoning/abs_reasoning_parsers.py +189 -0
- vllm/reasoning/deepseek_r1_reasoning_parser.py +172 -0
- vllm/reasoning/granite_reasoning_parser.py +362 -0
- vllm/sampling_params.py +598 -0
- vllm/scalar_type.py +335 -0
- vllm/scripts.py +14 -0
- vllm/sequence.py +1486 -0
- vllm/spec_decode/__init__.py +0 -0
- vllm/spec_decode/batch_expansion.py +505 -0
- vllm/spec_decode/draft_model_runner.py +335 -0
- vllm/spec_decode/interfaces.py +98 -0
- vllm/spec_decode/medusa_worker.py +137 -0
- vllm/spec_decode/metrics.py +212 -0
- vllm/spec_decode/mlp_speculator_worker.py +93 -0
- vllm/spec_decode/mqa_scorer.py +159 -0
- vllm/spec_decode/multi_step_worker.py +416 -0
- vllm/spec_decode/ngram_worker.py +195 -0
- vllm/spec_decode/proposer_worker_base.py +58 -0
- vllm/spec_decode/smaller_tp_proposer_worker.py +194 -0
- vllm/spec_decode/spec_decode_worker.py +1324 -0
- vllm/spec_decode/target_model_runner.py +44 -0
- vllm/spec_decode/top1_proposer.py +274 -0
- vllm/spec_decode/util.py +276 -0
- vllm/test_utils.py +129 -0
- vllm/third_party/__init__.py +0 -0
- vllm/third_party/pynvml.py +6139 -0
- vllm/tracing.py +130 -0
- vllm/transformers_utils/__init__.py +19 -0
- vllm/transformers_utils/config.py +813 -0
- vllm/transformers_utils/configs/__init__.py +52 -0
- vllm/transformers_utils/configs/arctic.py +206 -0
- vllm/transformers_utils/configs/chatglm.py +71 -0
- vllm/transformers_utils/configs/cohere2.py +194 -0
- vllm/transformers_utils/configs/dbrx.py +280 -0
- vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
- vllm/transformers_utils/configs/eagle.py +65 -0
- vllm/transformers_utils/configs/exaone.py +191 -0
- vllm/transformers_utils/configs/falcon.py +89 -0
- vllm/transformers_utils/configs/h2ovl.py +15 -0
- vllm/transformers_utils/configs/internvl.py +53 -0
- vllm/transformers_utils/configs/jais.py +237 -0
- vllm/transformers_utils/configs/kimi_vl.py +36 -0
- vllm/transformers_utils/configs/medusa.py +62 -0
- vllm/transformers_utils/configs/mllama.py +30 -0
- vllm/transformers_utils/configs/mlp_speculator.py +67 -0
- vllm/transformers_utils/configs/moonvit.py +32 -0
- vllm/transformers_utils/configs/mpt.py +179 -0
- vllm/transformers_utils/configs/nemotron.py +204 -0
- vllm/transformers_utils/configs/nvlm_d.py +14 -0
- vllm/transformers_utils/configs/skyworkr1v.py +53 -0
- vllm/transformers_utils/configs/solar.py +246 -0
- vllm/transformers_utils/configs/telechat2.py +63 -0
- vllm/transformers_utils/configs/ultravox.py +107 -0
- vllm/transformers_utils/detokenizer.py +167 -0
- vllm/transformers_utils/detokenizer_utils.py +188 -0
- vllm/transformers_utils/processor.py +210 -0
- vllm/transformers_utils/processors/__init__.py +6 -0
- vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
- vllm/transformers_utils/s3_utils.py +161 -0
- vllm/transformers_utils/tokenizer.py +291 -0
- vllm/transformers_utils/tokenizer_base.py +146 -0
- vllm/transformers_utils/tokenizer_group.py +110 -0
- vllm/transformers_utils/tokenizers/__init__.py +9 -0
- vllm/transformers_utils/tokenizers/mistral.py +483 -0
- vllm/transformers_utils/utils.py +98 -0
- vllm/triton_utils/__init__.py +5 -0
- vllm/triton_utils/importing.py +53 -0
- vllm/usage/__init__.py +0 -0
- vllm/usage/usage_lib.py +255 -0
- vllm/utils.py +2692 -0
- vllm/v1/__init__.py +0 -0
- vllm/v1/attention/__init__.py +0 -0
- vllm/v1/attention/backends/__init__.py +0 -0
- vllm/v1/attention/backends/flash_attn.py +783 -0
- vllm/v1/attention/backends/flashinfer.py +638 -0
- vllm/v1/attention/backends/mla/__init__.py +0 -0
- vllm/v1/attention/backends/mla/common.py +974 -0
- vllm/v1/attention/backends/mla/flashmla.py +149 -0
- vllm/v1/attention/backends/mla/triton_mla.py +118 -0
- vllm/v1/attention/backends/pallas.py +221 -0
- vllm/v1/attention/backends/triton_attn.py +198 -0
- vllm/v1/core/__init__.py +0 -0
- vllm/v1/core/block_pool.py +281 -0
- vllm/v1/core/encoder_cache_manager.py +149 -0
- vllm/v1/core/kv_cache_manager.py +385 -0
- vllm/v1/core/kv_cache_utils.py +744 -0
- vllm/v1/core/sched/__init__.py +0 -0
- vllm/v1/core/sched/interface.py +134 -0
- vllm/v1/core/sched/output.py +126 -0
- vllm/v1/core/sched/scheduler.py +838 -0
- vllm/v1/core/sched/utils.py +22 -0
- vllm/v1/core/specialized_manager.py +161 -0
- vllm/v1/engine/__init__.py +166 -0
- vllm/v1/engine/async_llm.py +532 -0
- vllm/v1/engine/core.py +701 -0
- vllm/v1/engine/core_client.py +942 -0
- vllm/v1/engine/detokenizer.py +260 -0
- vllm/v1/engine/exceptions.py +16 -0
- vllm/v1/engine/llm_engine.py +285 -0
- vllm/v1/engine/logprobs.py +198 -0
- vllm/v1/engine/mm_input_cache.py +82 -0
- vllm/v1/engine/output_processor.py +420 -0
- vllm/v1/engine/parallel_sampling.py +132 -0
- vllm/v1/engine/processor.py +387 -0
- vllm/v1/executor/__init__.py +0 -0
- vllm/v1/executor/abstract.py +112 -0
- vllm/v1/executor/multiproc_executor.py +480 -0
- vllm/v1/executor/ray_distributed_executor.py +61 -0
- vllm/v1/kv_cache_interface.py +166 -0
- vllm/v1/metrics/__init__.py +0 -0
- vllm/v1/metrics/loggers.py +498 -0
- vllm/v1/metrics/stats.py +238 -0
- vllm/v1/outputs.py +111 -0
- vllm/v1/request.py +178 -0
- vllm/v1/sample/__init__.py +0 -0
- vllm/v1/sample/metadata.py +43 -0
- vllm/v1/sample/ops/__init__.py +0 -0
- vllm/v1/sample/ops/bad_words.py +38 -0
- vllm/v1/sample/ops/penalties.py +58 -0
- vllm/v1/sample/ops/topk_topp_sampler.py +315 -0
- vllm/v1/sample/rejection_sampler.py +631 -0
- vllm/v1/sample/sampler.py +270 -0
- vllm/v1/sample/tpu/__init__.py +0 -0
- vllm/v1/sample/tpu/metadata.py +118 -0
- vllm/v1/sample/tpu/sampler.py +154 -0
- vllm/v1/serial_utils.py +274 -0
- vllm/v1/spec_decode/__init__.py +0 -0
- vllm/v1/spec_decode/eagle.py +318 -0
- vllm/v1/spec_decode/metadata.py +61 -0
- vllm/v1/spec_decode/metrics.py +164 -0
- vllm/v1/spec_decode/ngram_proposer.py +131 -0
- vllm/v1/spec_decode/utils.py +18 -0
- vllm/v1/stats/__init__.py +0 -0
- vllm/v1/stats/common.py +453 -0
- vllm/v1/structured_output/__init__.py +113 -0
- vllm/v1/structured_output/backend_guidance.py +215 -0
- vllm/v1/structured_output/backend_types.py +96 -0
- vllm/v1/structured_output/backend_xgrammar.py +299 -0
- vllm/v1/structured_output/request.py +84 -0
- vllm/v1/structured_output/utils.py +174 -0
- vllm/v1/utils.py +249 -0
- vllm/v1/worker/__init__.py +0 -0
- vllm/v1/worker/block_table.py +87 -0
- vllm/v1/worker/gpu_input_batch.py +677 -0
- vllm/v1/worker/gpu_model_runner.py +1776 -0
- vllm/v1/worker/gpu_worker.py +349 -0
- vllm/v1/worker/lora_model_runner_mixin.py +145 -0
- vllm/v1/worker/tpu_model_runner.py +1419 -0
- vllm/v1/worker/tpu_worker.py +260 -0
- vllm/v1/worker/utils.py +74 -0
- vllm/v1/worker/worker_base.py +64 -0
- vllm/version.py +40 -0
- vllm/vllm_flash_attn/.gitkeep +0 -0
- vllm/worker/__init__.py +0 -0
- vllm/worker/cache_engine.py +144 -0
- vllm/worker/cpu_enc_dec_model_runner.py +323 -0
- vllm/worker/cpu_model_runner.py +668 -0
- vllm/worker/cpu_pooling_model_runner.py +122 -0
- vllm/worker/cpu_worker.py +400 -0
- vllm/worker/enc_dec_model_runner.py +542 -0
- vllm/worker/hpu_model_runner.py +2221 -0
- vllm/worker/hpu_worker.py +483 -0
- vllm/worker/model_runner.py +2056 -0
- vllm/worker/model_runner_base.py +281 -0
- vllm/worker/multi_step_hpu_worker.py +122 -0
- vllm/worker/multi_step_model_runner.py +908 -0
- vllm/worker/multi_step_tpu_worker.py +107 -0
- vllm/worker/multi_step_worker.py +196 -0
- vllm/worker/neuron_model_runner.py +336 -0
- vllm/worker/neuron_worker.py +138 -0
- vllm/worker/pooling_model_runner.py +200 -0
- vllm/worker/tpu_model_runner.py +908 -0
- vllm/worker/tpu_worker.py +332 -0
- vllm/worker/utils.py +52 -0
- vllm/worker/worker.py +570 -0
- vllm/worker/worker_base.py +644 -0
- vllm/worker/xpu_model_runner.py +603 -0
- vllm/worker/xpu_worker.py +185 -0
- vllm_cpu-0.8.5.post2.dist-info/METADATA +309 -0
- vllm_cpu-0.8.5.post2.dist-info/RECORD +1103 -0
- vllm_cpu-0.8.5.post2.dist-info/WHEEL +5 -0
- vllm_cpu-0.8.5.post2.dist-info/entry_points.txt +2 -0
- vllm_cpu-0.8.5.post2.dist-info/top_level.txt +1 -0
vllm/lora/models.py
ADDED
@@ -0,0 +1,802 @@
|
|
|
1
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
2
|
+
|
|
3
|
+
import copy
|
|
4
|
+
import math
|
|
5
|
+
import os
|
|
6
|
+
import re
|
|
7
|
+
from dataclasses import dataclass, field
|
|
8
|
+
from typing import (Any, Callable, Dict, List, Optional, Sequence, Set, Type,
|
|
9
|
+
Union)
|
|
10
|
+
|
|
11
|
+
import safetensors.torch
|
|
12
|
+
import torch
|
|
13
|
+
from torch import nn
|
|
14
|
+
|
|
15
|
+
from vllm.adapter_commons.models import (AdapterLRUCache, AdapterModel,
|
|
16
|
+
AdapterModelManager)
|
|
17
|
+
from vllm.adapter_commons.utils import (add_adapter, deactivate_adapter,
|
|
18
|
+
get_adapter, list_adapters,
|
|
19
|
+
remove_adapter, set_adapter_mapping)
|
|
20
|
+
from vllm.config import LoRAConfig
|
|
21
|
+
from vllm.logger import init_logger
|
|
22
|
+
from vllm.lora.layers import (BaseLayerWithLoRA,
|
|
23
|
+
LinearScalingRotaryEmbeddingWithLoRA,
|
|
24
|
+
LoRAMapping)
|
|
25
|
+
from vllm.lora.lora import LoRALayerWeights, PackedLoRALayerWeights
|
|
26
|
+
from vllm.lora.peft_helper import PEFTHelper
|
|
27
|
+
from vllm.lora.punica_wrapper import get_punica_wrapper
|
|
28
|
+
from vllm.lora.utils import (from_layer, from_layer_logits_processor,
|
|
29
|
+
get_supported_lora_modules,
|
|
30
|
+
is_regex_target_modules,
|
|
31
|
+
parse_fine_tuned_lora_name, replace_submodule)
|
|
32
|
+
from vllm.model_executor.models import SupportsLoRA, supports_multimodal
|
|
33
|
+
from vllm.model_executor.models.interfaces import is_pooling_model
|
|
34
|
+
from vllm.model_executor.models.module_mapping import MultiModelKeys
|
|
35
|
+
from vllm.model_executor.models.utils import PPMissingLayer, WeightsMapper
|
|
36
|
+
from vllm.utils import is_pin_memory_available
|
|
37
|
+
|
|
38
|
+
logger = init_logger(__name__)
|
|
39
|
+
|
|
40
|
+
_GLOBAL_LORA_ID = 0
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
@dataclass
|
|
44
|
+
class LongContextLoRAContext:
|
|
45
|
+
"""Context for lora adapters that support long context."""
|
|
46
|
+
# The scaling factors to support long context lora fine tuned models.
|
|
47
|
+
scaling_factors: List[float]
|
|
48
|
+
# dimension to apply rotary embedding.
|
|
49
|
+
rot_dim: int
|
|
50
|
+
# offsets to the sin_cos_cache for each lora_id loaded.
|
|
51
|
+
# This value is dynamically modified.
|
|
52
|
+
offsets_by_lora_id: Dict[int, int] = field(default_factory=dict)
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def get_lora_id():
|
|
56
|
+
global _GLOBAL_LORA_ID
|
|
57
|
+
_GLOBAL_LORA_ID += 1
|
|
58
|
+
return _GLOBAL_LORA_ID
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
class LoRAModel(AdapterModel):
|
|
62
|
+
"""A LoRA fine-tuned model."""
|
|
63
|
+
|
|
64
|
+
def __init__(
|
|
65
|
+
self,
|
|
66
|
+
lora_model_id: int,
|
|
67
|
+
rank: int,
|
|
68
|
+
loras: Dict[str, LoRALayerWeights],
|
|
69
|
+
scaling_factor: Optional[float] = None,
|
|
70
|
+
) -> None:
|
|
71
|
+
"""
|
|
72
|
+
Args:
|
|
73
|
+
lora_model_id: The integer id for the lora model.
|
|
74
|
+
rank: lora rank.
|
|
75
|
+
loras: module name -> weights for lora-replaced layers.
|
|
76
|
+
scaling_factor: Scaling factor to support long context lora model.
|
|
77
|
+
None if the lora is not tuned for long context support.
|
|
78
|
+
"""
|
|
79
|
+
self.id = lora_model_id
|
|
80
|
+
# Scaling factor for long context lora model. None if it is not
|
|
81
|
+
# fine tuned for the long context.
|
|
82
|
+
self.scaling_factor = scaling_factor
|
|
83
|
+
assert (
|
|
84
|
+
lora_model_id
|
|
85
|
+
> 0), f"a valid lora id should be greater than 0, got {self.id}"
|
|
86
|
+
self.rank = rank
|
|
87
|
+
self.loras: Dict[str, LoRALayerWeights] = loras
|
|
88
|
+
|
|
89
|
+
def clone(self, lora_model_id: int) -> "LoRAModel":
|
|
90
|
+
"""Return a copy of the object with different ids.
|
|
91
|
+
|
|
92
|
+
Will share the underlying tensors."""
|
|
93
|
+
return self.__class__(
|
|
94
|
+
lora_model_id,
|
|
95
|
+
rank=self.rank,
|
|
96
|
+
loras=self.loras.copy(),
|
|
97
|
+
)
|
|
98
|
+
|
|
99
|
+
@property
|
|
100
|
+
def extra_vocab_size(self) -> int:
|
|
101
|
+
return max(lora.extra_vocab_size
|
|
102
|
+
for lora in self.loras.values()) if self.loras else 0
|
|
103
|
+
|
|
104
|
+
def get_lora(self, module_name: str) -> Optional[LoRALayerWeights]:
|
|
105
|
+
"""Get LoRA for a given module by name"""
|
|
106
|
+
return self.loras.get(module_name, None)
|
|
107
|
+
|
|
108
|
+
def check_lora_name(self, lora_name: str) -> bool:
|
|
109
|
+
return lora_name in self.loras
|
|
110
|
+
|
|
111
|
+
# (yard1): TODO see if we can derive target_embedding_padding automatically
|
|
112
|
+
@classmethod
|
|
113
|
+
def from_lora_tensors(
|
|
114
|
+
cls,
|
|
115
|
+
lora_model_id: int,
|
|
116
|
+
tensors: Dict[str, torch.Tensor],
|
|
117
|
+
peft_helper: PEFTHelper,
|
|
118
|
+
device: str = "cuda",
|
|
119
|
+
dtype: Optional[torch.dtype] = None,
|
|
120
|
+
embeddings: Optional[Dict[str, torch.Tensor]] = None,
|
|
121
|
+
target_embedding_padding: Optional[int] = None,
|
|
122
|
+
embedding_modules: Optional[Dict[str, str]] = None,
|
|
123
|
+
embedding_padding_modules: Optional[List[str]] = None,
|
|
124
|
+
weights_mapper: Optional[WeightsMapper] = None,
|
|
125
|
+
) -> "LoRAModel":
|
|
126
|
+
"""Create a LoRAModel from a dictionary of tensors."""
|
|
127
|
+
pin_memory = str(device) == "cpu" and is_pin_memory_available()
|
|
128
|
+
loras: Dict[str, LoRALayerWeights] = {}
|
|
129
|
+
for tensor_name, tensor in tensors.items():
|
|
130
|
+
module_name, is_lora_a, is_bias = parse_fine_tuned_lora_name(
|
|
131
|
+
tensor_name, weights_mapper)
|
|
132
|
+
if module_name not in loras:
|
|
133
|
+
lora_embeddings_tensor = None
|
|
134
|
+
if embeddings:
|
|
135
|
+
assert embedding_modules is not None
|
|
136
|
+
embeddings_module = next(
|
|
137
|
+
(k for k in embedding_modules if k in module_name),
|
|
138
|
+
None)
|
|
139
|
+
if embeddings_module:
|
|
140
|
+
lora_embeddings_tensor = embeddings[
|
|
141
|
+
embedding_modules[embeddings_module]].to(
|
|
142
|
+
device=device, dtype=dtype)
|
|
143
|
+
if pin_memory:
|
|
144
|
+
lora_embeddings_tensor = (
|
|
145
|
+
lora_embeddings_tensor.pin_memory())
|
|
146
|
+
loras[module_name] = LoRALayerWeights.from_config(
|
|
147
|
+
module_name, peft_helper, lora_embeddings_tensor)
|
|
148
|
+
|
|
149
|
+
if is_bias:
|
|
150
|
+
loras[module_name].bias = tensor.to(device=device,
|
|
151
|
+
dtype=dtype).t()
|
|
152
|
+
bias = tensor.to(device=device, dtype=dtype).t()
|
|
153
|
+
if pin_memory:
|
|
154
|
+
bias = bias.pin_memory()
|
|
155
|
+
loras[module_name].bias = bias
|
|
156
|
+
elif is_lora_a:
|
|
157
|
+
loras[module_name].lora_a = tensor.to(device=device,
|
|
158
|
+
dtype=dtype).t()
|
|
159
|
+
if pin_memory:
|
|
160
|
+
loras[module_name].lora_a = loras[
|
|
161
|
+
module_name].lora_a.pin_memory()
|
|
162
|
+
else:
|
|
163
|
+
loras[module_name].lora_b = tensor.to(device=device,
|
|
164
|
+
dtype=dtype).t()
|
|
165
|
+
assert embedding_padding_modules is not None
|
|
166
|
+
if any(name in module_name
|
|
167
|
+
for name in embedding_padding_modules
|
|
168
|
+
) and target_embedding_padding is not None:
|
|
169
|
+
lora_b = loras[module_name].lora_b
|
|
170
|
+
assert target_embedding_padding >= lora_b.shape[1]
|
|
171
|
+
addition = target_embedding_padding - lora_b.shape[1]
|
|
172
|
+
loras[module_name].lora_b = torch.nn.functional.pad(
|
|
173
|
+
lora_b, (0, addition))
|
|
174
|
+
if pin_memory:
|
|
175
|
+
loras[module_name].lora_b = loras[
|
|
176
|
+
module_name].lora_b.pin_memory()
|
|
177
|
+
|
|
178
|
+
for lora in loras.values():
|
|
179
|
+
lora.optimize()
|
|
180
|
+
|
|
181
|
+
return cls(lora_model_id,
|
|
182
|
+
peft_helper.r,
|
|
183
|
+
loras,
|
|
184
|
+
scaling_factor=peft_helper.vllm_long_context_scaling_factor)
|
|
185
|
+
|
|
186
|
+
@classmethod
|
|
187
|
+
def from_local_checkpoint(
|
|
188
|
+
cls,
|
|
189
|
+
lora_dir: str,
|
|
190
|
+
expected_lora_modules: List[str],
|
|
191
|
+
peft_helper: PEFTHelper,
|
|
192
|
+
*,
|
|
193
|
+
lora_model_id: Optional[int] = None,
|
|
194
|
+
device: str = "cuda",
|
|
195
|
+
dtype: Optional[torch.dtype] = None,
|
|
196
|
+
target_embedding_padding: Optional[int] = None,
|
|
197
|
+
embedding_modules: Optional[Dict[str, str]] = None,
|
|
198
|
+
embedding_padding_modules: Optional[List[str]] = None,
|
|
199
|
+
weights_mapper: Optional[WeightsMapper] = None,
|
|
200
|
+
) -> "LoRAModel":
|
|
201
|
+
"""Create a LoRAModel from a local checkpoint.
|
|
202
|
+
|
|
203
|
+
Args:
|
|
204
|
+
lora_dir: The local path that has lora data.
|
|
205
|
+
expected_lora_modules: Name of modules that are expected to be
|
|
206
|
+
replaced by lora.
|
|
207
|
+
peft_helper: Loaded lora configuration information.
|
|
208
|
+
lora_model_id: LoRA model id. If not given, automatically set by
|
|
209
|
+
a global counter.
|
|
210
|
+
device: Device where the lora model is loaded.
|
|
211
|
+
dtype: dtype of the lora model weights.
|
|
212
|
+
|
|
213
|
+
Returns:
|
|
214
|
+
Loaded LoRA Model.
|
|
215
|
+
"""
|
|
216
|
+
lora_tensor_path = os.path.join(lora_dir, "adapter_model.safetensors")
|
|
217
|
+
lora_bin_file_path = os.path.join(lora_dir, "adapter_model.bin")
|
|
218
|
+
new_embeddings_tensor_path = os.path.join(
|
|
219
|
+
lora_dir, "new_embeddings.safetensors")
|
|
220
|
+
new_embeddings_bin_file_path = os.path.join(lora_dir,
|
|
221
|
+
"new_embeddings.bin")
|
|
222
|
+
|
|
223
|
+
unexpected_modules: List[Union[list[str], str]]
|
|
224
|
+
if os.path.isfile(lora_tensor_path):
|
|
225
|
+
tensors: Dict[str, torch.Tensor] = {}
|
|
226
|
+
# Find unexpected modules.
|
|
227
|
+
# Use safetensor key as a source of truth to find expected modules.
|
|
228
|
+
# in peft if you have target_modules A, B, C and C does not exist
|
|
229
|
+
# in the model it won’t error and model will be trained with A, B
|
|
230
|
+
# loraified. C won’t exist in the safetensor but it will exist in
|
|
231
|
+
# the target_modules of the adapter_config.json.
|
|
232
|
+
unexpected_modules = []
|
|
233
|
+
with safetensors.safe_open(lora_tensor_path,
|
|
234
|
+
framework="pt") as f: # type: ignore
|
|
235
|
+
for lora_module in f.keys(): # noqa
|
|
236
|
+
module_name, _, _ = parse_fine_tuned_lora_name(
|
|
237
|
+
lora_module, weights_mapper)
|
|
238
|
+
part_name = module_name.split(".")[-1]
|
|
239
|
+
if part_name not in expected_lora_modules:
|
|
240
|
+
unexpected_modules.append(module_name)
|
|
241
|
+
if unexpected_modules:
|
|
242
|
+
raise ValueError(
|
|
243
|
+
f"While loading {lora_dir}, expected"
|
|
244
|
+
f" target modules in {expected_lora_modules}"
|
|
245
|
+
f" but received {unexpected_modules}."
|
|
246
|
+
f" Please verify that the loaded LoRA module is correct"
|
|
247
|
+
)
|
|
248
|
+
# Load tensors if there are only expected modules.
|
|
249
|
+
for module in f.keys(): # noqa
|
|
250
|
+
tensors[module] = f.get_tensor(module)
|
|
251
|
+
elif os.path.isfile(lora_bin_file_path):
|
|
252
|
+
# When a bin file is provided, we rely on config to find unexpected
|
|
253
|
+
# modules.
|
|
254
|
+
unexpected_modules = []
|
|
255
|
+
target_modules = peft_helper.target_modules
|
|
256
|
+
if not isinstance(target_modules, list):
|
|
257
|
+
target_modules = [target_modules]
|
|
258
|
+
for module in target_modules:
|
|
259
|
+
# Compatible with more modules,
|
|
260
|
+
# such as:layers.11.self_attn.k_proj
|
|
261
|
+
part_name = module.split(".")[-1]
|
|
262
|
+
if part_name not in expected_lora_modules:
|
|
263
|
+
unexpected_modules.append(module)
|
|
264
|
+
# loaded lora's target modules must be a subset of
|
|
265
|
+
# expected_lora_modules. It is not reliable. See
|
|
266
|
+
# https://github.com/vllm-project/vllm/pull/5909. But there's no
|
|
267
|
+
# other better mechanism.
|
|
268
|
+
if unexpected_modules and not is_regex_target_modules(
|
|
269
|
+
peft_helper.target_modules, expected_lora_modules):
|
|
270
|
+
raise ValueError(
|
|
271
|
+
f"While loading {lora_dir}, expected"
|
|
272
|
+
f" target modules in {expected_lora_modules}"
|
|
273
|
+
f" but received {unexpected_modules}."
|
|
274
|
+
f" Please verify that the loaded LoRA module is correct")
|
|
275
|
+
tensors = torch.load(lora_bin_file_path,
|
|
276
|
+
map_location=device,
|
|
277
|
+
weights_only=True)
|
|
278
|
+
else:
|
|
279
|
+
raise ValueError(f"{lora_dir} doesn't contain tensors")
|
|
280
|
+
|
|
281
|
+
embeddings = None
|
|
282
|
+
if os.path.isfile(new_embeddings_tensor_path):
|
|
283
|
+
embeddings = safetensors.torch.load_file(
|
|
284
|
+
new_embeddings_tensor_path)
|
|
285
|
+
elif os.path.isfile(new_embeddings_bin_file_path):
|
|
286
|
+
embeddings = torch.load(new_embeddings_bin_file_path,
|
|
287
|
+
map_location=device,
|
|
288
|
+
weights_only=True)
|
|
289
|
+
|
|
290
|
+
return cls.from_lora_tensors(
|
|
291
|
+
lora_model_id=get_lora_id()
|
|
292
|
+
if lora_model_id is None else lora_model_id,
|
|
293
|
+
tensors=tensors,
|
|
294
|
+
peft_helper=peft_helper,
|
|
295
|
+
device=device,
|
|
296
|
+
dtype=dtype,
|
|
297
|
+
embeddings=embeddings,
|
|
298
|
+
target_embedding_padding=target_embedding_padding,
|
|
299
|
+
embedding_modules=embedding_modules,
|
|
300
|
+
embedding_padding_modules=embedding_padding_modules,
|
|
301
|
+
weights_mapper=weights_mapper)
|
|
302
|
+
|
|
303
|
+
|
|
304
|
+
class LoRAModelManager(AdapterModelManager):
|
|
305
|
+
"""A manager that manages multiple LoRA-fine-tuned models."""
|
|
306
|
+
|
|
307
|
+
def __init__(
|
|
308
|
+
self,
|
|
309
|
+
model: SupportsLoRA,
|
|
310
|
+
max_num_seqs: int,
|
|
311
|
+
max_num_batched_tokens: int,
|
|
312
|
+
vocab_size: int,
|
|
313
|
+
lora_config: LoRAConfig,
|
|
314
|
+
device: torch.device,
|
|
315
|
+
):
|
|
316
|
+
"""Create a LoRAModelManager and adapter for a given model.
|
|
317
|
+
|
|
318
|
+
Args:
|
|
319
|
+
model: the model to be adapted.
|
|
320
|
+
max_num_seqs: the maximum number of sequences model can run in a
|
|
321
|
+
single batch.
|
|
322
|
+
max_num_batched_tokens: the maximum number of tokens model can run
|
|
323
|
+
in a single batch.
|
|
324
|
+
vocab_size: the vocab size of the model.
|
|
325
|
+
lora_config: the LoRA configuration.
|
|
326
|
+
"""
|
|
327
|
+
self.lora_config = lora_config
|
|
328
|
+
self.device = device
|
|
329
|
+
self.max_num_seqs = max_num_seqs
|
|
330
|
+
assert self.capacity >= self.lora_slots
|
|
331
|
+
self.max_num_batched_tokens = math.ceil(max_num_batched_tokens / 8) * 8
|
|
332
|
+
self.lora_index_to_id: List[Optional[int]] = [None] * self.lora_slots
|
|
333
|
+
self.vocab_size = vocab_size
|
|
334
|
+
self.long_lora_context: Optional[LongContextLoRAContext] = None
|
|
335
|
+
self.punica_wrapper = get_punica_wrapper(
|
|
336
|
+
max_num_batched_tokens,
|
|
337
|
+
max_batches=self.max_num_seqs,
|
|
338
|
+
device=self.device,
|
|
339
|
+
max_loras=self.lora_config.max_loras)
|
|
340
|
+
# Scaling factor -> offset to the sin_cos_cache to it.
|
|
341
|
+
# Used for long context lora.
|
|
342
|
+
self.scaling_factor_to_offset: Dict[float, int] = {}
|
|
343
|
+
super().__init__(model)
|
|
344
|
+
|
|
345
|
+
self.supported_lora_modules = get_supported_lora_modules(self.model)
|
|
346
|
+
assert self.supported_lora_modules, "No supported LoRA modules found in"
|
|
347
|
+
f"{self.model.__class__.__name__}."
|
|
348
|
+
if lora_config.long_lora_scaling_factors:
|
|
349
|
+
# We need to replace rotary emb layer to do batch computation
|
|
350
|
+
# for long lora.
|
|
351
|
+
self.supported_lora_modules.append("rotary_emb")
|
|
352
|
+
self.packed_modules_mapping = copy.deepcopy(
|
|
353
|
+
self.model.packed_modules_mapping)
|
|
354
|
+
# Used to indicate whether the model is a multimodal model
|
|
355
|
+
self.supports_mm: bool = (
|
|
356
|
+
supports_multimodal(self.model)
|
|
357
|
+
# In case the model only supports LoRA for
|
|
358
|
+
# text modules (e.g. ChatGLM)
|
|
359
|
+
and hasattr(self.model, "get_mm_mapping"))
|
|
360
|
+
self.is_pooling_model = is_pooling_model(self.model)
|
|
361
|
+
self.packed_modules: Dict[str, List[str]] = {}
|
|
362
|
+
self.modules: Dict[str, BaseLayerWithLoRA] = {}
|
|
363
|
+
# Dict instead of a Set for compatibility with LRUCache.
|
|
364
|
+
self._last_mapping: Optional[LoRAMapping] = None
|
|
365
|
+
self._create_lora_modules()
|
|
366
|
+
self.model.lora_manager = self
|
|
367
|
+
self.adapter_type = 'LoRA'
|
|
368
|
+
|
|
369
|
+
@property
|
|
370
|
+
def capacity(self) -> int:
|
|
371
|
+
return self.lora_config.max_cpu_loras
|
|
372
|
+
|
|
373
|
+
@property
|
|
374
|
+
def lora_slots(self) -> int:
|
|
375
|
+
return self.lora_config.max_loras
|
|
376
|
+
|
|
377
|
+
@property
|
|
378
|
+
def adapter_slots(self) -> int:
|
|
379
|
+
return self.lora_slots
|
|
380
|
+
|
|
381
|
+
def activate_adapter(
|
|
382
|
+
self,
|
|
383
|
+
lora_id: int,
|
|
384
|
+
) -> bool:
|
|
385
|
+
"""Move LoRA into a GPU buffer to be used in the forward pass."""
|
|
386
|
+
if lora_id in self._active_adapters:
|
|
387
|
+
return False
|
|
388
|
+
first_free_slot = next(
|
|
389
|
+
((i, lora_id) for i, lora_id in enumerate(self.lora_index_to_id)
|
|
390
|
+
if lora_id is None), None)
|
|
391
|
+
if first_free_slot is None:
|
|
392
|
+
raise ValueError("No free lora slots")
|
|
393
|
+
index, _ = first_free_slot
|
|
394
|
+
self._active_adapters[lora_id] = None
|
|
395
|
+
lora_model = self._registered_adapters[lora_id]
|
|
396
|
+
logger.debug("Activating LoRA. int id: %d, slot index: %d",
|
|
397
|
+
lora_model.id, index)
|
|
398
|
+
self.lora_index_to_id[index] = lora_model.id
|
|
399
|
+
for module_name, module in self.modules.items():
|
|
400
|
+
module_lora = self._get_lora_layer_weights(lora_model, module_name)
|
|
401
|
+
if module_lora:
|
|
402
|
+
module_lora.optimize()
|
|
403
|
+
# Bias is not explicitly enabled with the flag enable_lora_bias.
|
|
404
|
+
bias = module_lora.bias
|
|
405
|
+
if ((torch.is_tensor(bias) or
|
|
406
|
+
(isinstance(bias, Sequence) and any(b is not None
|
|
407
|
+
for b in bias)))
|
|
408
|
+
and not self.lora_config.bias_enabled):
|
|
409
|
+
module_lora.bias = None
|
|
410
|
+
raise ValueError(
|
|
411
|
+
f"Adapter bias cannot be used for {module_name}"
|
|
412
|
+
" without --enable-lora-bias.")
|
|
413
|
+
module.set_lora(index, module_lora.lora_a, module_lora.lora_b,
|
|
414
|
+
module_lora.embeddings_tensor,
|
|
415
|
+
module_lora.bias)
|
|
416
|
+
else:
|
|
417
|
+
module.reset_lora(index)
|
|
418
|
+
return True
|
|
419
|
+
|
|
420
|
+
def _deactivate_adapter(self, lora_id: int):
|
|
421
|
+
try:
|
|
422
|
+
index = self.lora_index_to_id.index(lora_id)
|
|
423
|
+
self.lora_index_to_id[index] = None
|
|
424
|
+
except ValueError:
|
|
425
|
+
pass
|
|
426
|
+
|
|
427
|
+
def _set_long_lora_context(self, lora: LoRAModel):
|
|
428
|
+
if self.long_lora_context is None:
|
|
429
|
+
return
|
|
430
|
+
|
|
431
|
+
if lora.scaling_factor is None:
|
|
432
|
+
return
|
|
433
|
+
|
|
434
|
+
if (lora.scaling_factor not in self.scaling_factor_to_offset):
|
|
435
|
+
raise ValueError(f"Long LoRA scaling factor {lora.scaling_factor}"
|
|
436
|
+
" has not been initialized.")
|
|
437
|
+
|
|
438
|
+
offsets = self.scaling_factor_to_offset.get(lora.scaling_factor)
|
|
439
|
+
if offsets:
|
|
440
|
+
self.long_lora_context.offsets_by_lora_id[lora.id] = offsets
|
|
441
|
+
|
|
442
|
+
def _add_adapter(self, lora: LoRAModel):
|
|
443
|
+
self._create_merged_loras_inplace(lora)
|
|
444
|
+
self._registered_adapters[lora.id] = lora
|
|
445
|
+
self._set_long_lora_context(lora)
|
|
446
|
+
|
|
447
|
+
def pin_adapter(self, lora_id: int) -> bool:
|
|
448
|
+
"""Pin a LoRAModel in the manager cache."""
|
|
449
|
+
raise NotImplementedError(
|
|
450
|
+
"Pinning is not supported in LoRAModelManager. "
|
|
451
|
+
"Use LRUCacheLoRAModelManager for pinning") # type: ignore
|
|
452
|
+
|
|
453
|
+
def _set_adapter_mapping(self, mapping: LoRAMapping) -> None:
|
|
454
|
+
# update lora states
|
|
455
|
+
self.punica_wrapper.update_metadata(
|
|
456
|
+
mapping,
|
|
457
|
+
self.lora_index_to_id,
|
|
458
|
+
self.lora_slots + 1,
|
|
459
|
+
self.vocab_size,
|
|
460
|
+
self.lora_config.lora_extra_vocab_size,
|
|
461
|
+
self.long_lora_context,
|
|
462
|
+
)
|
|
463
|
+
|
|
464
|
+
def remove_all_adapters(self):
|
|
465
|
+
"""Remove all LoRAModels from the manager."""
|
|
466
|
+
self._registered_adapters.clear()
|
|
467
|
+
self.lora_index_to_id = [None] * self.lora_slots
|
|
468
|
+
self._active_adapters.clear()
|
|
469
|
+
|
|
470
|
+
def _create_lora_modules(self):
|
|
471
|
+
for module_name, module in self.model.named_modules(
|
|
472
|
+
remove_duplicate=False):
|
|
473
|
+
if isinstance(module, PPMissingLayer):
|
|
474
|
+
continue
|
|
475
|
+
if not self._match_target_modules(module_name):
|
|
476
|
+
continue
|
|
477
|
+
# A temporary approach for multimodal models to support LoRA
|
|
478
|
+
# TODO: Remove this restriction
|
|
479
|
+
if self._filter_unsupported_mm_module(module_name):
|
|
480
|
+
logger.warning(
|
|
481
|
+
"Regarding multimodal models, vLLM currently only supports "
|
|
482
|
+
"adding LoRA to language model, %s will be ignored.",
|
|
483
|
+
module_name,
|
|
484
|
+
)
|
|
485
|
+
continue
|
|
486
|
+
parts = module_name.split(".")[-1]
|
|
487
|
+
packed_moduled_lst = self.packed_modules_mapping.get(parts, [])
|
|
488
|
+
new_module = replace_submodule(
|
|
489
|
+
self.model, module_name,
|
|
490
|
+
from_layer(module, self.lora_slots, self.lora_config,
|
|
491
|
+
packed_moduled_lst, self.model.config))
|
|
492
|
+
|
|
493
|
+
# LinearScalingRotaryEmbeddingWithLoRA is used to handle
|
|
494
|
+
# long context lora. Register relevant metadata.
|
|
495
|
+
if isinstance(new_module, LinearScalingRotaryEmbeddingWithLoRA):
|
|
496
|
+
self.long_lora_context = LongContextLoRAContext(
|
|
497
|
+
new_module.scaling_factors, new_module.rotary_dim)
|
|
498
|
+
self.scaling_factor_to_offset = \
|
|
499
|
+
new_module.scaling_factor_to_offset
|
|
500
|
+
# (yard1): TODO make this more robust
|
|
501
|
+
if "lm_head" in module_name:
|
|
502
|
+
logits_processor_module = self.model.get_submodule(
|
|
503
|
+
"logits_processor")
|
|
504
|
+
new_module = replace_submodule(
|
|
505
|
+
self.model, "logits_processor",
|
|
506
|
+
from_layer_logits_processor(logits_processor_module,
|
|
507
|
+
module, self.lora_slots,
|
|
508
|
+
self.lora_config,
|
|
509
|
+
self.model.config))
|
|
510
|
+
|
|
511
|
+
# In some models, especially multimodal ones, layers with the same
|
|
512
|
+
# name may have different types, such as nn.Linear and
|
|
513
|
+
# ReplicatedLinear. The nn.Linear layers cannot be replaced with
|
|
514
|
+
# LoRA layers, leading to assertion error. The following check
|
|
515
|
+
# aims to prevent this error
|
|
516
|
+
if self.supports_mm and not isinstance(new_module,
|
|
517
|
+
BaseLayerWithLoRA):
|
|
518
|
+
continue
|
|
519
|
+
self.register_module(module_name, new_module)
|
|
520
|
+
self._register_packed_modules(module_name)
|
|
521
|
+
# All lora layers share the same punica_wrapper based on reference.
|
|
522
|
+
new_module.set_mapping(self.punica_wrapper)
|
|
523
|
+
|
|
524
|
+
def register_module(self, module_name: str, module: "BaseLayerWithLoRA"):
|
|
525
|
+
assert isinstance(module, BaseLayerWithLoRA)
|
|
526
|
+
self.modules[module_name] = module
|
|
527
|
+
|
|
528
|
+
def create_dummy_lora(
|
|
529
|
+
self,
|
|
530
|
+
lora_id: int,
|
|
531
|
+
rank: int,
|
|
532
|
+
scaling_factor: Optional[float],
|
|
533
|
+
embedding_modules: Optional[Dict[str, str]] = None) -> LoRAModel:
|
|
534
|
+
"""Create zero-initialized LoRAModel for warmup."""
|
|
535
|
+
model = LoRAModel(lora_id, rank, {}, scaling_factor)
|
|
536
|
+
for module_name, module in self.model.named_modules():
|
|
537
|
+
bias_enabled = self.lora_config.bias_enabled
|
|
538
|
+
if (not self._match_target_modules(module_name)
|
|
539
|
+
or not isinstance(module, BaseLayerWithLoRA)
|
|
540
|
+
or isinstance(module, LinearScalingRotaryEmbeddingWithLoRA)
|
|
541
|
+
or self._filter_unsupported_mm_module(module_name)):
|
|
542
|
+
continue
|
|
543
|
+
parts = module_name.split(".")
|
|
544
|
+
if module_name not in self.packed_modules:
|
|
545
|
+
assert embedding_modules is not None
|
|
546
|
+
if parts[-1] in embedding_modules:
|
|
547
|
+
input_dim = (module.base_layer.org_vocab_size +
|
|
548
|
+
self.lora_config.lora_extra_vocab_size if
|
|
549
|
+
hasattr(module.base_layer, "org_vocab_size")
|
|
550
|
+
else module.base_layer.weight.shape[1])
|
|
551
|
+
output_dim = module.base_layer.embedding_dim if hasattr(
|
|
552
|
+
module.base_layer,
|
|
553
|
+
"embedding_dim") else module.base_layer.weight.shape[0]
|
|
554
|
+
embeddings_tensor_dim = (module.base_layer.embedding_dim if
|
|
555
|
+
hasattr(module.base_layer,
|
|
556
|
+
"embedding_dim") else
|
|
557
|
+
module.base_layer.weight.shape[1])
|
|
558
|
+
lora = LoRALayerWeights.create_dummy_lora_weights(
|
|
559
|
+
module_name,
|
|
560
|
+
input_dim,
|
|
561
|
+
output_dim,
|
|
562
|
+
rank,
|
|
563
|
+
module.lora_a_stacked[0].dtype,
|
|
564
|
+
"cpu",
|
|
565
|
+
embeddings_tensor_dim=embeddings_tensor_dim,
|
|
566
|
+
bias_enabled=bias_enabled)
|
|
567
|
+
else:
|
|
568
|
+
lora = LoRALayerWeights.create_dummy_lora_weights(
|
|
569
|
+
module_name,
|
|
570
|
+
module.lora_a_stacked[0].shape[-1],
|
|
571
|
+
module.lora_b_stacked[0].shape[-2],
|
|
572
|
+
rank,
|
|
573
|
+
module.lora_a_stacked[0].dtype,
|
|
574
|
+
"cpu",
|
|
575
|
+
bias_enabled=bias_enabled,
|
|
576
|
+
)
|
|
577
|
+
lora.optimize()
|
|
578
|
+
else:
|
|
579
|
+
parts = module_name.split(".")
|
|
580
|
+
replacements = self.packed_modules_mapping[parts[-1]]
|
|
581
|
+
subloras: List[Optional[LoRALayerWeights]] = []
|
|
582
|
+
for i, r in enumerate(replacements):
|
|
583
|
+
lora = LoRALayerWeights.create_dummy_lora_weights(
|
|
584
|
+
module_name + "." + r,
|
|
585
|
+
module.lora_a_stacked[i].shape[-1],
|
|
586
|
+
module.lora_b_stacked[i].shape[-2],
|
|
587
|
+
rank,
|
|
588
|
+
module.lora_a_stacked[i].dtype,
|
|
589
|
+
"cpu",
|
|
590
|
+
bias_enabled=bias_enabled,
|
|
591
|
+
)
|
|
592
|
+
lora.optimize()
|
|
593
|
+
subloras.append(lora)
|
|
594
|
+
lora = PackedLoRALayerWeights.pack(subloras)
|
|
595
|
+
model.loras[module_name] = lora
|
|
596
|
+
return model
|
|
597
|
+
|
|
598
|
+
def _match_target_modules(self, module_name: str):
|
|
599
|
+
return any(
|
|
600
|
+
re.match(
|
|
601
|
+
r".*\.{target_module}$".format(target_module=target_module),
|
|
602
|
+
module_name) or target_module == module_name
|
|
603
|
+
for target_module in self.supported_lora_modules)
|
|
604
|
+
|
|
605
|
+
def _filter_unsupported_mm_module(self, module_name: str) -> bool:
|
|
606
|
+
"""
|
|
607
|
+
Regarding multimodal models, vLLM currently only supports adding LoRA to
|
|
608
|
+
language model. LoRA for other modules, such as the vision tower, will
|
|
609
|
+
be filtered out.
|
|
610
|
+
"""
|
|
611
|
+
if self.supports_mm:
|
|
612
|
+
module_mapping: MultiModelKeys = self.model.get_mm_mapping()
|
|
613
|
+
prefix_lst = module_mapping.connector + module_mapping.tower_model
|
|
614
|
+
return any(
|
|
615
|
+
[module_name.startswith(prefix) for prefix in prefix_lst])
|
|
616
|
+
return False
|
|
617
|
+
|
|
618
|
+
def _register_packed_modules(self, module_full_name: str) -> None:
|
|
619
|
+
parts = module_full_name.split(".")
|
|
620
|
+
module_name = parts[-1]
|
|
621
|
+
replacements = self.packed_modules_mapping.get(module_name, [])
|
|
622
|
+
# When replacements is less than or equal to 1, it indicates that this
|
|
623
|
+
# module is not a packed module.
|
|
624
|
+
if len(replacements) <= 1:
|
|
625
|
+
return
|
|
626
|
+
prefix = ".".join(parts[:-1])
|
|
627
|
+
self.packed_modules[module_full_name] = [
|
|
628
|
+
prefix + "." + r if prefix else r for r in replacements
|
|
629
|
+
]
|
|
630
|
+
|
|
631
|
+
    def _create_merged_loras_inplace(self, lora_model: LoRAModel) -> None:
        for module_name, new_module_names in self.packed_modules.items():
            replacement_loras: List[Optional[LoRALayerWeights]] = []
            replaced_module: Set[str] = set()
            has_replacement = False
            for r in new_module_names:
                lora = self._get_lora_layer_weights(lora_model, r)
                replacement_loras.append(lora)
                if lora:
                    has_replacement = True
                    replaced_module.add(r)
            if not has_replacement:
                continue
            # Keep missing sub-LoRAs as explicit None placeholders so the
            # packed layer preserves each sub-module's slot index.
            for i in range(len(replacement_loras)):
                if replacement_loras[i]:
                    continue
                replacement_loras[i] = None
            # HACK Temporary solution for the pool model.
            if self.is_pooling_model and not lora_model.check_lora_name(
                    module_name):
                replaced_module_name = module_name.replace("model.", "")
                if lora_model.check_lora_name(replaced_module_name):
                    module_name = replaced_module_name
            lora_model.loras[module_name] = PackedLoRALayerWeights.pack(
                replacement_loras)
            # Remove the modules that have been replaced.
            for module in replaced_module:
                lora_model.loras.pop(module, None)
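The merge keeps positional gaps: a checkpoint that provides only some sub-LoRAs still yields a full-length list, with None marking the absent slots so each sub-module keeps its index inside the packed layer. A dictionary-based sketch of that bookkeeping (weights replaced by string stand-ins):

    checkpoint_loras = {
        "model.layers.0.self_attn.q_proj": "lora_q",  # stand-in for LoRALayerWeights
        "model.layers.0.self_attn.v_proj": "lora_v",
    }
    new_module_names = [
        "model.layers.0.self_attn.q_proj",
        "model.layers.0.self_attn.k_proj",
        "model.layers.0.self_attn.v_proj",
    ]
    replacement_loras = [checkpoint_loras.get(n) for n in new_module_names]
    assert replacement_loras == ["lora_q", None, "lora_v"]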
    def _get_lora_layer_weights(
            self, lora_model: LoRAModel,
            module_name: str) -> Optional[LoRALayerWeights]:
        org_module_name = module_name
        if self.is_pooling_model and not lora_model.check_lora_name(
                module_name):
            # If it's a pooling model and the layer name is not found,
            # remove the prefix 'model.' and search again.
            module_name = module_name.replace("model.", "")
            if lora_model.check_lora_name(module_name):
                org_module_name = module_name
                logger.info_once(
                    "For the pooling model, successfully loaded the LoRA "
                    "weights after removing the prefix 'model.'.")
        return lora_model.get_lora(org_module_name)
    def deactivate_adapter(self, adapter_id: int) -> bool:
        return deactivate_adapter(adapter_id, self._active_adapters,
                                  self._deactivate_adapter)

    def add_adapter(self, adapter: LoRAModel) -> bool:
        logger.debug(
            "Adding lora. Model id: %d, "
            "int id: %d, "
            "scaling factor: %s", adapter.id, adapter.id,
            adapter.scaling_factor)
        return add_adapter(adapter, self._registered_adapters, self.capacity,
                           self._add_adapter)

    def set_adapter_mapping(self, mapping: LoRAMapping) -> None:
        self._last_mapping = set_adapter_mapping(mapping, self._last_mapping,
                                                 self._set_adapter_mapping)

    def remove_adapter(self, adapter_id: int) -> bool:
        return remove_adapter(adapter_id, self._registered_adapters,
                              self.deactivate_adapter)

    def list_adapters(self) -> Dict[int, Any]:
        return list_adapters(self._registered_adapters)

    def get_adapter(self, adapter_id: int) -> Optional[Any]:
        return get_adapter(adapter_id, self._registered_adapters)

class LoRALRUCache(AdapterLRUCache[LoRAModel]):

    def __init__(self, capacity: int, deactivate_lora_fn: Callable[[int],
                                                                   bool]):
        super().__init__(capacity, deactivate_lora_fn)

class LRUCacheLoRAModelManager(LoRAModelManager):
    """A model manager that manages multiple LoRAs with LRU cache."""

    def __init__(self, model: nn.Module, max_num_seqs: int,
                 max_num_batched_tokens: int, vocab_size: int,
                 lora_config: LoRAConfig, device: torch.device):
        super().__init__(model, max_num_seqs, max_num_batched_tokens,
                         vocab_size, lora_config, device)
        self._registered_adapters: LoRALRUCache = LoRALRUCache(
            self.capacity, self.deactivate_adapter)
        self._active_adapters: LoRALRUCache = LoRALRUCache(
            self.lora_slots, self._deactivate_adapter)

    def list_adapters(self) -> Dict[int, LoRAModel]:
        """List all registered LoRAModels."""
        return dict(self._registered_adapters.cache)

    def add_adapter(self, lora: LoRAModel) -> bool:
        """Add a LoRAModel to the manager."""
        logger.debug(
            "Adding lora. Model id: %d, "
            "int id: %d, "
            "scaling factor: %s", lora.id, lora.id, lora.scaling_factor)
        if lora.id not in self._registered_adapters:
            self._add_adapter(lora)
            was_added = True
        else:
            # We always touch to update the LRU cache order
            self._registered_adapters.touch(lora.id)
            was_added = False
        return was_added

    def activate_adapter(
        self,
        lora_id: int,
    ) -> bool:
        if lora_id not in self._active_adapters and len(
                self._active_adapters) >= self.lora_slots:
            self._active_adapters.remove_oldest()
        result = super().activate_adapter(lora_id)
        # We always touch to update the LRU cache order
        self._active_adapters.touch(lora_id)
        return result
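The eviction policy in activate_adapter, in miniature: the oldest entry is only evicted when a new id arrives at capacity, and every activation refreshes recency. An OrderedDict stand-in for the LRU cache (capacity 2, purely illustrative):

    from collections import OrderedDict

    active: OrderedDict[int, bool] = OrderedDict()
    for lora_id in (1, 2, 1, 3):            # re-activating 1 makes 2 the oldest
        if lora_id not in active and len(active) >= 2:
            active.popitem(last=False)       # remove_oldest
        active[lora_id] = True
        active.move_to_end(lora_id)          # touch on every activation
    assert list(active) == [1, 3]            # 2 was evicted, not 1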
    def remove_oldest_adapter(self) -> bool:
        if len(self._registered_adapters) > 0:
            self._registered_adapters.remove_oldest()
            return True
        return False

    def pin_adapter(self, lora_id: int) -> bool:
        """Pin a LoRAModel in the manager cache."""
        self._pin_lora_in_cpu_cache(lora_id)
        self._pin_lora_in_gpu_cache(lora_id)
        return True

    def _pin_lora_in_cpu_cache(self, lora_id: int):
        try:
            self._registered_adapters.pin(lora_id)
        except ValueError as err:
            raise ValueError("Pinning failed. "
                             f"LoRA {lora_id} is not registered.") from err

    def _pin_lora_in_gpu_cache(self, lora_id: int):
        if lora_id not in self._active_adapters:
            # move lora to gpu if not already active
            self.activate_adapter(lora_id)

        self._active_adapters.pin(lora_id)

def create_lora_manager(
        model: nn.Module,
        max_num_seqs: int,
        max_num_batched_tokens: int,
        vocab_size: int,
        lora_config: LoRAConfig,
        device: torch.device,
        lora_manager_cls: Type[LoRAModelManager] = LoRAModelManager,
        **kwargs) -> LoRAModelManager:
    """Create a LoRA adapter manager for a given model."""
    if not hasattr(model, "packed_modules_mapping"):
        raise ValueError(f"Model {type(model)} is not supported for LoRA.")
    lora_manager = lora_manager_cls(
        model=model,
        max_num_seqs=max_num_seqs,
        max_num_batched_tokens=max_num_batched_tokens,
        vocab_size=vocab_size,
        lora_config=lora_config,
        device=device,
        **kwargs)
    return lora_manager
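The guard at the top of create_lora_manager can be seen with a plain stand-in class (hypothetical, not a real vLLM model): only models whose class defines packed_modules_mapping qualify for LoRA.

    class NoLoRAModel:  # hypothetical stand-in; real callers pass an nn.Module
        pass

    model = NoLoRAModel()
    if not hasattr(model, "packed_modules_mapping"):
        # create_lora_manager raises a ValueError with this message:
        print(f"Model {type(model)} is not supported for LoRA.")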