vllm_cpu_avx512bf16-0.9.0.post2-cp310-cp310-manylinux_2_17_x86_64.whl
This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
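For reference, a file listing like the one below can be reproduced locally from the wheel itself, since a wheel is a plain zip archive. A minimal sketch using Python's standard `zipfile` module; the wheel filename is taken from this page's header and is assumed to be present in the current directory:

```python
import zipfile

# Hypothetical local path to the wheel listed above; adjust as needed.
WHEEL = "vllm_cpu_avx512bf16-0.9.0.post2-cp310-cp310-manylinux_2_17_x86_64.whl"

# Wheels are zip archives, so the standard library can enumerate their members.
with zipfile.ZipFile(WHEEL) as wf:
    for info in wf.infolist():
        print(f"{info.filename} ({info.file_size} bytes)")
```

The per-file `+N -0` counts below come from the registry's diff tool (every file is newly added relative to the empty baseline), not from the wheel metadata itself.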
- vllm/_C.abi3.so +0 -0
- vllm/__init__.py +170 -0
- vllm/_custom_ops.py +1742 -0
- vllm/_ipex_ops.py +243 -0
- vllm/_version.py +34 -0
- vllm/adapter_commons/__init__.py +0 -0
- vllm/adapter_commons/layers.py +15 -0
- vllm/adapter_commons/models.py +105 -0
- vllm/adapter_commons/request.py +25 -0
- vllm/adapter_commons/utils.py +92 -0
- vllm/adapter_commons/worker_manager.py +38 -0
- vllm/assets/__init__.py +0 -0
- vllm/assets/audio.py +44 -0
- vllm/assets/base.py +40 -0
- vllm/assets/image.py +33 -0
- vllm/assets/video.py +114 -0
- vllm/attention/__init__.py +19 -0
- vllm/attention/backends/__init__.py +0 -0
- vllm/attention/backends/abstract.py +306 -0
- vllm/attention/backends/blocksparse_attn.py +457 -0
- vllm/attention/backends/cpu_mla.py +305 -0
- vllm/attention/backends/dual_chunk_flash_attn.py +1494 -0
- vllm/attention/backends/flash_attn.py +999 -0
- vllm/attention/backends/flashinfer.py +1100 -0
- vllm/attention/backends/flashmla.py +242 -0
- vllm/attention/backends/hpu_attn.py +309 -0
- vllm/attention/backends/ipex_attn.py +394 -0
- vllm/attention/backends/mla/__init__.py +0 -0
- vllm/attention/backends/mla/common.py +1381 -0
- vllm/attention/backends/pallas.py +347 -0
- vllm/attention/backends/placeholder_attn.py +399 -0
- vllm/attention/backends/rocm_aiter_mla.py +435 -0
- vllm/attention/backends/rocm_flash_attn.py +970 -0
- vllm/attention/backends/torch_sdpa.py +691 -0
- vllm/attention/backends/triton_mla.py +113 -0
- vllm/attention/backends/utils.py +609 -0
- vllm/attention/backends/xformers.py +798 -0
- vllm/attention/layer.py +452 -0
- vllm/attention/ops/__init__.py +0 -0
- vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
- vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +432 -0
- vllm/attention/ops/blocksparse_attention/interface.py +238 -0
- vllm/attention/ops/blocksparse_attention/utils.py +245 -0
- vllm/attention/ops/chunked_prefill_paged_decode.py +367 -0
- vllm/attention/ops/flashmla.py +115 -0
- vllm/attention/ops/hpu_paged_attn.py +87 -0
- vllm/attention/ops/ipex_attn.py +194 -0
- vllm/attention/ops/merge_attn_states.py +42 -0
- vllm/attention/ops/nki_flash_attn.py +905 -0
- vllm/attention/ops/paged_attn.py +255 -0
- vllm/attention/ops/prefix_prefill.py +901 -0
- vllm/attention/ops/rocm_aiter_mla.py +99 -0
- vllm/attention/ops/rocm_aiter_paged_attn.py +101 -0
- vllm/attention/ops/triton_decode_attention.py +673 -0
- vllm/attention/ops/triton_flash_attention.py +1374 -0
- vllm/attention/ops/triton_merge_attn_states.py +96 -0
- vllm/attention/ops/triton_unified_attention.py +337 -0
- vllm/attention/selector.py +186 -0
- vllm/attention/utils/fa_utils.py +54 -0
- vllm/beam_search.py +82 -0
- vllm/benchmarks/__init__.py +0 -0
- vllm/benchmarks/datasets.py +921 -0
- vllm/benchmarks/endpoint_request_func.py +160 -0
- vllm/benchmarks/latency.py +184 -0
- vllm/benchmarks/serve.py +925 -0
- vllm/benchmarks/throughput.py +609 -0
- vllm/benchmarks/utils.py +69 -0
- vllm/collect_env.py +818 -0
- vllm/compilation/__init__.py +0 -0
- vllm/compilation/activation_quant_fusion.py +88 -0
- vllm/compilation/backends.py +560 -0
- vllm/compilation/base_piecewise_backend.py +71 -0
- vllm/compilation/collective_fusion.py +126 -0
- vllm/compilation/compiler_interface.py +533 -0
- vllm/compilation/counter.py +33 -0
- vllm/compilation/cuda_piecewise_backend.py +213 -0
- vllm/compilation/decorators.py +249 -0
- vllm/compilation/fix_functionalization.py +190 -0
- vllm/compilation/fusion.py +617 -0
- vllm/compilation/fx_utils.py +61 -0
- vllm/compilation/inductor_pass.py +114 -0
- vllm/compilation/monitor.py +38 -0
- vllm/compilation/multi_output_match.py +108 -0
- vllm/compilation/noop_elimination.py +136 -0
- vllm/compilation/pass_manager.py +77 -0
- vllm/compilation/sequence_parallelism.py +267 -0
- vllm/compilation/torch25_custom_graph_pass.py +41 -0
- vllm/compilation/vllm_inductor_pass.py +66 -0
- vllm/compilation/wrapper.py +129 -0
- vllm/config.py +4600 -0
- vllm/connections.py +173 -0
- vllm/core/__init__.py +0 -0
- vllm/core/block/__init__.py +0 -0
- vllm/core/block/block_table.py +398 -0
- vllm/core/block/common.py +370 -0
- vllm/core/block/cpu_gpu_block_allocator.py +440 -0
- vllm/core/block/interfaces.py +318 -0
- vllm/core/block/naive_block.py +465 -0
- vllm/core/block/prefix_caching_block.py +1134 -0
- vllm/core/block/utils.py +27 -0
- vllm/core/block_manager.py +520 -0
- vllm/core/evictor.py +156 -0
- vllm/core/interfaces.py +134 -0
- vllm/core/placeholder_block_space_manager.py +99 -0
- vllm/core/scheduler.py +2092 -0
- vllm/device_allocator/__init__.py +0 -0
- vllm/device_allocator/cumem.py +280 -0
- vllm/distributed/__init__.py +5 -0
- vllm/distributed/communication_op.py +40 -0
- vllm/distributed/device_communicators/__init__.py +0 -0
- vllm/distributed/device_communicators/all2all.py +126 -0
- vllm/distributed/device_communicators/base_device_communicator.py +260 -0
- vllm/distributed/device_communicators/cpu_communicator.py +144 -0
- vllm/distributed/device_communicators/cuda_communicator.py +167 -0
- vllm/distributed/device_communicators/cuda_wrapper.py +179 -0
- vllm/distributed/device_communicators/custom_all_reduce.py +303 -0
- vllm/distributed/device_communicators/custom_all_reduce_utils.py +258 -0
- vllm/distributed/device_communicators/hpu_communicator.py +45 -0
- vllm/distributed/device_communicators/neuron_communicator.py +19 -0
- vllm/distributed/device_communicators/pynccl.py +217 -0
- vllm/distributed/device_communicators/pynccl_wrapper.py +340 -0
- vllm/distributed/device_communicators/shm_broadcast.py +541 -0
- vllm/distributed/device_communicators/tpu_communicator.py +102 -0
- vllm/distributed/device_communicators/xpu_communicator.py +54 -0
- vllm/distributed/kv_events.py +296 -0
- vllm/distributed/kv_transfer/README.md +29 -0
- vllm/distributed/kv_transfer/__init__.py +11 -0
- vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
- vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/base.py +127 -0
- vllm/distributed/kv_transfer/kv_connector/factory.py +126 -0
- vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +98 -0
- vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +202 -0
- vllm/distributed/kv_transfer/kv_connector/simple_connector.py +328 -0
- vllm/distributed/kv_transfer/kv_connector/utils.py +91 -0
- vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +5 -0
- vllm/distributed/kv_transfer/kv_connector/v1/base.py +259 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +133 -0
- vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +189 -0
- vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +851 -0
- vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +383 -0
- vllm/distributed/kv_transfer/kv_connector_agent.py +76 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +174 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +160 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +236 -0
- vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_pipe/base.py +66 -0
- vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +279 -0
- vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +279 -0
- vllm/distributed/kv_transfer/kv_transfer_state.py +70 -0
- vllm/distributed/parallel_state.py +1294 -0
- vllm/distributed/utils.py +520 -0
- vllm/engine/__init__.py +0 -0
- vllm/engine/arg_utils.py +1649 -0
- vllm/engine/async_llm_engine.py +1274 -0
- vllm/engine/async_timeout.py +191 -0
- vllm/engine/llm_engine.py +2153 -0
- vllm/engine/metrics.py +717 -0
- vllm/engine/metrics_types.py +96 -0
- vllm/engine/multiprocessing/__init__.py +188 -0
- vllm/engine/multiprocessing/client.py +755 -0
- vllm/engine/multiprocessing/engine.py +459 -0
- vllm/engine/output_processor/__init__.py +0 -0
- vllm/engine/output_processor/interfaces.py +74 -0
- vllm/engine/output_processor/multi_step.py +215 -0
- vllm/engine/output_processor/single_step.py +144 -0
- vllm/engine/output_processor/stop_checker.py +130 -0
- vllm/engine/output_processor/util.py +27 -0
- vllm/engine/protocol.py +310 -0
- vllm/entrypoints/__init__.py +0 -0
- vllm/entrypoints/api_server.py +177 -0
- vllm/entrypoints/chat_utils.py +1298 -0
- vllm/entrypoints/cli/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/base.py +38 -0
- vllm/entrypoints/cli/benchmark/latency.py +29 -0
- vllm/entrypoints/cli/benchmark/main.py +53 -0
- vllm/entrypoints/cli/benchmark/serve.py +29 -0
- vllm/entrypoints/cli/benchmark/throughput.py +29 -0
- vllm/entrypoints/cli/collect_env.py +34 -0
- vllm/entrypoints/cli/main.py +62 -0
- vllm/entrypoints/cli/openai.py +204 -0
- vllm/entrypoints/cli/serve.py +141 -0
- vllm/entrypoints/cli/types.py +24 -0
- vllm/entrypoints/launcher.py +146 -0
- vllm/entrypoints/llm.py +1503 -0
- vllm/entrypoints/logger.py +49 -0
- vllm/entrypoints/openai/__init__.py +0 -0
- vllm/entrypoints/openai/api_server.py +1376 -0
- vllm/entrypoints/openai/cli_args.py +306 -0
- vllm/entrypoints/openai/logits_processors.py +89 -0
- vllm/entrypoints/openai/protocol.py +1890 -0
- vllm/entrypoints/openai/run_batch.py +439 -0
- vllm/entrypoints/openai/serving_chat.py +1192 -0
- vllm/entrypoints/openai/serving_classification.py +159 -0
- vllm/entrypoints/openai/serving_completion.py +590 -0
- vllm/entrypoints/openai/serving_embedding.py +200 -0
- vllm/entrypoints/openai/serving_engine.py +985 -0
- vllm/entrypoints/openai/serving_models.py +314 -0
- vllm/entrypoints/openai/serving_pooling.py +231 -0
- vllm/entrypoints/openai/serving_score.py +432 -0
- vllm/entrypoints/openai/serving_tokenization.py +151 -0
- vllm/entrypoints/openai/serving_transcription.py +421 -0
- vllm/entrypoints/openai/tool_parsers/__init__.py +22 -0
- vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +163 -0
- vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +369 -0
- vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +258 -0
- vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +236 -0
- vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +370 -0
- vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +215 -0
- vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +307 -0
- vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +302 -0
- vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +266 -0
- vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +342 -0
- vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +111 -0
- vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +296 -0
- vllm/entrypoints/openai/tool_parsers/utils.py +123 -0
- vllm/entrypoints/score_utils.py +49 -0
- vllm/entrypoints/ssl.py +74 -0
- vllm/entrypoints/utils.py +219 -0
- vllm/env_override.py +34 -0
- vllm/envs.py +896 -0
- vllm/executor/__init__.py +0 -0
- vllm/executor/executor_base.py +400 -0
- vllm/executor/mp_distributed_executor.py +243 -0
- vllm/executor/msgspec_utils.py +29 -0
- vllm/executor/multiproc_worker_utils.py +312 -0
- vllm/executor/ray_distributed_executor.py +700 -0
- vllm/executor/ray_utils.py +398 -0
- vllm/executor/uniproc_executor.py +138 -0
- vllm/forward_context.py +147 -0
- vllm/inputs/__init__.py +40 -0
- vllm/inputs/data.py +330 -0
- vllm/inputs/parse.py +150 -0
- vllm/inputs/preprocess.py +908 -0
- vllm/inputs/registry.py +214 -0
- vllm/jsontree.py +79 -0
- vllm/logger.py +211 -0
- vllm/logging_utils/__init__.py +7 -0
- vllm/logging_utils/dump_input.py +84 -0
- vllm/logging_utils/formatter.py +17 -0
- vllm/logits_process.py +118 -0
- vllm/lora/__init__.py +0 -0
- vllm/lora/fully_sharded_layers.py +354 -0
- vllm/lora/layers.py +1284 -0
- vllm/lora/lora.py +198 -0
- vllm/lora/models.py +817 -0
- vllm/lora/ops/__init__.py +0 -0
- vllm/lora/ops/torch_ops/__init__.py +15 -0
- vllm/lora/ops/torch_ops/lora_ops.py +115 -0
- vllm/lora/ops/triton_ops/__init__.py +11 -0
- vllm/lora/ops/triton_ops/kernel_utils.py +242 -0
- vllm/lora/ops/triton_ops/lora_expand_op.py +289 -0
- vllm/lora/ops/triton_ops/lora_kernel_metadata.py +147 -0
- vllm/lora/ops/triton_ops/lora_shrink_op.py +243 -0
- vllm/lora/ops/triton_ops/utils.py +119 -0
- vllm/lora/ops/xla_ops/__init__.py +6 -0
- vllm/lora/ops/xla_ops/lora_ops.py +106 -0
- vllm/lora/ops/xla_ops/pallas.py +133 -0
- vllm/lora/peft_helper.py +135 -0
- vllm/lora/punica_wrapper/__init__.py +9 -0
- vllm/lora/punica_wrapper/punica_base.py +484 -0
- vllm/lora/punica_wrapper/punica_cpu.py +348 -0
- vllm/lora/punica_wrapper/punica_gpu.py +289 -0
- vllm/lora/punica_wrapper/punica_hpu.py +144 -0
- vllm/lora/punica_wrapper/punica_selector.py +19 -0
- vllm/lora/punica_wrapper/punica_tpu.py +325 -0
- vllm/lora/punica_wrapper/utils.py +163 -0
- vllm/lora/request.py +98 -0
- vllm/lora/resolver.py +84 -0
- vllm/lora/utils.py +239 -0
- vllm/lora/worker_manager.py +253 -0
- vllm/model_executor/__init__.py +15 -0
- vllm/model_executor/custom_op.py +151 -0
- vllm/model_executor/guided_decoding/__init__.py +180 -0
- vllm/model_executor/guided_decoding/guidance_decoding.py +62 -0
- vllm/model_executor/guided_decoding/guidance_logits_processors.py +103 -0
- vllm/model_executor/guided_decoding/guided_fields.py +42 -0
- vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +66 -0
- vllm/model_executor/guided_decoding/outlines_decoding.py +154 -0
- vllm/model_executor/guided_decoding/outlines_logits_processors.py +283 -0
- vllm/model_executor/guided_decoding/utils.py +241 -0
- vllm/model_executor/guided_decoding/xgrammar_decoding.py +425 -0
- vllm/model_executor/layers/__init__.py +0 -0
- vllm/model_executor/layers/activation.py +368 -0
- vllm/model_executor/layers/fused_moe/__init__.py +53 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/README +12 -0
- vllm/model_executor/layers/fused_moe/cutlass_moe.py +382 -0
- vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +227 -0
- vllm/model_executor/layers/fused_moe/fused_batched_moe.py +755 -0
- vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +231 -0
- vllm/model_executor/layers/fused_moe/fused_moe.py +1722 -0
- vllm/model_executor/layers/fused_moe/layer.py +1366 -0
- vllm/model_executor/layers/fused_moe/modular_kernel.py +364 -0
- vllm/model_executor/layers/fused_moe/moe_align_block_size.py +242 -0
- vllm/model_executor/layers/fused_moe/moe_pallas.py +83 -0
- vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +188 -0
- vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +59 -0
- vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +146 -0
- vllm/model_executor/layers/fused_moe/prepare_finalize.py +60 -0
- vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +372 -0
- vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +112 -0
- vllm/model_executor/layers/fused_moe/utils.py +97 -0
- vllm/model_executor/layers/layernorm.py +287 -0
- vllm/model_executor/layers/lightning_attn.py +651 -0
- vllm/model_executor/layers/linear.py +1523 -0
- vllm/model_executor/layers/logits_processor.py +196 -0
- vllm/model_executor/layers/mamba/__init__.py +0 -0
- vllm/model_executor/layers/mamba/mamba2_metadata.py +124 -0
- vllm/model_executor/layers/mamba/mamba_mixer.py +244 -0
- vllm/model_executor/layers/mamba/mamba_mixer2.py +615 -0
- vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
- vllm/model_executor/layers/mamba/ops/causal_conv1d.py +104 -0
- vllm/model_executor/layers/mamba/ops/mamba_ssm.py +413 -0
- vllm/model_executor/layers/mamba/ops/ssd_bmm.py +261 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +588 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +750 -0
- vllm/model_executor/layers/mamba/ops/ssd_combined.py +231 -0
- vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +205 -0
- vllm/model_executor/layers/pooler.py +343 -0
- vllm/model_executor/layers/quantization/__init__.py +156 -0
- vllm/model_executor/layers/quantization/aqlm.py +375 -0
- vllm/model_executor/layers/quantization/auto_round.py +308 -0
- vllm/model_executor/layers/quantization/awq.py +185 -0
- vllm/model_executor/layers/quantization/awq_marlin.py +518 -0
- vllm/model_executor/layers/quantization/awq_triton.py +319 -0
- vllm/model_executor/layers/quantization/base_config.py +150 -0
- vllm/model_executor/layers/quantization/bitblas.py +460 -0
- vllm/model_executor/layers/quantization/bitsandbytes.py +397 -0
- vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +644 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1252 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +21 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +357 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +54 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +159 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +92 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +120 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +149 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +110 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +200 -0
- vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +205 -0
- vllm/model_executor/layers/quantization/compressed_tensors/utils.py +214 -0
- vllm/model_executor/layers/quantization/deepspeedfp.py +194 -0
- vllm/model_executor/layers/quantization/experts_int8.py +195 -0
- vllm/model_executor/layers/quantization/fbgemm_fp8.py +171 -0
- vllm/model_executor/layers/quantization/fp8.py +876 -0
- vllm/model_executor/layers/quantization/gguf.py +564 -0
- vllm/model_executor/layers/quantization/gptq.py +277 -0
- vllm/model_executor/layers/quantization/gptq_bitblas.py +444 -0
- vllm/model_executor/layers/quantization/gptq_marlin.py +647 -0
- vllm/model_executor/layers/quantization/gptq_marlin_24.py +296 -0
- vllm/model_executor/layers/quantization/hqq_marlin.py +331 -0
- vllm/model_executor/layers/quantization/ipex_quant.py +249 -0
- vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +89 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +82 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +299 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +142 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +119 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +130 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +66 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +86 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +119 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +136 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +40 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +104 -0
- vllm/model_executor/layers/quantization/kv_cache.py +138 -0
- vllm/model_executor/layers/quantization/marlin.py +260 -0
- vllm/model_executor/layers/quantization/modelopt.py +734 -0
- vllm/model_executor/layers/quantization/moe_wna16.py +448 -0
- vllm/model_executor/layers/quantization/neuron_quant.py +68 -0
- vllm/model_executor/layers/quantization/ptpc_fp8.py +126 -0
- vllm/model_executor/layers/quantization/qqq.py +274 -0
- vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
- vllm/model_executor/layers/quantization/quark/quark.py +440 -0
- vllm/model_executor/layers/quantization/quark/quark_moe.py +236 -0
- vllm/model_executor/layers/quantization/quark/schemes/__init__.py +8 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +54 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +125 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +145 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +121 -0
- vllm/model_executor/layers/quantization/quark/utils.py +104 -0
- vllm/model_executor/layers/quantization/schema.py +85 -0
- vllm/model_executor/layers/quantization/torchao.py +143 -0
- vllm/model_executor/layers/quantization/tpu_int8.py +120 -0
- vllm/model_executor/layers/quantization/utils/__init__.py +5 -0
- vllm/model_executor/layers/quantization/utils/allspark_utils.py +51 -0
- vllm/model_executor/layers/quantization/utils/bitblas_utils.py +207 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/fp8_utils.py +611 -0
- vllm/model_executor/layers/quantization/utils/gptq_utils.py +94 -0
- vllm/model_executor/layers/quantization/utils/int8_utils.py +484 -0
- vllm/model_executor/layers/quantization/utils/layer_utils.py +39 -0
- vllm/model_executor/layers/quantization/utils/machete_utils.py +32 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils.py +475 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +277 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +324 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +164 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +463 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +125 -0
- vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +44 -0
- vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +61 -0
- vllm/model_executor/layers/quantization/utils/quant_utils.py +572 -0
- vllm/model_executor/layers/quantization/utils/w8a8_utils.py +404 -0
- vllm/model_executor/layers/rejection_sampler.py +405 -0
- vllm/model_executor/layers/resampler.py +269 -0
- vllm/model_executor/layers/rotary_embedding.py +1861 -0
- vllm/model_executor/layers/sampler.py +1203 -0
- vllm/model_executor/layers/spec_decode_base_sampler.py +258 -0
- vllm/model_executor/layers/typical_acceptance_sampler.py +165 -0
- vllm/model_executor/layers/utils.py +99 -0
- vllm/model_executor/layers/vocab_parallel_embedding.py +486 -0
- vllm/model_executor/model_loader/__init__.py +75 -0
- vllm/model_executor/model_loader/base_loader.py +24 -0
- vllm/model_executor/model_loader/bitsandbytes_loader.py +582 -0
- vllm/model_executor/model_loader/default_loader.py +295 -0
- vllm/model_executor/model_loader/dummy_loader.py +37 -0
- vllm/model_executor/model_loader/gguf_loader.py +113 -0
- vllm/model_executor/model_loader/neuron.py +475 -0
- vllm/model_executor/model_loader/neuronx_distributed.py +622 -0
- vllm/model_executor/model_loader/runai_streamer_loader.py +120 -0
- vllm/model_executor/model_loader/sharded_state_loader.py +211 -0
- vllm/model_executor/model_loader/tensorizer.py +632 -0
- vllm/model_executor/model_loader/tensorizer_loader.py +122 -0
- vllm/model_executor/model_loader/utils.py +301 -0
- vllm/model_executor/model_loader/weight_utils.py +781 -0
- vllm/model_executor/models/__init__.py +27 -0
- vllm/model_executor/models/adapters.py +247 -0
- vllm/model_executor/models/aimv2.py +199 -0
- vllm/model_executor/models/arctic.py +558 -0
- vllm/model_executor/models/aria.py +656 -0
- vllm/model_executor/models/aya_vision.py +461 -0
- vllm/model_executor/models/baichuan.py +473 -0
- vllm/model_executor/models/bamba.py +542 -0
- vllm/model_executor/models/bart.py +937 -0
- vllm/model_executor/models/bert.py +517 -0
- vllm/model_executor/models/bert_with_rope.py +714 -0
- vllm/model_executor/models/blip.py +338 -0
- vllm/model_executor/models/blip2.py +717 -0
- vllm/model_executor/models/bloom.py +372 -0
- vllm/model_executor/models/chameleon.py +1135 -0
- vllm/model_executor/models/chatglm.py +477 -0
- vllm/model_executor/models/clip.py +411 -0
- vllm/model_executor/models/commandr.py +471 -0
- vllm/model_executor/models/constant_size_cache.py +136 -0
- vllm/model_executor/models/dbrx.py +471 -0
- vllm/model_executor/models/deepseek.py +485 -0
- vllm/model_executor/models/deepseek_mtp.py +268 -0
- vllm/model_executor/models/deepseek_v2.py +842 -0
- vllm/model_executor/models/deepseek_vl2.py +647 -0
- vllm/model_executor/models/eagle.py +259 -0
- vllm/model_executor/models/exaone.py +550 -0
- vllm/model_executor/models/fairseq2_llama.py +153 -0
- vllm/model_executor/models/falcon.py +509 -0
- vllm/model_executor/models/falcon_h1.py +684 -0
- vllm/model_executor/models/florence2.py +1102 -0
- vllm/model_executor/models/fuyu.py +388 -0
- vllm/model_executor/models/gemma.py +424 -0
- vllm/model_executor/models/gemma2.py +424 -0
- vllm/model_executor/models/gemma3.py +532 -0
- vllm/model_executor/models/gemma3_mm.py +708 -0
- vllm/model_executor/models/glm.py +22 -0
- vllm/model_executor/models/glm4.py +304 -0
- vllm/model_executor/models/glm4v.py +647 -0
- vllm/model_executor/models/gpt2.py +327 -0
- vllm/model_executor/models/gpt_bigcode.py +334 -0
- vllm/model_executor/models/gpt_j.py +338 -0
- vllm/model_executor/models/gpt_neox.py +331 -0
- vllm/model_executor/models/granite.py +492 -0
- vllm/model_executor/models/granite_speech.py +778 -0
- vllm/model_executor/models/granitemoe.py +436 -0
- vllm/model_executor/models/granitemoehybrid.py +585 -0
- vllm/model_executor/models/granitemoeshared.py +340 -0
- vllm/model_executor/models/gritlm.py +223 -0
- vllm/model_executor/models/grok1.py +545 -0
- vllm/model_executor/models/h2ovl.py +545 -0
- vllm/model_executor/models/idefics2_vision_model.py +388 -0
- vllm/model_executor/models/idefics3.py +767 -0
- vllm/model_executor/models/interfaces.py +571 -0
- vllm/model_executor/models/interfaces_base.py +163 -0
- vllm/model_executor/models/intern_vit.py +475 -0
- vllm/model_executor/models/internlm2.py +454 -0
- vllm/model_executor/models/internlm2_ve.py +146 -0
- vllm/model_executor/models/internvl.py +1405 -0
- vllm/model_executor/models/jais.py +372 -0
- vllm/model_executor/models/jamba.py +591 -0
- vllm/model_executor/models/kimi_vl.py +576 -0
- vllm/model_executor/models/llama.py +643 -0
- vllm/model_executor/models/llama4.py +531 -0
- vllm/model_executor/models/llama_eagle.py +166 -0
- vllm/model_executor/models/llama_eagle3.py +257 -0
- vllm/model_executor/models/llava.py +865 -0
- vllm/model_executor/models/llava_next.py +585 -0
- vllm/model_executor/models/llava_next_video.py +470 -0
- vllm/model_executor/models/llava_onevision.py +955 -0
- vllm/model_executor/models/mamba.py +272 -0
- vllm/model_executor/models/mamba2.py +302 -0
- vllm/model_executor/models/mamba_cache.py +75 -0
- vllm/model_executor/models/medusa.py +218 -0
- vllm/model_executor/models/mimo.py +191 -0
- vllm/model_executor/models/mimo_mtp.py +284 -0
- vllm/model_executor/models/minicpm.py +590 -0
- vllm/model_executor/models/minicpm3.py +229 -0
- vllm/model_executor/models/minicpmo.py +758 -0
- vllm/model_executor/models/minicpmv.py +1286 -0
- vllm/model_executor/models/minimax_cache.py +35 -0
- vllm/model_executor/models/minimax_text_01.py +1303 -0
- vllm/model_executor/models/minimax_vl_01.py +363 -0
- vllm/model_executor/models/mistral3.py +603 -0
- vllm/model_executor/models/mixtral.py +487 -0
- vllm/model_executor/models/mixtral_quant.py +452 -0
- vllm/model_executor/models/mllama.py +1623 -0
- vllm/model_executor/models/mllama4.py +838 -0
- vllm/model_executor/models/mlp_speculator.py +205 -0
- vllm/model_executor/models/modernbert.py +329 -0
- vllm/model_executor/models/module_mapping.py +71 -0
- vllm/model_executor/models/molmo.py +1567 -0
- vllm/model_executor/models/moonvit.py +629 -0
- vllm/model_executor/models/mpt.py +330 -0
- vllm/model_executor/models/nemotron.py +507 -0
- vllm/model_executor/models/nemotron_nas.py +483 -0
- vllm/model_executor/models/nvlm_d.py +215 -0
- vllm/model_executor/models/olmo.py +388 -0
- vllm/model_executor/models/olmo2.py +413 -0
- vllm/model_executor/models/olmoe.py +446 -0
- vllm/model_executor/models/opt.py +411 -0
- vllm/model_executor/models/orion.py +348 -0
- vllm/model_executor/models/ovis.py +554 -0
- vllm/model_executor/models/paligemma.py +397 -0
- vllm/model_executor/models/persimmon.py +343 -0
- vllm/model_executor/models/phi.py +355 -0
- vllm/model_executor/models/phi3.py +18 -0
- vllm/model_executor/models/phi3_small.py +464 -0
- vllm/model_executor/models/phi3v.py +722 -0
- vllm/model_executor/models/phi4mm.py +1245 -0
- vllm/model_executor/models/phi4mm_audio.py +1232 -0
- vllm/model_executor/models/phi4mm_utils.py +1883 -0
- vllm/model_executor/models/phimoe.py +664 -0
- vllm/model_executor/models/pixtral.py +1315 -0
- vllm/model_executor/models/plamo2.py +737 -0
- vllm/model_executor/models/prithvi_geospatial_mae.py +231 -0
- vllm/model_executor/models/qwen.py +361 -0
- vllm/model_executor/models/qwen2.py +567 -0
- vllm/model_executor/models/qwen2_5_omni_thinker.py +903 -0
- vllm/model_executor/models/qwen2_5_vl.py +1171 -0
- vllm/model_executor/models/qwen2_audio.py +409 -0
- vllm/model_executor/models/qwen2_moe.py +539 -0
- vllm/model_executor/models/qwen2_rm.py +131 -0
- vllm/model_executor/models/qwen2_vl.py +1410 -0
- vllm/model_executor/models/qwen3.py +320 -0
- vllm/model_executor/models/qwen3_moe.py +534 -0
- vllm/model_executor/models/qwen_vl.py +784 -0
- vllm/model_executor/models/registry.py +618 -0
- vllm/model_executor/models/roberta.py +273 -0
- vllm/model_executor/models/siglip.py +523 -0
- vllm/model_executor/models/skyworkr1v.py +950 -0
- vllm/model_executor/models/smolvlm.py +51 -0
- vllm/model_executor/models/solar.py +505 -0
- vllm/model_executor/models/stablelm.py +342 -0
- vllm/model_executor/models/starcoder2.py +355 -0
- vllm/model_executor/models/telechat2.py +139 -0
- vllm/model_executor/models/teleflm.py +78 -0
- vllm/model_executor/models/transformers.py +507 -0
- vllm/model_executor/models/ultravox.py +655 -0
- vllm/model_executor/models/utils.py +730 -0
- vllm/model_executor/models/vision.py +146 -0
- vllm/model_executor/models/whisper.py +746 -0
- vllm/model_executor/models/zamba2.py +1008 -0
- vllm/model_executor/parameter.py +458 -0
- vllm/model_executor/pooling_metadata.py +71 -0
- vllm/model_executor/sampling_metadata.py +596 -0
- vllm/model_executor/utils.py +53 -0
- vllm/multimodal/__init__.py +32 -0
- vllm/multimodal/audio.py +105 -0
- vllm/multimodal/base.py +218 -0
- vllm/multimodal/hasher.py +117 -0
- vllm/multimodal/image.py +96 -0
- vllm/multimodal/inputs.py +872 -0
- vllm/multimodal/parse.py +460 -0
- vllm/multimodal/processing.py +1894 -0
- vllm/multimodal/profiling.py +273 -0
- vllm/multimodal/registry.py +330 -0
- vllm/multimodal/utils.py +392 -0
- vllm/multimodal/video.py +197 -0
- vllm/outputs.py +525 -0
- vllm/platforms/__init__.py +290 -0
- vllm/platforms/cpu.py +205 -0
- vllm/platforms/cuda.py +461 -0
- vllm/platforms/hpu.py +105 -0
- vllm/platforms/interface.py +492 -0
- vllm/platforms/neuron.py +152 -0
- vllm/platforms/rocm.py +388 -0
- vllm/platforms/tpu.py +215 -0
- vllm/platforms/xpu.py +155 -0
- vllm/plugins/__init__.py +86 -0
- vllm/plugins/lora_resolvers/README.md +15 -0
- vllm/plugins/lora_resolvers/__init__.py +0 -0
- vllm/plugins/lora_resolvers/filesystem_resolver.py +49 -0
- vllm/pooling_params.py +53 -0
- vllm/profiler/__init__.py +0 -0
- vllm/profiler/layerwise_profile.py +374 -0
- vllm/profiler/utils.py +147 -0
- vllm/prompt_adapter/__init__.py +0 -0
- vllm/prompt_adapter/layers.py +82 -0
- vllm/prompt_adapter/models.py +357 -0
- vllm/prompt_adapter/request.py +36 -0
- vllm/prompt_adapter/utils.py +97 -0
- vllm/prompt_adapter/worker_manager.py +178 -0
- vllm/py.typed +2 -0
- vllm/reasoning/__init__.py +14 -0
- vllm/reasoning/abs_reasoning_parsers.py +191 -0
- vllm/reasoning/deepseek_r1_reasoning_parser.py +172 -0
- vllm/reasoning/granite_reasoning_parser.py +362 -0
- vllm/reasoning/qwen3_reasoning_parser.py +150 -0
- vllm/sampling_params.py +590 -0
- vllm/scalar_type.py +346 -0
- vllm/scripts.py +14 -0
- vllm/sequence.py +1567 -0
- vllm/spec_decode/__init__.py +0 -0
- vllm/spec_decode/batch_expansion.py +505 -0
- vllm/spec_decode/draft_model_runner.py +349 -0
- vllm/spec_decode/interfaces.py +98 -0
- vllm/spec_decode/medusa_worker.py +137 -0
- vllm/spec_decode/metrics.py +212 -0
- vllm/spec_decode/mlp_speculator_worker.py +93 -0
- vllm/spec_decode/mqa_scorer.py +159 -0
- vllm/spec_decode/multi_step_worker.py +422 -0
- vllm/spec_decode/ngram_worker.py +195 -0
- vllm/spec_decode/proposer_worker_base.py +58 -0
- vllm/spec_decode/smaller_tp_proposer_worker.py +195 -0
- vllm/spec_decode/spec_decode_worker.py +1325 -0
- vllm/spec_decode/target_model_runner.py +44 -0
- vllm/spec_decode/top1_proposer.py +274 -0
- vllm/spec_decode/util.py +276 -0
- vllm/test_utils.py +129 -0
- vllm/third_party/__init__.py +0 -0
- vllm/third_party/pynvml.py +6139 -0
- vllm/tracing.py +130 -0
- vllm/transformers_utils/__init__.py +23 -0
- vllm/transformers_utils/chat_templates/__init__.py +4 -0
- vllm/transformers_utils/chat_templates/registry.py +59 -0
- vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
- vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
- vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
- vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
- vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
- vllm/transformers_utils/config.py +835 -0
- vllm/transformers_utils/configs/__init__.py +58 -0
- vllm/transformers_utils/configs/arctic.py +206 -0
- vllm/transformers_utils/configs/chatglm.py +71 -0
- vllm/transformers_utils/configs/cohere2.py +194 -0
- vllm/transformers_utils/configs/dbrx.py +279 -0
- vllm/transformers_utils/configs/deepseek_vl2.py +215 -0
- vllm/transformers_utils/configs/eagle.py +84 -0
- vllm/transformers_utils/configs/exaone.py +189 -0
- vllm/transformers_utils/configs/falcon.py +89 -0
- vllm/transformers_utils/configs/h2ovl.py +15 -0
- vllm/transformers_utils/configs/internvl.py +53 -0
- vllm/transformers_utils/configs/jais.py +237 -0
- vllm/transformers_utils/configs/kimi_vl.py +36 -0
- vllm/transformers_utils/configs/medusa.py +62 -0
- vllm/transformers_utils/configs/minimax_text_01.py +69 -0
- vllm/transformers_utils/configs/minimax_vl_01.py +70 -0
- vllm/transformers_utils/configs/mllama.py +30 -0
- vllm/transformers_utils/configs/mlp_speculator.py +67 -0
- vllm/transformers_utils/configs/moonvit.py +32 -0
- vllm/transformers_utils/configs/mpt.py +179 -0
- vllm/transformers_utils/configs/nemotron.py +204 -0
- vllm/transformers_utils/configs/nvlm_d.py +14 -0
- vllm/transformers_utils/configs/ovis.py +183 -0
- vllm/transformers_utils/configs/skyworkr1v.py +53 -0
- vllm/transformers_utils/configs/solar.py +246 -0
- vllm/transformers_utils/configs/telechat2.py +63 -0
- vllm/transformers_utils/configs/ultravox.py +107 -0
- vllm/transformers_utils/detokenizer.py +167 -0
- vllm/transformers_utils/detokenizer_utils.py +188 -0
- vllm/transformers_utils/processor.py +220 -0
- vllm/transformers_utils/processors/__init__.py +7 -0
- vllm/transformers_utils/processors/deepseek_vl2.py +362 -0
- vllm/transformers_utils/processors/ovis.py +419 -0
- vllm/transformers_utils/s3_utils.py +161 -0
- vllm/transformers_utils/tokenizer.py +301 -0
- vllm/transformers_utils/tokenizer_base.py +148 -0
- vllm/transformers_utils/tokenizer_group.py +119 -0
- vllm/transformers_utils/tokenizers/__init__.py +9 -0
- vllm/transformers_utils/tokenizers/mistral.py +490 -0
- vllm/transformers_utils/utils.py +98 -0
- vllm/triton_utils/__init__.py +13 -0
- vllm/triton_utils/importing.py +49 -0
- vllm/usage/__init__.py +0 -0
- vllm/usage/usage_lib.py +255 -0
- vllm/utils.py +2844 -0
- vllm/v1/__init__.py +0 -0
- vllm/v1/attention/__init__.py +0 -0
- vllm/v1/attention/backends/__init__.py +0 -0
- vllm/v1/attention/backends/flash_attn.py +833 -0
- vllm/v1/attention/backends/flashinfer.py +639 -0
- vllm/v1/attention/backends/mla/__init__.py +0 -0
- vllm/v1/attention/backends/mla/common.py +926 -0
- vllm/v1/attention/backends/mla/flashmla.py +150 -0
- vllm/v1/attention/backends/mla/rocm_aiter_mla.py +221 -0
- vllm/v1/attention/backends/mla/triton_mla.py +118 -0
- vllm/v1/attention/backends/pallas.py +235 -0
- vllm/v1/attention/backends/triton_attn.py +279 -0
- vllm/v1/attention/backends/utils.py +18 -0
- vllm/v1/core/__init__.py +0 -0
- vllm/v1/core/block_pool.py +328 -0
- vllm/v1/core/encoder_cache_manager.py +149 -0
- vllm/v1/core/kv_cache_manager.py +372 -0
- vllm/v1/core/kv_cache_utils.py +748 -0
- vllm/v1/core/sched/__init__.py +0 -0
- vllm/v1/core/sched/interface.py +143 -0
- vllm/v1/core/sched/output.py +153 -0
- vllm/v1/core/sched/scheduler.py +1015 -0
- vllm/v1/core/sched/utils.py +22 -0
- vllm/v1/core/single_type_kv_cache_manager.py +358 -0
- vllm/v1/engine/__init__.py +171 -0
- vllm/v1/engine/async_llm.py +546 -0
- vllm/v1/engine/core.py +801 -0
- vllm/v1/engine/core_client.py +1020 -0
- vllm/v1/engine/detokenizer.py +260 -0
- vllm/v1/engine/exceptions.py +16 -0
- vllm/v1/engine/llm_engine.py +316 -0
- vllm/v1/engine/logprobs.py +198 -0
- vllm/v1/engine/mm_input_cache.py +90 -0
- vllm/v1/engine/output_processor.py +427 -0
- vllm/v1/engine/parallel_sampling.py +132 -0
- vllm/v1/engine/processor.py +398 -0
- vllm/v1/executor/__init__.py +0 -0
- vllm/v1/executor/abstract.py +112 -0
- vllm/v1/executor/multiproc_executor.py +532 -0
- vllm/v1/executor/ray_distributed_executor.py +61 -0
- vllm/v1/kv_cache_interface.py +208 -0
- vllm/v1/metrics/__init__.py +0 -0
- vllm/v1/metrics/loggers.py +511 -0
- vllm/v1/metrics/ray_wrappers.py +120 -0
- vllm/v1/metrics/reader.py +245 -0
- vllm/v1/metrics/stats.py +238 -0
- vllm/v1/outputs.py +115 -0
- vllm/v1/request.py +191 -0
- vllm/v1/sample/__init__.py +0 -0
- vllm/v1/sample/metadata.py +43 -0
- vllm/v1/sample/ops/__init__.py +0 -0
- vllm/v1/sample/ops/bad_words.py +38 -0
- vllm/v1/sample/ops/penalties.py +58 -0
- vllm/v1/sample/ops/topk_topp_sampler.py +292 -0
- vllm/v1/sample/rejection_sampler.py +630 -0
- vllm/v1/sample/sampler.py +270 -0
- vllm/v1/sample/tpu/__init__.py +0 -0
- vllm/v1/sample/tpu/metadata.py +123 -0
- vllm/v1/sample/tpu/sampler.py +144 -0
- vllm/v1/serial_utils.py +313 -0
- vllm/v1/spec_decode/__init__.py +0 -0
- vllm/v1/spec_decode/eagle.py +424 -0
- vllm/v1/spec_decode/medusa.py +61 -0
- vllm/v1/spec_decode/metadata.py +61 -0
- vllm/v1/spec_decode/metrics.py +177 -0
- vllm/v1/spec_decode/ngram_proposer.py +131 -0
- vllm/v1/spec_decode/utils.py +45 -0
- vllm/v1/structured_output/__init__.py +215 -0
- vllm/v1/structured_output/backend_guidance.py +244 -0
- vllm/v1/structured_output/backend_types.py +133 -0
- vllm/v1/structured_output/backend_xgrammar.py +317 -0
- vllm/v1/structured_output/request.py +85 -0
- vllm/v1/structured_output/utils.py +174 -0
- vllm/v1/utils.py +294 -0
- vllm/v1/worker/__init__.py +0 -0
- vllm/v1/worker/block_table.py +139 -0
- vllm/v1/worker/gpu_input_batch.py +680 -0
- vllm/v1/worker/gpu_model_runner.py +2084 -0
- vllm/v1/worker/gpu_worker.py +373 -0
- vllm/v1/worker/lora_model_runner_mixin.py +145 -0
- vllm/v1/worker/tpu_model_runner.py +1510 -0
- vllm/v1/worker/tpu_worker.py +276 -0
- vllm/v1/worker/utils.py +74 -0
- vllm/v1/worker/worker_base.py +64 -0
- vllm/version.py +40 -0
- vllm/vllm_flash_attn/.gitkeep +0 -0
- vllm/worker/__init__.py +0 -0
- vllm/worker/cache_engine.py +144 -0
- vllm/worker/cpu_enc_dec_model_runner.py +326 -0
- vllm/worker/cpu_model_runner.py +671 -0
- vllm/worker/cpu_pooling_model_runner.py +125 -0
- vllm/worker/cpu_worker.py +400 -0
- vllm/worker/enc_dec_model_runner.py +555 -0
- vllm/worker/hpu_model_runner.py +2319 -0
- vllm/worker/hpu_worker.py +483 -0
- vllm/worker/model_runner.py +2178 -0
- vllm/worker/model_runner_base.py +281 -0
- vllm/worker/multi_step_hpu_worker.py +122 -0
- vllm/worker/multi_step_model_runner.py +910 -0
- vllm/worker/multi_step_neuron_model_runner.py +84 -0
- vllm/worker/multi_step_neuronx_distributed_model_runner.py +63 -0
- vllm/worker/multi_step_tpu_worker.py +107 -0
- vllm/worker/multi_step_worker.py +196 -0
- vllm/worker/neuron_model_runner.py +418 -0
- vllm/worker/neuron_worker.py +158 -0
- vllm/worker/neuronx_distributed_model_runner.py +136 -0
- vllm/worker/pooling_model_runner.py +211 -0
- vllm/worker/tpu_model_runner.py +908 -0
- vllm/worker/tpu_worker.py +336 -0
- vllm/worker/utils.py +52 -0
- vllm/worker/worker.py +574 -0
- vllm/worker/worker_base.py +644 -0
- vllm/worker/xpu_model_runner.py +606 -0
- vllm/worker/xpu_worker.py +185 -0
- vllm_cpu_avx512bf16-0.9.0.post2.dist-info/METADATA +335 -0
- vllm_cpu_avx512bf16-0.9.0.post2.dist-info/RECORD +1175 -0
- vllm_cpu_avx512bf16-0.9.0.post2.dist-info/WHEEL +5 -0
- vllm_cpu_avx512bf16-0.9.0.post2.dist-info/entry_points.txt +5 -0
- vllm_cpu_avx512bf16-0.9.0.post2.dist-info/top_level.txt +1 -0

vllm/transformers_utils/detokenizer.py
@@ -0,0 +1,167 @@
# SPDX-License-Identifier: Apache-2.0

from typing import Optional

from vllm.sequence import (VLLM_INVALID_TOKEN_ID, Logprob, SamplingParams,
                           Sequence, SequenceGroup)

from .detokenizer_utils import (convert_prompt_ids_to_tokens,
                                detokenize_incrementally)
from .tokenizer import AnyTokenizer
from .tokenizer_group import TokenizerGroup


class Detokenizer:
    """Provides methods to decode the output of a model into text."""

    def __init__(self, tokenizer_group: TokenizerGroup):
        self.tokenizer_group = tokenizer_group

    def get_tokenizer_for_seq(self, sequence: Sequence) -> AnyTokenizer:
        """Returns the HF tokenizer to use for a given sequence."""
        return self.tokenizer_group.get_lora_tokenizer(sequence.lora_request)

    def decode_prompt_logprobs_inplace(self, seq_group: SequenceGroup,
                                       prompt_logprobs: list[Optional[dict[
                                           int, Logprob]]],
                                       position_offset: int) -> None:
        """Decodes the logprobs for the prompt of a sequence group.

        Args:
            seq_group: The sequence group to decode.
            prompt_logprobs: The logprobs to decode.
            position_offset: Offset of the first index of the logprobs
                relative to the start of the sequence (for chunked prefill).

        Returns:
            The prompt logprobs with the decoded tokens.
        """
        prms = seq_group.sampling_params
        assert prms is not None

        # We can pick any sequence for the prompt.
        seq = seq_group.get_seqs()[0]
        # Only prompt, without the generated token.
        all_token_ids = seq.get_token_ids()
        prompt_token_ids = all_token_ids[:-1]
        tokenizer = self.get_tokenizer_for_seq(seq)
        prefix_offset = 0
        read_offset = 0
        next_iter_prefix_offset = 0
        next_iter_read_offset = 0
        next_iter_tokens: list[str] = []
        prev_tokens = None

        for token_position_in_logprob, prompt_logprobs_for_token in enumerate(
                prompt_logprobs):

            # Absolute token position equals the index in the logprobs
            # list plus the offset of the entire logprobs list relative
            # to the start of the sequence.
            token_position = token_position_in_logprob + position_offset
            if not prompt_logprobs_for_token:
                continue
            for token_id, sample_logprob in prompt_logprobs_for_token.items():
                if (sample_logprob.decoded_token is None
                        and token_id != VLLM_INVALID_TOKEN_ID):
                    prompt_token_ids_with_token = (
                        prompt_token_ids[:token_position] + [token_id])
                    (new_tokens, new_text, new_prefix_offset,
                     new_read_offset) = detokenize_incrementally(
                         tokenizer=tokenizer,
                         all_input_ids=prompt_token_ids_with_token,
                         prev_tokens=prev_tokens,
                         prefix_offset=prefix_offset,
                         read_offset=read_offset,
                         skip_special_tokens=prms.skip_special_tokens,
                         spaces_between_special_tokens=prms.
                         spaces_between_special_tokens,
                     )

                    sample_logprob.decoded_token = new_text

                    # Use the offsets & prev tokens corresponding to
                    # real tokens to ensure detokenization is consistent
                    # with the actual prompt.
                    if token_id == all_token_ids[token_position]:
                        next_iter_prefix_offset = new_prefix_offset
                        next_iter_read_offset = new_read_offset
                        next_iter_tokens = new_tokens

            # Advance to the next token position.
            prefix_offset = next_iter_prefix_offset
            read_offset = next_iter_read_offset
            if prev_tokens is None:
                prev_tokens = next_iter_tokens.copy()
            else:
                prev_tokens.extend(next_iter_tokens)

    def decode_sequence_inplace(self, seq: Sequence,
                                prms: SamplingParams) -> int:
        """Decodes the new token for a sequence. In-place operation.

        Args:
            seq: The sequence to decode.
            prms: The sampling parameters used to generate the sequence.

        Returns:
            The number of characters added to the output text.
        """
        all_input_ids = seq.get_token_ids()
        token_id_generated_this_iteration = all_input_ids[-1]
        tokenizer = self.get_tokenizer_for_seq(seq)

        # Convert prompt token IDs to tokens if necessary.
        # Do it here so that we don't have to repeat this
        # computation for each logprob.
        if seq.tokens is None:
            (seq.tokens, seq.prefix_offset,
             seq.read_offset) = convert_prompt_ids_to_tokens(
                 tokenizer=tokenizer,
                 prompt_ids=all_input_ids[:-1],
                 skip_special_tokens=prms.skip_special_tokens,
             )

        (new_tokens, new_decoded_token_text, prefix_offset,
         read_offset) = detokenize_incrementally(
             tokenizer=tokenizer,
             all_input_ids=all_input_ids,
             prev_tokens=seq.tokens,
             prefix_offset=seq.prefix_offset,
             read_offset=seq.read_offset,
             skip_special_tokens=prms.skip_special_tokens,
             spaces_between_special_tokens=prms.spaces_between_special_tokens,
         )

        # Decode logprobs
        logprobs = seq.output_logprobs[-1]
        if logprobs:
            previous_tokens = all_input_ids[:-1]
            for token_id, sample_logprob in logprobs.items():
                # If the token was generated this iteration,
                # use the provided text.
                if token_id == token_id_generated_this_iteration:
                    sample_logprob.decoded_token = new_decoded_token_text
                    continue

                if (sample_logprob.decoded_token is None
                        and token_id != VLLM_INVALID_TOKEN_ID):
                    all_input_ids_with_logprob = previous_tokens + [token_id]
                    (_, new_text, _, _) = detokenize_incrementally(
                        tokenizer=tokenizer,
                        all_input_ids=all_input_ids_with_logprob,
                        prev_tokens=seq.tokens,
                        prefix_offset=seq.prefix_offset,
                        read_offset=seq.read_offset,
                        skip_special_tokens=prms.skip_special_tokens,
                        spaces_between_special_tokens=prms.
                        spaces_between_special_tokens,
                    )
                    sample_logprob.decoded_token = new_text

        seq.tokens.extend(new_tokens)
        seq.prefix_offset = prefix_offset
        seq.read_offset = read_offset
        seq.output_text += new_decoded_token_text

        return len(new_decoded_token_text)
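
For orientation, here is a minimal, hypothetical wiring sketch (not part of the wheel) showing how a Detokenizer is constructed from a TokenizerGroup; the tokenizer name and the TokenizerGroup constructor arguments below are assumptions for illustration and may differ from this wheel's tokenizer_group module.

# Hypothetical wiring sketch; "gpt2" and the TokenizerGroup arguments are
# illustrative assumptions, not values shipped in this wheel.
from vllm.transformers_utils.detokenizer import Detokenizer
from vllm.transformers_utils.tokenizer_group import TokenizerGroup

tokenizer_group = TokenizerGroup(
    tokenizer_id="gpt2",      # assumed tokenizer/model name
    enable_lora=False,
    max_num_seqs=8,
    max_input_length=None,
)
detokenizer = Detokenizer(tokenizer_group)
# The engine then calls detokenizer.decode_sequence_inplace(seq, params)
# once per generation step to append newly decoded text to seq.output_text.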

vllm/transformers_utils/detokenizer_utils.py
@@ -0,0 +1,188 @@
# SPDX-License-Identifier: Apache-2.0

from typing import Optional

from .tokenizer import AnyTokenizer


def _replace_none_with_empty(tokens: list[Optional[str]]):
    for i, token in enumerate(tokens):
        if token is None:
            tokens[i] = ""


def _convert_tokens_to_string_with_added_encoders(
    tokenizer: AnyTokenizer,
    output_tokens: list[str],
    skip_special_tokens: bool,
    spaces_between_special_tokens: bool,
) -> str:
    # Adapted from
    # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/tokenization_utils.py#L921
    # NOTE(woosuk): The following code is slow because it runs a for loop over
    # the output_tokens. In Python, running a for loop over a list can be slow
    # even when the loop body is very simple.
    sub_texts: list[str] = []
    current_sub_text: list[str] = []
    all_special_tokens = set(tokenizer.all_special_tokens)
    for token in output_tokens:
        if skip_special_tokens and token in all_special_tokens:
            continue
        if token in tokenizer.get_added_vocab():
            if current_sub_text:
                sub_text = tokenizer.convert_tokens_to_string(current_sub_text)
                sub_texts.append(sub_text)
                current_sub_text = []
            sub_texts.append(token)
        else:
            current_sub_text.append(token)
    if current_sub_text:
        sub_text = tokenizer.convert_tokens_to_string(current_sub_text)
        sub_texts.append(sub_text)
    if spaces_between_special_tokens:
        return " ".join(sub_texts)
    else:
        return "".join(sub_texts)


# 5 is an arbitrary value that should work for all
# tokenizers (bigger = more conservative).
INITIAL_INCREMENTAL_DETOKENIZATION_OFFSET = 5


def convert_prompt_ids_to_tokens(
    tokenizer: AnyTokenizer,
    prompt_ids: list[int],
    skip_special_tokens: bool = False,
) -> tuple[list[str], int, int]:
    """Converts the prompt ids to tokens and returns the tokens and offsets
    for incremental detokenization.

    Note that not all tokens are converted to strings. Only the tokens that
    are necessary for incremental detokenization are converted to strings.
    """
    # We do not need to convert the whole prompt to tokens.
    # Offset a little more in case we have special tokens.
    new_tokens = tokenizer.convert_ids_to_tokens(
        prompt_ids[-INITIAL_INCREMENTAL_DETOKENIZATION_OFFSET - 2:],
        skip_special_tokens=skip_special_tokens)
    read_offset = len(new_tokens)
    prefix_offset = max(
        read_offset - INITIAL_INCREMENTAL_DETOKENIZATION_OFFSET, 0)
    # This is required to guard against out-of-vocab prompt token ids
    _replace_none_with_empty(new_tokens)  # type: ignore[arg-type]
    return new_tokens, prefix_offset, read_offset


def convert_ids_list_to_tokens(
    tokenizer: AnyTokenizer,
    token_ids: list[int],
) -> list[str]:
    """Detokenize the input ids individually.

    Args:
        tokenizer: tokenizer used by model under test
        token_ids: convert these tokens (Python list form)

    Returns:
        Python list of token string representations
    """
    token_str_lst = tokenizer.convert_ids_to_tokens(token_ids)
    _replace_none_with_empty(token_str_lst)  # type: ignore
    return token_str_lst


# Based on
# https://github.com/huggingface/text-generation-inference/blob/v0.9.4/server/text_generation_server/models/model.py#L62C9-L62C15
# under Apache 2.0 license
def detokenize_incrementally(
    tokenizer: AnyTokenizer,
    all_input_ids: list[int],
    prev_tokens: Optional[list[str]],
    prefix_offset: int,
    read_offset: int,
    skip_special_tokens: bool = False,
    spaces_between_special_tokens: bool = True,
) -> tuple[list[str], str, int, int]:
    """Detokenizes the input ids incrementally and returns the new tokens
    and the new text.

    If `prev_tokens` is None, this function will convert the input ids to
    tokens and return the tokens and the new text. Otherwise, it will return
    the new tokens and the new text.

    This function will also return the new prefix offset and the new read
    offset to be used in the next iteration.

    The offsets are necessary to defeat cleanup algorithms in the decode which
    decide to add a space or not depending on the surrounding ids.

    Args:
        tokenizer: The tokenizer to use.
        all_input_ids: The input ids. The last id is the new token id.
        prev_tokens: The previous tokens. If None, this function will convert
            the input ids to tokens and return the tokens and the new text.
        prefix_offset: The prefix offset.
        read_offset: The read offset.
        skip_special_tokens: Whether to skip special tokens.
        spaces_between_special_tokens: Whether to add spaces between special
            tokens.
    """
    new_token_id = all_input_ids[-1]
    # This is the first iteration for this sequence
    is_first_iter = prev_tokens is None
    if is_first_iter:
        (prev_tokens, prefix_offset,
         read_offset) = convert_prompt_ids_to_tokens(
             tokenizer,
             all_input_ids[:-1],
             skip_special_tokens=skip_special_tokens)
    assert prev_tokens is not None

    # If the new token id is out of bounds, return an empty string.
    if 0 <= new_token_id < len(tokenizer):
        # Put new_token_id in a list so skip_special_tokens is respected
        new_tokens = tokenizer.convert_ids_to_tokens(
            [new_token_id], skip_special_tokens=skip_special_tokens)
        if isinstance(new_tokens, str):
            new_tokens = [new_tokens]
    else:
        new_tokens = [""]
    output_tokens = prev_tokens + new_tokens

    # If this is the first iteration, return all tokens.
    if is_first_iter:
        new_tokens = output_tokens

    # The prefix text is necessary only to defeat cleanup algorithms in
    # the decode which decide to add a space or not depending on the
    # surrounding ids.
    if tokenizer.is_fast or not tokenizer.get_added_vocab():
        prefix_text = tokenizer.convert_tokens_to_string(
            output_tokens[prefix_offset:read_offset])
        new_text = tokenizer.convert_tokens_to_string(
            output_tokens[prefix_offset:])
    else:
        prefix_text = _convert_tokens_to_string_with_added_encoders(
            tokenizer,
            output_tokens[prefix_offset:read_offset],
            skip_special_tokens=skip_special_tokens,
            spaces_between_special_tokens=spaces_between_special_tokens,
        )
        new_text = _convert_tokens_to_string_with_added_encoders(
            tokenizer,
            output_tokens[prefix_offset:],
            skip_special_tokens=skip_special_tokens,
            spaces_between_special_tokens=spaces_between_special_tokens,
        )

    if len(new_text) <= len(prefix_text) or new_text.endswith("�"):
        # utf-8 char at the end means it's a potential unfinished byte
        # sequence from byte fallback tokenization.
        # If it's in the middle, it's probably a real invalid id generated
        # by the model
        return new_tokens, "", prefix_offset, read_offset

    new_text = new_text[len(prefix_text):]
    return new_tokens, new_text, read_offset, len(output_tokens)
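
A minimal sketch of how these helpers are driven in a streaming loop, mirroring Detokenizer.decode_sequence_inplace above: seed the token buffer and offsets from the prompt once, then feed one newly sampled id per step. The tokenizer name "gpt2" and the hard-coded strings are illustrative assumptions, not part of the wheel.

# Hypothetical streaming-decode sketch; "gpt2" and the strings below are
# illustrative assumptions.
from transformers import AutoTokenizer

from vllm.transformers_utils.detokenizer_utils import (
    convert_prompt_ids_to_tokens, detokenize_incrementally)

tokenizer = AutoTokenizer.from_pretrained("gpt2")
prompt_ids = tokenizer.encode("Hello, world")
sampled_ids = tokenizer.encode(" from vLLM")  # stand-in for model output

# Seed offsets from the prompt; only the tail of the prompt is tokenized.
tokens, prefix_offset, read_offset = convert_prompt_ids_to_tokens(
    tokenizer, prompt_ids)
all_ids = list(prompt_ids)
streamed_text = ""
for token_id in sampled_ids:
    all_ids.append(token_id)
    # Returns (new tokens, new text, new prefix offset, new read offset).
    new_tokens, new_text, prefix_offset, read_offset = (
        detokenize_incrementally(
            tokenizer=tokenizer,
            all_input_ids=all_ids,
            prev_tokens=tokens,
            prefix_offset=prefix_offset,
            read_offset=read_offset,
        ))
    tokens.extend(new_tokens)
    streamed_text += new_text  # never ends in an unfinished byte sequence
print(streamed_text)  # expected: " from vLLM"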

vllm/transformers_utils/processor.py
@@ -0,0 +1,220 @@
# SPDX-License-Identifier: Apache-2.0

from functools import lru_cache
from typing import TYPE_CHECKING, Any, Optional, Union, cast

from transformers.processing_utils import ProcessorMixin
from typing_extensions import TypeVar

if TYPE_CHECKING:
    from vllm.config import ModelConfig

_P = TypeVar("_P", bound=ProcessorMixin, default=ProcessorMixin)


class HashableDict(dict):
    """
    A dictionary that can be hashed by lru_cache.
    """

    # NOTE: pythonic dict is not hashable,
    # we override on it directly for simplicity
    def __hash__(self) -> int:  # type: ignore[override]
        return hash(frozenset(self.items()))


class HashableList(list):
    """
    A list that can be hashed by lru_cache.
    """

    def __hash__(self) -> int:  # type: ignore[override]
        return hash(tuple(self))


def _merge_mm_kwargs(model_config: "ModelConfig", **kwargs):
    mm_config = model_config.get_multimodal_config()
    base_kwargs = mm_config.mm_processor_kwargs
    if base_kwargs is None:
        base_kwargs = {}

    merged_kwargs = {**base_kwargs, **kwargs}

    # NOTE: Pythonic dict is not hashable and will raise unhashable type
    # error when calling `cached_get_processor`, therefore we need to
    # wrap it to a hashable dict.
    for key, value in merged_kwargs.items():
        if isinstance(value, dict):
            merged_kwargs[key] = HashableDict(value)
        if isinstance(value, list):
            merged_kwargs[key] = HashableList(value)
    return merged_kwargs


def get_processor(
    processor_name: str,
    *args: Any,
    revision: Optional[str] = None,
    trust_remote_code: bool = False,
    processor_cls: Union[type[_P], tuple[type[_P], ...]] = ProcessorMixin,
    **kwargs: Any,
) -> _P:
    """Load a processor for the given model name via HuggingFace."""
    # don't put this import at the top level
    # it will call torch.cuda.device_count()
    from transformers import AutoProcessor

    processor_factory = (AutoProcessor if processor_cls == ProcessorMixin or
                         isinstance(processor_cls, tuple) else processor_cls)

    try:
        processor = processor_factory.from_pretrained(
            processor_name,
            *args,
            revision=revision,
            trust_remote_code=trust_remote_code,
            **kwargs,
        )
    except ValueError as e:
        # If the error pertains to the processor class not existing or not
        # currently being imported, suggest using the --trust-remote-code flag.
        # Unlike AutoTokenizer, AutoProcessor does not separate such errors
        if not trust_remote_code:
            err_msg = (
                "Failed to load the processor. If the processor is "
                "a custom processor not yet available in the HuggingFace "
                "transformers library, consider setting "
                "`trust_remote_code=True` in LLM or using the "
                "`--trust-remote-code` flag in the CLI.")
            raise RuntimeError(err_msg) from e
        else:
            raise e

    if not isinstance(processor, processor_cls):
        raise TypeError("Invalid type of HuggingFace processor. "
                        f"Expected type: {processor_cls}, but "
                        f"found type: {type(processor)}")

    return processor


cached_get_processor = lru_cache(get_processor)


def cached_processor_from_config(
    model_config: "ModelConfig",
    processor_cls: Union[type[_P], tuple[type[_P], ...]] = ProcessorMixin,
    **kwargs: Any,
) -> _P:
    return cached_get_processor(
        model_config.model,
        revision=model_config.revision,
        trust_remote_code=model_config.trust_remote_code,
        processor_cls=processor_cls,  # type: ignore[arg-type]
        **_merge_mm_kwargs(model_config, **kwargs),
    )


def get_feature_extractor(
    processor_name: str,
    *args: Any,
    revision: Optional[str] = None,
    trust_remote_code: bool = False,
    **kwargs: Any,
):
    """Load an audio feature extractor for the given model name
    via HuggingFace."""
    # don't put this import at the top level
    # it will call torch.cuda.device_count()
    from transformers import AutoFeatureExtractor
    from transformers.feature_extraction_utils import FeatureExtractionMixin
    try:
        feature_extractor = AutoFeatureExtractor.from_pretrained(
            processor_name,
            *args,
            revision=revision,
            trust_remote_code=trust_remote_code,
            **kwargs)
    except ValueError as e:
        # If the error pertains to the processor class not existing or not
        # currently being imported, suggest using the --trust-remote-code flag.
        # Unlike AutoTokenizer, AutoImageProcessor does not separate such errors
        if not trust_remote_code:
            err_msg = (
                "Failed to load the feature extractor. If the feature "
                "extractor is a custom extractor not yet available in the "
                "HuggingFace transformers library, consider setting "
                "`trust_remote_code=True` in LLM or using the "
                "`--trust-remote-code` flag in the CLI.")
            raise RuntimeError(err_msg) from e
        else:
            raise e
    return cast(FeatureExtractionMixin, feature_extractor)


cached_get_feature_extractor = lru_cache(get_feature_extractor)


def cached_feature_extractor_from_config(
    model_config: "ModelConfig",
    **kwargs: Any,
):
    return cached_get_feature_extractor(
        model_config.model,
        revision=model_config.revision,
        trust_remote_code=model_config.trust_remote_code,
        **_merge_mm_kwargs(model_config, **kwargs),
    )


def get_image_processor(
    processor_name: str,
    *args: Any,
    revision: Optional[str] = None,
    trust_remote_code: bool = False,
    **kwargs: Any,
):
    """Load an image processor for the given model name via HuggingFace."""
    # don't put this import at the top level
    # it will call torch.cuda.device_count()
    from transformers import AutoImageProcessor
    from transformers.image_processing_utils import BaseImageProcessor

    try:
        processor = AutoImageProcessor.from_pretrained(
            processor_name,
            *args,
            revision=revision,
            trust_remote_code=trust_remote_code,
            **kwargs)
    except ValueError as e:
        # If the error pertains to the processor class not existing or not
        # currently being imported, suggest using the --trust-remote-code flag.
        # Unlike AutoTokenizer, AutoImageProcessor does not separate such errors
        if not trust_remote_code:
            err_msg = (
                "Failed to load the image processor. If the image processor is "
                "a custom processor not yet available in the HuggingFace "
                "transformers library, consider setting "
                "`trust_remote_code=True` in LLM or using the "
                "`--trust-remote-code` flag in the CLI.")
            raise RuntimeError(err_msg) from e
        else:
            raise e

    return cast(BaseImageProcessor, processor)


cached_get_image_processor = lru_cache(get_image_processor)


def cached_image_processor_from_config(
    model_config: "ModelConfig",
    **kwargs: Any,
):
    return cached_get_image_processor(
        model_config.model,
        revision=model_config.revision,
        trust_remote_code=model_config.trust_remote_code,
        **_merge_mm_kwargs(model_config, **kwargs),
    )
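
Because the loaders above are wrapped in lru_cache, repeated lookups with the same hashable arguments return the same object, which is why _merge_mm_kwargs wraps dict and list values in HashableDict/HashableList before they pass through the cache. A hypothetical usage sketch (the model name is an arbitrary assumption):

# Hypothetical caching sketch; the model name is an illustrative assumption.
from vllm.transformers_utils.processor import cached_get_image_processor

# The first call loads the image processor from the Hub or local cache;
# the second call with identical arguments is served from the lru_cache.
proc_a = cached_get_image_processor("llava-hf/llava-1.5-7b-hf")
proc_b = cached_get_image_processor("llava-hf/llava-1.5-7b-hf")
assert proc_a is proc_b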