vllm_cpu_amxbf16-0.9.1-cp312-cp312-manylinux_2_17_x86_64.whl
This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
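A manifest like the one below can be reproduced locally, since a `.whl` file is an ordinary zip archive. The following is a minimal sketch using only the Python standard library; it assumes the wheel named above has been downloaded to the working directory (the per-file `+N -0` counts come from diffing file contents against the previous version, which this sketch does not attempt):

```python
import zipfile

# A .whl is a zip archive; listing its members reproduces the
# file manifest shown below, along with each member's size in bytes.
WHEEL = "vllm_cpu_amxbf16-0.9.1-cp312-cp312-manylinux_2_17_x86_64.whl"

with zipfile.ZipFile(WHEEL) as whl:
    for info in whl.infolist():
        print(info.filename, info.file_size)
```

Note that every entry below shows `+N -0`: all listed paths are additions relative to the comparison baseline, with no lines removed.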
- vllm/_C.abi3.so +0 -0
- vllm/__init__.py +53 -0
- vllm/_custom_ops.py +1828 -0
- vllm/_ipex_ops.py +244 -0
- vllm/_version.py +34 -0
- vllm/adapter_commons/__init__.py +0 -0
- vllm/adapter_commons/layers.py +16 -0
- vllm/adapter_commons/models.py +106 -0
- vllm/adapter_commons/request.py +26 -0
- vllm/adapter_commons/utils.py +93 -0
- vllm/adapter_commons/worker_manager.py +39 -0
- vllm/assets/__init__.py +0 -0
- vllm/assets/audio.py +45 -0
- vllm/assets/base.py +41 -0
- vllm/assets/image.py +34 -0
- vllm/assets/video.py +115 -0
- vllm/attention/__init__.py +20 -0
- vllm/attention/backends/__init__.py +0 -0
- vllm/attention/backends/abstract.py +308 -0
- vllm/attention/backends/blocksparse_attn.py +461 -0
- vllm/attention/backends/cpu_mla.py +307 -0
- vllm/attention/backends/dual_chunk_flash_attn.py +1498 -0
- vllm/attention/backends/flash_attn.py +1003 -0
- vllm/attention/backends/flashinfer.py +1104 -0
- vllm/attention/backends/flashmla.py +244 -0
- vllm/attention/backends/hpu_attn.py +313 -0
- vllm/attention/backends/ipex_attn.py +398 -0
- vllm/attention/backends/mla/__init__.py +0 -0
- vllm/attention/backends/mla/common.py +1385 -0
- vllm/attention/backends/pallas.py +351 -0
- vllm/attention/backends/placeholder_attn.py +400 -0
- vllm/attention/backends/rocm_aiter_mla.py +435 -0
- vllm/attention/backends/rocm_flash_attn.py +975 -0
- vllm/attention/backends/torch_sdpa.py +703 -0
- vllm/attention/backends/triton_mla.py +115 -0
- vllm/attention/backends/utils.py +610 -0
- vllm/attention/backends/xformers.py +802 -0
- vllm/attention/layer.py +468 -0
- vllm/attention/ops/__init__.py +0 -0
- vllm/attention/ops/blocksparse_attention/__init__.py +0 -0
- vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +433 -0
- vllm/attention/ops/blocksparse_attention/interface.py +239 -0
- vllm/attention/ops/blocksparse_attention/utils.py +246 -0
- vllm/attention/ops/chunked_prefill_paged_decode.py +368 -0
- vllm/attention/ops/flashmla.py +116 -0
- vllm/attention/ops/hpu_paged_attn.py +88 -0
- vllm/attention/ops/ipex_attn.py +195 -0
- vllm/attention/ops/merge_attn_states.py +43 -0
- vllm/attention/ops/nki_flash_attn.py +906 -0
- vllm/attention/ops/paged_attn.py +256 -0
- vllm/attention/ops/prefix_prefill.py +902 -0
- vllm/attention/ops/rocm_aiter_mla.py +100 -0
- vllm/attention/ops/rocm_aiter_paged_attn.py +102 -0
- vllm/attention/ops/triton_decode_attention.py +674 -0
- vllm/attention/ops/triton_flash_attention.py +979 -0
- vllm/attention/ops/triton_merge_attn_states.py +97 -0
- vllm/attention/ops/triton_unified_attention.py +334 -0
- vllm/attention/selector.py +187 -0
- vllm/attention/utils/fa_utils.py +55 -0
- vllm/beam_search.py +87 -0
- vllm/benchmarks/__init__.py +0 -0
- vllm/benchmarks/datasets.py +1185 -0
- vllm/benchmarks/endpoint_request_func.py +381 -0
- vllm/benchmarks/latency.py +168 -0
- vllm/benchmarks/serve.py +1135 -0
- vllm/benchmarks/throughput.py +609 -0
- vllm/benchmarks/utils.py +70 -0
- vllm/collect_env.py +820 -0
- vllm/compilation/__init__.py +0 -0
- vllm/compilation/activation_quant_fusion.py +89 -0
- vllm/compilation/backends.py +563 -0
- vllm/compilation/base_piecewise_backend.py +72 -0
- vllm/compilation/collective_fusion.py +127 -0
- vllm/compilation/compiler_interface.py +544 -0
- vllm/compilation/counter.py +38 -0
- vllm/compilation/cuda_piecewise_backend.py +214 -0
- vllm/compilation/decorators.py +250 -0
- vllm/compilation/fix_functionalization.py +191 -0
- vllm/compilation/fusion.py +618 -0
- vllm/compilation/fx_utils.py +62 -0
- vllm/compilation/inductor_pass.py +115 -0
- vllm/compilation/monitor.py +39 -0
- vllm/compilation/multi_output_match.py +109 -0
- vllm/compilation/noop_elimination.py +137 -0
- vllm/compilation/pass_manager.py +78 -0
- vllm/compilation/sequence_parallelism.py +268 -0
- vllm/compilation/torch25_custom_graph_pass.py +42 -0
- vllm/compilation/vllm_inductor_pass.py +67 -0
- vllm/compilation/wrapper.py +135 -0
- vllm/config.py +4746 -0
- vllm/connections.py +174 -0
- vllm/core/__init__.py +0 -0
- vllm/core/block/__init__.py +0 -0
- vllm/core/block/block_table.py +399 -0
- vllm/core/block/common.py +371 -0
- vllm/core/block/cpu_gpu_block_allocator.py +441 -0
- vllm/core/block/interfaces.py +319 -0
- vllm/core/block/naive_block.py +466 -0
- vllm/core/block/prefix_caching_block.py +1135 -0
- vllm/core/block/utils.py +28 -0
- vllm/core/block_manager.py +521 -0
- vllm/core/evictor.py +157 -0
- vllm/core/interfaces.py +135 -0
- vllm/core/placeholder_block_space_manager.py +100 -0
- vllm/core/scheduler.py +2093 -0
- vllm/device_allocator/__init__.py +0 -0
- vllm/device_allocator/cumem.py +281 -0
- vllm/distributed/__init__.py +6 -0
- vllm/distributed/communication_op.py +41 -0
- vllm/distributed/device_communicators/__init__.py +0 -0
- vllm/distributed/device_communicators/all2all.py +264 -0
- vllm/distributed/device_communicators/base_device_communicator.py +260 -0
- vllm/distributed/device_communicators/cpu_communicator.py +145 -0
- vllm/distributed/device_communicators/cuda_communicator.py +176 -0
- vllm/distributed/device_communicators/cuda_wrapper.py +180 -0
- vllm/distributed/device_communicators/custom_all_reduce.py +304 -0
- vllm/distributed/device_communicators/custom_all_reduce_utils.py +259 -0
- vllm/distributed/device_communicators/hpu_communicator.py +46 -0
- vllm/distributed/device_communicators/neuron_communicator.py +20 -0
- vllm/distributed/device_communicators/pynccl.py +218 -0
- vllm/distributed/device_communicators/pynccl_wrapper.py +341 -0
- vllm/distributed/device_communicators/shm_broadcast.py +585 -0
- vllm/distributed/device_communicators/tpu_communicator.py +103 -0
- vllm/distributed/device_communicators/xpu_communicator.py +55 -0
- vllm/distributed/kv_events.py +356 -0
- vllm/distributed/kv_transfer/README.md +29 -0
- vllm/distributed/kv_transfer/__init__.py +12 -0
- vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
- vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/base.py +128 -0
- vllm/distributed/kv_transfer/kv_connector/factory.py +128 -0
- vllm/distributed/kv_transfer/kv_connector/lmcache_connector.py +99 -0
- vllm/distributed/kv_transfer/kv_connector/mooncake_store_connector.py +203 -0
- vllm/distributed/kv_transfer/kv_connector/simple_connector.py +329 -0
- vllm/distributed/kv_transfer/kv_connector/utils.py +108 -0
- vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +6 -0
- vllm/distributed/kv_transfer/kv_connector/v1/base.py +283 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +134 -0
- vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +201 -0
- vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +1030 -0
- vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +384 -0
- vllm/distributed/kv_transfer/kv_connector_agent.py +77 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +175 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +161 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +237 -0
- vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_pipe/base.py +67 -0
- vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +280 -0
- vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +280 -0
- vllm/distributed/kv_transfer/kv_transfer_state.py +71 -0
- vllm/distributed/parallel_state.py +1296 -0
- vllm/distributed/tpu_distributed_utils.py +177 -0
- vllm/distributed/utils.py +536 -0
- vllm/engine/__init__.py +0 -0
- vllm/engine/arg_utils.py +1708 -0
- vllm/engine/async_llm_engine.py +1200 -0
- vllm/engine/async_timeout.py +173 -0
- vllm/engine/llm_engine.py +2097 -0
- vllm/engine/metrics.py +629 -0
- vllm/engine/metrics_types.py +94 -0
- vllm/engine/multiprocessing/__init__.py +148 -0
- vllm/engine/multiprocessing/client.py +681 -0
- vllm/engine/multiprocessing/engine.py +460 -0
- vllm/engine/output_processor/__init__.py +0 -0
- vllm/engine/output_processor/interfaces.py +75 -0
- vllm/engine/output_processor/multi_step.py +216 -0
- vllm/engine/output_processor/single_step.py +145 -0
- vllm/engine/output_processor/stop_checker.py +131 -0
- vllm/engine/output_processor/util.py +28 -0
- vllm/engine/protocol.py +317 -0
- vllm/entrypoints/__init__.py +0 -0
- vllm/entrypoints/api_server.py +178 -0
- vllm/entrypoints/chat_utils.py +1299 -0
- vllm/entrypoints/cli/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/base.py +39 -0
- vllm/entrypoints/cli/benchmark/latency.py +30 -0
- vllm/entrypoints/cli/benchmark/main.py +54 -0
- vllm/entrypoints/cli/benchmark/serve.py +30 -0
- vllm/entrypoints/cli/benchmark/throughput.py +30 -0
- vllm/entrypoints/cli/collect_env.py +35 -0
- vllm/entrypoints/cli/main.py +65 -0
- vllm/entrypoints/cli/openai.py +205 -0
- vllm/entrypoints/cli/run_batch.py +62 -0
- vllm/entrypoints/cli/serve.py +328 -0
- vllm/entrypoints/cli/types.py +25 -0
- vllm/entrypoints/launcher.py +147 -0
- vllm/entrypoints/llm.py +1544 -0
- vllm/entrypoints/logger.py +50 -0
- vllm/entrypoints/openai/__init__.py +0 -0
- vllm/entrypoints/openai/api_server.py +1387 -0
- vllm/entrypoints/openai/cli_args.py +315 -0
- vllm/entrypoints/openai/logits_processors.py +90 -0
- vllm/entrypoints/openai/protocol.py +1913 -0
- vllm/entrypoints/openai/run_batch.py +463 -0
- vllm/entrypoints/openai/serving_chat.py +1221 -0
- vllm/entrypoints/openai/serving_classification.py +160 -0
- vllm/entrypoints/openai/serving_completion.py +592 -0
- vllm/entrypoints/openai/serving_embedding.py +201 -0
- vllm/entrypoints/openai/serving_engine.py +986 -0
- vllm/entrypoints/openai/serving_models.py +315 -0
- vllm/entrypoints/openai/serving_pooling.py +232 -0
- vllm/entrypoints/openai/serving_score.py +433 -0
- vllm/entrypoints/openai/serving_tokenization.py +157 -0
- vllm/entrypoints/openai/serving_transcription.py +424 -0
- vllm/entrypoints/openai/tool_parsers/__init__.py +23 -0
- vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +164 -0
- vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +370 -0
- vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +259 -0
- vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +237 -0
- vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +371 -0
- vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +216 -0
- vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +308 -0
- vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +316 -0
- vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +267 -0
- vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +369 -0
- vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +112 -0
- vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +308 -0
- vllm/entrypoints/openai/tool_parsers/utils.py +124 -0
- vllm/entrypoints/score_utils.py +50 -0
- vllm/entrypoints/ssl.py +75 -0
- vllm/entrypoints/utils.py +233 -0
- vllm/env_override.py +41 -0
- vllm/envs.py +944 -0
- vllm/executor/__init__.py +0 -0
- vllm/executor/executor_base.py +401 -0
- vllm/executor/mp_distributed_executor.py +244 -0
- vllm/executor/msgspec_utils.py +30 -0
- vllm/executor/multiproc_worker_utils.py +313 -0
- vllm/executor/ray_distributed_executor.py +701 -0
- vllm/executor/ray_utils.py +399 -0
- vllm/executor/uniproc_executor.py +139 -0
- vllm/forward_context.py +179 -0
- vllm/inputs/__init__.py +41 -0
- vllm/inputs/data.py +331 -0
- vllm/inputs/parse.py +151 -0
- vllm/inputs/preprocess.py +909 -0
- vllm/inputs/registry.py +237 -0
- vllm/jsontree.py +80 -0
- vllm/logger.py +212 -0
- vllm/logging_utils/__init__.py +8 -0
- vllm/logging_utils/dump_input.py +85 -0
- vllm/logging_utils/formatter.py +18 -0
- vllm/logits_process.py +119 -0
- vllm/lora/__init__.py +0 -0
- vllm/lora/fully_sharded_layers.py +355 -0
- vllm/lora/layers.py +1285 -0
- vllm/lora/lora.py +199 -0
- vllm/lora/models.py +818 -0
- vllm/lora/ops/__init__.py +0 -0
- vllm/lora/ops/torch_ops/__init__.py +16 -0
- vllm/lora/ops/torch_ops/lora_ops.py +119 -0
- vllm/lora/ops/triton_ops/__init__.py +12 -0
- vllm/lora/ops/triton_ops/kernel_utils.py +243 -0
- vllm/lora/ops/triton_ops/lora_expand_op.py +290 -0
- vllm/lora/ops/triton_ops/lora_kernel_metadata.py +148 -0
- vllm/lora/ops/triton_ops/lora_shrink_op.py +244 -0
- vllm/lora/ops/triton_ops/utils.py +120 -0
- vllm/lora/ops/xla_ops/__init__.py +7 -0
- vllm/lora/ops/xla_ops/lora_ops.py +145 -0
- vllm/lora/peft_helper.py +136 -0
- vllm/lora/punica_wrapper/__init__.py +10 -0
- vllm/lora/punica_wrapper/punica_base.py +485 -0
- vllm/lora/punica_wrapper/punica_cpu.py +349 -0
- vllm/lora/punica_wrapper/punica_gpu.py +290 -0
- vllm/lora/punica_wrapper/punica_hpu.py +145 -0
- vllm/lora/punica_wrapper/punica_selector.py +20 -0
- vllm/lora/punica_wrapper/punica_tpu.py +405 -0
- vllm/lora/punica_wrapper/utils.py +164 -0
- vllm/lora/request.py +99 -0
- vllm/lora/resolver.py +85 -0
- vllm/lora/utils.py +240 -0
- vllm/lora/worker_manager.py +259 -0
- vllm/model_executor/__init__.py +16 -0
- vllm/model_executor/custom_op.py +152 -0
- vllm/model_executor/guided_decoding/__init__.py +181 -0
- vllm/model_executor/guided_decoding/guidance_decoding.py +63 -0
- vllm/model_executor/guided_decoding/guidance_logits_processors.py +104 -0
- vllm/model_executor/guided_decoding/guided_fields.py +41 -0
- vllm/model_executor/guided_decoding/lm_format_enforcer_decoding.py +67 -0
- vllm/model_executor/guided_decoding/outlines_decoding.py +155 -0
- vllm/model_executor/guided_decoding/outlines_logits_processors.py +284 -0
- vllm/model_executor/guided_decoding/utils.py +242 -0
- vllm/model_executor/guided_decoding/xgrammar_decoding.py +426 -0
- vllm/model_executor/layers/__init__.py +0 -0
- vllm/model_executor/layers/activation.py +369 -0
- vllm/model_executor/layers/fused_moe/__init__.py +54 -0
- vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +125 -0
- vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +117 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/README +12 -0
- vllm/model_executor/layers/fused_moe/cutlass_moe.py +461 -0
- vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +240 -0
- vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +240 -0
- vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +186 -0
- vllm/model_executor/layers/fused_moe/fused_batched_moe.py +775 -0
- vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +232 -0
- vllm/model_executor/layers/fused_moe/fused_moe.py +1724 -0
- vllm/model_executor/layers/fused_moe/layer.py +1535 -0
- vllm/model_executor/layers/fused_moe/modular_kernel.py +446 -0
- vllm/model_executor/layers/fused_moe/moe_align_block_size.py +243 -0
- vllm/model_executor/layers/fused_moe/moe_pallas.py +80 -0
- vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +190 -0
- vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
- vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +159 -0
- vllm/model_executor/layers/fused_moe/prepare_finalize.py +69 -0
- vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +421 -0
- vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +117 -0
- vllm/model_executor/layers/fused_moe/utils.py +98 -0
- vllm/model_executor/layers/layernorm.py +288 -0
- vllm/model_executor/layers/lightning_attn.py +652 -0
- vllm/model_executor/layers/linear.py +1524 -0
- vllm/model_executor/layers/logits_processor.py +197 -0
- vllm/model_executor/layers/mamba/__init__.py +0 -0
- vllm/model_executor/layers/mamba/mamba2_metadata.py +125 -0
- vllm/model_executor/layers/mamba/mamba_mixer.py +245 -0
- vllm/model_executor/layers/mamba/mamba_mixer2.py +616 -0
- vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
- vllm/model_executor/layers/mamba/ops/causal_conv1d.py +105 -0
- vllm/model_executor/layers/mamba/ops/mamba_ssm.py +414 -0
- vllm/model_executor/layers/mamba/ops/ssd_bmm.py +262 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +589 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +751 -0
- vllm/model_executor/layers/mamba/ops/ssd_combined.py +232 -0
- vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +206 -0
- vllm/model_executor/layers/pooler.py +350 -0
- vllm/model_executor/layers/quantization/__init__.py +157 -0
- vllm/model_executor/layers/quantization/aqlm.py +376 -0
- vllm/model_executor/layers/quantization/auto_round.py +310 -0
- vllm/model_executor/layers/quantization/awq.py +194 -0
- vllm/model_executor/layers/quantization/awq_marlin.py +519 -0
- vllm/model_executor/layers/quantization/awq_triton.py +320 -0
- vllm/model_executor/layers/quantization/base_config.py +151 -0
- vllm/model_executor/layers/quantization/bitblas.py +461 -0
- vllm/model_executor/layers/quantization/bitsandbytes.py +396 -0
- vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +0 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +668 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +1260 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +24 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +358 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +160 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +93 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +178 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +121 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +150 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +111 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +201 -0
- vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +206 -0
- vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
- vllm/model_executor/layers/quantization/deepspeedfp.py +195 -0
- vllm/model_executor/layers/quantization/experts_int8.py +196 -0
- vllm/model_executor/layers/quantization/fbgemm_fp8.py +172 -0
- vllm/model_executor/layers/quantization/fp8.py +906 -0
- vllm/model_executor/layers/quantization/gguf.py +565 -0
- vllm/model_executor/layers/quantization/gptq.py +278 -0
- vllm/model_executor/layers/quantization/gptq_bitblas.py +445 -0
- vllm/model_executor/layers/quantization/gptq_marlin.py +648 -0
- vllm/model_executor/layers/quantization/gptq_marlin_24.py +297 -0
- vllm/model_executor/layers/quantization/hqq_marlin.py +332 -0
- vllm/model_executor/layers/quantization/ipex_quant.py +250 -0
- vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +90 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +83 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +116 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +300 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +143 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +120 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +131 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +67 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +87 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +120 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +137 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +41 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +105 -0
- vllm/model_executor/layers/quantization/kv_cache.py +139 -0
- vllm/model_executor/layers/quantization/marlin.py +261 -0
- vllm/model_executor/layers/quantization/modelopt.py +737 -0
- vllm/model_executor/layers/quantization/moe_wna16.py +449 -0
- vllm/model_executor/layers/quantization/neuron_quant.py +76 -0
- vllm/model_executor/layers/quantization/ptpc_fp8.py +127 -0
- vllm/model_executor/layers/quantization/qqq.py +275 -0
- vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
- vllm/model_executor/layers/quantization/quark/quark.py +441 -0
- vllm/model_executor/layers/quantization/quark/quark_moe.py +237 -0
- vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +126 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +146 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +122 -0
- vllm/model_executor/layers/quantization/quark/utils.py +105 -0
- vllm/model_executor/layers/quantization/schema.py +86 -0
- vllm/model_executor/layers/quantization/torchao.py +161 -0
- vllm/model_executor/layers/quantization/tpu_int8.py +121 -0
- vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
- vllm/model_executor/layers/quantization/utils/allspark_utils.py +52 -0
- vllm/model_executor/layers/quantization/utils/bitblas_utils.py +208 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/fp8_utils.py +618 -0
- vllm/model_executor/layers/quantization/utils/gptq_utils.py +95 -0
- vllm/model_executor/layers/quantization/utils/int8_utils.py +485 -0
- vllm/model_executor/layers/quantization/utils/layer_utils.py +40 -0
- vllm/model_executor/layers/quantization/utils/machete_utils.py +33 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils.py +476 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +283 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +325 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +165 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +464 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_qqq.py +126 -0
- vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +45 -0
- vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +104 -0
- vllm/model_executor/layers/quantization/utils/quant_utils.py +573 -0
- vllm/model_executor/layers/quantization/utils/w8a8_utils.py +405 -0
- vllm/model_executor/layers/rejection_sampler.py +406 -0
- vllm/model_executor/layers/resampler.py +270 -0
- vllm/model_executor/layers/rotary_embedding.py +1862 -0
- vllm/model_executor/layers/sampler.py +1204 -0
- vllm/model_executor/layers/spec_decode_base_sampler.py +259 -0
- vllm/model_executor/layers/typical_acceptance_sampler.py +166 -0
- vllm/model_executor/layers/utils.py +95 -0
- vllm/model_executor/layers/vocab_parallel_embedding.py +487 -0
- vllm/model_executor/model_loader/__init__.py +76 -0
- vllm/model_executor/model_loader/base_loader.py +43 -0
- vllm/model_executor/model_loader/bitsandbytes_loader.py +570 -0
- vllm/model_executor/model_loader/default_loader.py +282 -0
- vllm/model_executor/model_loader/dummy_loader.py +27 -0
- vllm/model_executor/model_loader/gguf_loader.py +120 -0
- vllm/model_executor/model_loader/neuron.py +476 -0
- vllm/model_executor/model_loader/neuronx_distributed.py +685 -0
- vllm/model_executor/model_loader/runai_streamer_loader.py +109 -0
- vllm/model_executor/model_loader/sharded_state_loader.py +201 -0
- vllm/model_executor/model_loader/tensorizer.py +600 -0
- vllm/model_executor/model_loader/tensorizer_loader.py +123 -0
- vllm/model_executor/model_loader/tpu.py +112 -0
- vllm/model_executor/model_loader/utils.py +302 -0
- vllm/model_executor/model_loader/weight_utils.py +782 -0
- vllm/model_executor/models/__init__.py +28 -0
- vllm/model_executor/models/adapters.py +248 -0
- vllm/model_executor/models/aimv2.py +246 -0
- vllm/model_executor/models/arctic.py +559 -0
- vllm/model_executor/models/aria.py +657 -0
- vllm/model_executor/models/aya_vision.py +466 -0
- vllm/model_executor/models/baichuan.py +474 -0
- vllm/model_executor/models/bamba.py +543 -0
- vllm/model_executor/models/bart.py +938 -0
- vllm/model_executor/models/bert.py +523 -0
- vllm/model_executor/models/bert_with_rope.py +769 -0
- vllm/model_executor/models/blip.py +339 -0
- vllm/model_executor/models/blip2.py +718 -0
- vllm/model_executor/models/bloom.py +373 -0
- vllm/model_executor/models/chameleon.py +1136 -0
- vllm/model_executor/models/chatglm.py +478 -0
- vllm/model_executor/models/clip.py +407 -0
- vllm/model_executor/models/commandr.py +472 -0
- vllm/model_executor/models/constant_size_cache.py +137 -0
- vllm/model_executor/models/dbrx.py +472 -0
- vllm/model_executor/models/deepseek.py +486 -0
- vllm/model_executor/models/deepseek_mtp.py +269 -0
- vllm/model_executor/models/deepseek_v2.py +843 -0
- vllm/model_executor/models/deepseek_vl2.py +648 -0
- vllm/model_executor/models/eagle.py +260 -0
- vllm/model_executor/models/exaone.py +551 -0
- vllm/model_executor/models/fairseq2_llama.py +154 -0
- vllm/model_executor/models/falcon.py +510 -0
- vllm/model_executor/models/falcon_h1.py +685 -0
- vllm/model_executor/models/florence2.py +1103 -0
- vllm/model_executor/models/fuyu.py +389 -0
- vllm/model_executor/models/gemma.py +425 -0
- vllm/model_executor/models/gemma2.py +425 -0
- vllm/model_executor/models/gemma3.py +533 -0
- vllm/model_executor/models/gemma3_mm.py +709 -0
- vllm/model_executor/models/glm.py +23 -0
- vllm/model_executor/models/glm4.py +305 -0
- vllm/model_executor/models/glm4v.py +648 -0
- vllm/model_executor/models/gpt2.py +328 -0
- vllm/model_executor/models/gpt_bigcode.py +335 -0
- vllm/model_executor/models/gpt_j.py +339 -0
- vllm/model_executor/models/gpt_neox.py +332 -0
- vllm/model_executor/models/granite.py +493 -0
- vllm/model_executor/models/granite_speech.py +779 -0
- vllm/model_executor/models/granitemoe.py +437 -0
- vllm/model_executor/models/granitemoehybrid.py +586 -0
- vllm/model_executor/models/granitemoeshared.py +341 -0
- vllm/model_executor/models/gritlm.py +224 -0
- vllm/model_executor/models/grok1.py +546 -0
- vllm/model_executor/models/h2ovl.py +546 -0
- vllm/model_executor/models/idefics2_vision_model.py +389 -0
- vllm/model_executor/models/idefics3.py +776 -0
- vllm/model_executor/models/interfaces.py +572 -0
- vllm/model_executor/models/interfaces_base.py +164 -0
- vllm/model_executor/models/intern_vit.py +480 -0
- vllm/model_executor/models/internlm2.py +455 -0
- vllm/model_executor/models/internlm2_ve.py +147 -0
- vllm/model_executor/models/internvl.py +1418 -0
- vllm/model_executor/models/jais.py +373 -0
- vllm/model_executor/models/jamba.py +592 -0
- vllm/model_executor/models/kimi_vl.py +577 -0
- vllm/model_executor/models/llama.py +644 -0
- vllm/model_executor/models/llama4.py +532 -0
- vllm/model_executor/models/llama_eagle.py +165 -0
- vllm/model_executor/models/llama_eagle3.py +263 -0
- vllm/model_executor/models/llava.py +866 -0
- vllm/model_executor/models/llava_next.py +586 -0
- vllm/model_executor/models/llava_next_video.py +471 -0
- vllm/model_executor/models/llava_onevision.py +956 -0
- vllm/model_executor/models/mamba.py +273 -0
- vllm/model_executor/models/mamba2.py +308 -0
- vllm/model_executor/models/mamba_cache.py +76 -0
- vllm/model_executor/models/medusa.py +219 -0
- vllm/model_executor/models/mimo.py +192 -0
- vllm/model_executor/models/mimo_mtp.py +285 -0
- vllm/model_executor/models/minicpm.py +592 -0
- vllm/model_executor/models/minicpm3.py +230 -0
- vllm/model_executor/models/minicpm_eagle.py +391 -0
- vllm/model_executor/models/minicpmo.py +759 -0
- vllm/model_executor/models/minicpmv.py +1287 -0
- vllm/model_executor/models/minimax_cache.py +36 -0
- vllm/model_executor/models/minimax_text_01.py +1301 -0
- vllm/model_executor/models/minimax_vl_01.py +364 -0
- vllm/model_executor/models/mistral3.py +604 -0
- vllm/model_executor/models/mixtral.py +488 -0
- vllm/model_executor/models/mixtral_quant.py +453 -0
- vllm/model_executor/models/mllama.py +1624 -0
- vllm/model_executor/models/mllama4.py +938 -0
- vllm/model_executor/models/mlp_speculator.py +206 -0
- vllm/model_executor/models/modernbert.py +331 -0
- vllm/model_executor/models/module_mapping.py +72 -0
- vllm/model_executor/models/molmo.py +1568 -0
- vllm/model_executor/models/moonvit.py +630 -0
- vllm/model_executor/models/mpt.py +331 -0
- vllm/model_executor/models/nemotron.py +508 -0
- vllm/model_executor/models/nemotron_h.py +573 -0
- vllm/model_executor/models/nemotron_nas.py +484 -0
- vllm/model_executor/models/nvlm_d.py +216 -0
- vllm/model_executor/models/olmo.py +389 -0
- vllm/model_executor/models/olmo2.py +414 -0
- vllm/model_executor/models/olmoe.py +468 -0
- vllm/model_executor/models/opt.py +412 -0
- vllm/model_executor/models/orion.py +349 -0
- vllm/model_executor/models/ovis.py +567 -0
- vllm/model_executor/models/paligemma.py +398 -0
- vllm/model_executor/models/persimmon.py +344 -0
- vllm/model_executor/models/phi.py +356 -0
- vllm/model_executor/models/phi3.py +19 -0
- vllm/model_executor/models/phi3_small.py +465 -0
- vllm/model_executor/models/phi3v.py +723 -0
- vllm/model_executor/models/phi4mm.py +1246 -0
- vllm/model_executor/models/phi4mm_audio.py +1233 -0
- vllm/model_executor/models/phi4mm_utils.py +1884 -0
- vllm/model_executor/models/phimoe.py +665 -0
- vllm/model_executor/models/pixtral.py +1316 -0
- vllm/model_executor/models/plamo2.py +738 -0
- vllm/model_executor/models/prithvi_geospatial_mae.py +232 -0
- vllm/model_executor/models/qwen.py +362 -0
- vllm/model_executor/models/qwen2.py +497 -0
- vllm/model_executor/models/qwen2_5_omni_thinker.py +904 -0
- vllm/model_executor/models/qwen2_5_vl.py +1166 -0
- vllm/model_executor/models/qwen2_audio.py +410 -0
- vllm/model_executor/models/qwen2_moe.py +540 -0
- vllm/model_executor/models/qwen2_rm.py +132 -0
- vllm/model_executor/models/qwen2_vl.py +1405 -0
- vllm/model_executor/models/qwen3.py +321 -0
- vllm/model_executor/models/qwen3_moe.py +535 -0
- vllm/model_executor/models/qwen_vl.py +785 -0
- vllm/model_executor/models/registry.py +622 -0
- vllm/model_executor/models/roberta.py +276 -0
- vllm/model_executor/models/siglip.py +524 -0
- vllm/model_executor/models/skyworkr1v.py +951 -0
- vllm/model_executor/models/smolvlm.py +52 -0
- vllm/model_executor/models/solar.py +506 -0
- vllm/model_executor/models/stablelm.py +343 -0
- vllm/model_executor/models/starcoder2.py +356 -0
- vllm/model_executor/models/tarsier.py +643 -0
- vllm/model_executor/models/telechat2.py +140 -0
- vllm/model_executor/models/teleflm.py +79 -0
- vllm/model_executor/models/transformers.py +508 -0
- vllm/model_executor/models/ultravox.py +656 -0
- vllm/model_executor/models/utils.py +731 -0
- vllm/model_executor/models/vision.py +147 -0
- vllm/model_executor/models/whisper.py +747 -0
- vllm/model_executor/models/zamba2.py +1009 -0
- vllm/model_executor/parameter.py +459 -0
- vllm/model_executor/pooling_metadata.py +72 -0
- vllm/model_executor/sampling_metadata.py +597 -0
- vllm/model_executor/utils.py +77 -0
- vllm/multimodal/__init__.py +33 -0
- vllm/multimodal/audio.py +106 -0
- vllm/multimodal/base.py +219 -0
- vllm/multimodal/hasher.py +118 -0
- vllm/multimodal/image.py +97 -0
- vllm/multimodal/inputs.py +876 -0
- vllm/multimodal/parse.py +461 -0
- vllm/multimodal/processing.py +1895 -0
- vllm/multimodal/profiling.py +258 -0
- vllm/multimodal/registry.py +331 -0
- vllm/multimodal/utils.py +436 -0
- vllm/multimodal/video.py +198 -0
- vllm/outputs.py +512 -0
- vllm/platforms/__init__.py +291 -0
- vllm/platforms/cpu.py +266 -0
- vllm/platforms/cuda.py +526 -0
- vllm/platforms/hpu.py +106 -0
- vllm/platforms/interface.py +538 -0
- vllm/platforms/neuron.py +150 -0
- vllm/platforms/rocm.py +435 -0
- vllm/platforms/tpu.py +216 -0
- vllm/platforms/xpu.py +156 -0
- vllm/plugins/__init__.py +94 -0
- vllm/plugins/lora_resolvers/README.md +15 -0
- vllm/plugins/lora_resolvers/__init__.py +0 -0
- vllm/plugins/lora_resolvers/filesystem_resolver.py +50 -0
- vllm/pooling_params.py +54 -0
- vllm/profiler/__init__.py +0 -0
- vllm/profiler/layerwise_profile.py +375 -0
- vllm/profiler/utils.py +148 -0
- vllm/prompt_adapter/__init__.py +0 -0
- vllm/prompt_adapter/layers.py +83 -0
- vllm/prompt_adapter/models.py +358 -0
- vllm/prompt_adapter/request.py +37 -0
- vllm/prompt_adapter/utils.py +98 -0
- vllm/prompt_adapter/worker_manager.py +179 -0
- vllm/py.typed +2 -0
- vllm/reasoning/__init__.py +15 -0
- vllm/reasoning/abs_reasoning_parsers.py +192 -0
- vllm/reasoning/deepseek_r1_reasoning_parser.py +173 -0
- vllm/reasoning/granite_reasoning_parser.py +363 -0
- vllm/reasoning/qwen3_reasoning_parser.py +151 -0
- vllm/sampling_params.py +602 -0
- vllm/scalar_type.py +347 -0
- vllm/scripts.py +15 -0
- vllm/sequence.py +1568 -0
- vllm/spec_decode/__init__.py +0 -0
- vllm/spec_decode/batch_expansion.py +506 -0
- vllm/spec_decode/draft_model_runner.py +349 -0
- vllm/spec_decode/interfaces.py +99 -0
- vllm/spec_decode/medusa_worker.py +138 -0
- vllm/spec_decode/metrics.py +213 -0
- vllm/spec_decode/mlp_speculator_worker.py +94 -0
- vllm/spec_decode/mqa_scorer.py +160 -0
- vllm/spec_decode/multi_step_worker.py +423 -0
- vllm/spec_decode/ngram_worker.py +196 -0
- vllm/spec_decode/proposer_worker_base.py +59 -0
- vllm/spec_decode/smaller_tp_proposer_worker.py +196 -0
- vllm/spec_decode/spec_decode_worker.py +1326 -0
- vllm/spec_decode/target_model_runner.py +45 -0
- vllm/spec_decode/top1_proposer.py +275 -0
- vllm/spec_decode/util.py +277 -0
- vllm/test_utils.py +130 -0
- vllm/third_party/__init__.py +0 -0
- vllm/third_party/pynvml.py +6140 -0
- vllm/tracing.py +131 -0
- vllm/transformers_utils/__init__.py +24 -0
- vllm/transformers_utils/chat_templates/__init__.py +5 -0
- vllm/transformers_utils/chat_templates/registry.py +60 -0
- vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
- vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
- vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
- vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
- vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
- vllm/transformers_utils/config.py +887 -0
- vllm/transformers_utils/configs/__init__.py +61 -0
- vllm/transformers_utils/configs/arctic.py +207 -0
- vllm/transformers_utils/configs/chatglm.py +72 -0
- vllm/transformers_utils/configs/cohere2.py +195 -0
- vllm/transformers_utils/configs/dbrx.py +280 -0
- vllm/transformers_utils/configs/deepseek_vl2.py +216 -0
- vllm/transformers_utils/configs/eagle.py +85 -0
- vllm/transformers_utils/configs/exaone.py +190 -0
- vllm/transformers_utils/configs/falcon.py +90 -0
- vllm/transformers_utils/configs/h2ovl.py +16 -0
- vllm/transformers_utils/configs/internvl.py +54 -0
- vllm/transformers_utils/configs/jais.py +238 -0
- vllm/transformers_utils/configs/kimi_vl.py +37 -0
- vllm/transformers_utils/configs/medusa.py +63 -0
- vllm/transformers_utils/configs/minimax_text_01.py +70 -0
- vllm/transformers_utils/configs/minimax_vl_01.py +71 -0
- vllm/transformers_utils/configs/mllama.py +31 -0
- vllm/transformers_utils/configs/mlp_speculator.py +68 -0
- vllm/transformers_utils/configs/moonvit.py +33 -0
- vllm/transformers_utils/configs/mpt.py +180 -0
- vllm/transformers_utils/configs/nemotron.py +205 -0
- vllm/transformers_utils/configs/nemotron_h.py +258 -0
- vllm/transformers_utils/configs/nvlm_d.py +15 -0
- vllm/transformers_utils/configs/ovis.py +184 -0
- vllm/transformers_utils/configs/skyworkr1v.py +54 -0
- vllm/transformers_utils/configs/solar.py +247 -0
- vllm/transformers_utils/configs/telechat2.py +64 -0
- vllm/transformers_utils/configs/ultravox.py +108 -0
- vllm/transformers_utils/detokenizer.py +168 -0
- vllm/transformers_utils/detokenizer_utils.py +189 -0
- vllm/transformers_utils/processor.py +221 -0
- vllm/transformers_utils/processors/__init__.py +8 -0
- vllm/transformers_utils/processors/deepseek_vl2.py +363 -0
- vllm/transformers_utils/processors/ovis.py +420 -0
- vllm/transformers_utils/s3_utils.py +162 -0
- vllm/transformers_utils/tokenizer.py +302 -0
- vllm/transformers_utils/tokenizer_base.py +149 -0
- vllm/transformers_utils/tokenizer_group.py +120 -0
- vllm/transformers_utils/tokenizers/__init__.py +10 -0
- vllm/transformers_utils/tokenizers/mistral.py +493 -0
- vllm/transformers_utils/utils.py +99 -0
- vllm/triton_utils/__init__.py +14 -0
- vllm/triton_utils/importing.py +50 -0
- vllm/usage/__init__.py +0 -0
- vllm/usage/usage_lib.py +256 -0
- vllm/utils.py +2910 -0
- vllm/v1/__init__.py +0 -0
- vllm/v1/attention/__init__.py +0 -0
- vllm/v1/attention/backends/__init__.py +0 -0
- vllm/v1/attention/backends/cpu_attn.py +163 -0
- vllm/v1/attention/backends/flash_attn.py +869 -0
- vllm/v1/attention/backends/flashinfer.py +651 -0
- vllm/v1/attention/backends/flex_attention.py +477 -0
- vllm/v1/attention/backends/mla/__init__.py +0 -0
- vllm/v1/attention/backends/mla/common.py +931 -0
- vllm/v1/attention/backends/mla/cutlass_mla.py +97 -0
- vllm/v1/attention/backends/mla/flashmla.py +152 -0
- vllm/v1/attention/backends/mla/rocm_aiter_mla.py +220 -0
- vllm/v1/attention/backends/mla/triton_mla.py +120 -0
- vllm/v1/attention/backends/pallas.py +240 -0
- vllm/v1/attention/backends/triton_attn.py +285 -0
- vllm/v1/attention/backends/utils.py +52 -0
- vllm/v1/core/__init__.py +0 -0
- vllm/v1/core/block_pool.py +349 -0
- vllm/v1/core/encoder_cache_manager.py +150 -0
- vllm/v1/core/kv_cache_coordinator.py +363 -0
- vllm/v1/core/kv_cache_manager.py +392 -0
- vllm/v1/core/kv_cache_utils.py +996 -0
- vllm/v1/core/sched/__init__.py +0 -0
- vllm/v1/core/sched/interface.py +150 -0
- vllm/v1/core/sched/output.py +154 -0
- vllm/v1/core/sched/scheduler.py +1044 -0
- vllm/v1/core/sched/utils.py +23 -0
- vllm/v1/core/single_type_kv_cache_manager.py +403 -0
- vllm/v1/engine/__init__.py +173 -0
- vllm/v1/engine/async_llm.py +558 -0
- vllm/v1/engine/coordinator.py +253 -0
- vllm/v1/engine/core.py +961 -0
- vllm/v1/engine/core_client.py +1129 -0
- vllm/v1/engine/detokenizer.py +261 -0
- vllm/v1/engine/exceptions.py +17 -0
- vllm/v1/engine/llm_engine.py +317 -0
- vllm/v1/engine/logprobs.py +199 -0
- vllm/v1/engine/mm_input_cache.py +91 -0
- vllm/v1/engine/output_processor.py +428 -0
- vllm/v1/engine/parallel_sampling.py +133 -0
- vllm/v1/engine/processor.py +407 -0
- vllm/v1/executor/__init__.py +0 -0
- vllm/v1/executor/abstract.py +113 -0
- vllm/v1/executor/multiproc_executor.py +537 -0
- vllm/v1/executor/ray_distributed_executor.py +62 -0
- vllm/v1/kv_cache_interface.py +194 -0
- vllm/v1/metrics/__init__.py +0 -0
- vllm/v1/metrics/loggers.py +523 -0
- vllm/v1/metrics/prometheus.py +82 -0
- vllm/v1/metrics/ray_wrappers.py +131 -0
- vllm/v1/metrics/reader.py +246 -0
- vllm/v1/metrics/stats.py +239 -0
- vllm/v1/outputs.py +116 -0
- vllm/v1/request.py +193 -0
- vllm/v1/sample/__init__.py +0 -0
- vllm/v1/sample/metadata.py +44 -0
- vllm/v1/sample/ops/__init__.py +0 -0
- vllm/v1/sample/ops/bad_words.py +39 -0
- vllm/v1/sample/ops/penalties.py +59 -0
- vllm/v1/sample/ops/topk_topp_sampler.py +293 -0
- vllm/v1/sample/rejection_sampler.py +631 -0
- vllm/v1/sample/sampler.py +286 -0
- vllm/v1/sample/tpu/__init__.py +0 -0
- vllm/v1/sample/tpu/metadata.py +124 -0
- vllm/v1/sample/tpu/sampler.py +145 -0
- vllm/v1/serial_utils.py +315 -0
- vllm/v1/spec_decode/__init__.py +0 -0
- vllm/v1/spec_decode/eagle.py +432 -0
- vllm/v1/spec_decode/medusa.py +62 -0
- vllm/v1/spec_decode/metadata.py +62 -0
- vllm/v1/spec_decode/metrics.py +178 -0
- vllm/v1/spec_decode/ngram_proposer.py +132 -0
- vllm/v1/spec_decode/utils.py +46 -0
- vllm/v1/structured_output/__init__.py +222 -0
- vllm/v1/structured_output/backend_guidance.py +245 -0
- vllm/v1/structured_output/backend_types.py +134 -0
- vllm/v1/structured_output/backend_xgrammar.py +318 -0
- vllm/v1/structured_output/request.py +86 -0
- vllm/v1/structured_output/utils.py +175 -0
- vllm/v1/utils.py +743 -0
- vllm/v1/worker/__init__.py +0 -0
- vllm/v1/worker/block_table.py +142 -0
- vllm/v1/worker/cpu_model_runner.py +86 -0
- vllm/v1/worker/cpu_worker.py +152 -0
- vllm/v1/worker/gpu_input_batch.py +681 -0
- vllm/v1/worker/gpu_model_runner.py +2320 -0
- vllm/v1/worker/gpu_worker.py +393 -0
- vllm/v1/worker/lora_model_runner_mixin.py +173 -0
- vllm/v1/worker/tpu_model_runner.py +1673 -0
- vllm/v1/worker/tpu_worker.py +299 -0
- vllm/v1/worker/utils.py +111 -0
- vllm/v1/worker/worker_base.py +65 -0
- vllm/version.py +41 -0
- vllm/vllm_flash_attn/.gitkeep +0 -0
- vllm/worker/__init__.py +0 -0
- vllm/worker/cache_engine.py +145 -0
- vllm/worker/cpu_enc_dec_model_runner.py +326 -0
- vllm/worker/cpu_model_runner.py +671 -0
- vllm/worker/cpu_pooling_model_runner.py +125 -0
- vllm/worker/cpu_worker.py +450 -0
- vllm/worker/enc_dec_model_runner.py +555 -0
- vllm/worker/hpu_model_runner.py +2320 -0
- vllm/worker/hpu_worker.py +484 -0
- vllm/worker/model_runner.py +2178 -0
- vllm/worker/model_runner_base.py +282 -0
- vllm/worker/multi_step_hpu_worker.py +123 -0
- vllm/worker/multi_step_model_runner.py +911 -0
- vllm/worker/multi_step_neuron_model_runner.py +84 -0
- vllm/worker/multi_step_neuronx_distributed_model_runner.py +63 -0
- vllm/worker/multi_step_tpu_worker.py +108 -0
- vllm/worker/multi_step_worker.py +197 -0
- vllm/worker/neuron_model_runner.py +460 -0
- vllm/worker/neuron_worker.py +193 -0
- vllm/worker/neuronx_distributed_model_runner.py +294 -0
- vllm/worker/pooling_model_runner.py +211 -0
- vllm/worker/tpu_model_runner.py +909 -0
- vllm/worker/tpu_worker.py +337 -0
- vllm/worker/utils.py +53 -0
- vllm/worker/worker.py +577 -0
- vllm/worker/worker_base.py +646 -0
- vllm/worker/xpu_model_runner.py +606 -0
- vllm/worker/xpu_worker.py +186 -0
- vllm_cpu_amxbf16-0.9.1.dist-info/METADATA +305 -0
- vllm_cpu_amxbf16-0.9.1.dist-info/RECORD +1197 -0
- vllm_cpu_amxbf16-0.9.1.dist-info/WHEEL +5 -0
- vllm_cpu_amxbf16-0.9.1.dist-info/entry_points.txt +5 -0
- vllm_cpu_amxbf16-0.9.1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,951 @@
|
|
|
1
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
2
|
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
|
3
|
+
|
|
4
|
+
# adapted from https://huggingface.co/Skywork/Skywork-R1V-38B/blob/main/modeling_skywork_chat.py
|
|
5
|
+
# --------------------------------------------------------
|
|
6
|
+
# SkyworkR1V
|
|
7
|
+
# Copyright (c) 2025 Skywork
|
|
8
|
+
# Licensed under The MIT License [see LICENSE for details]
|
|
9
|
+
# --------------------------------------------------------
|
|
10
|
+
from abc import ABC, abstractmethod
|
|
11
|
+
from collections.abc import Iterable, Mapping, Sequence
|
|
12
|
+
from typing import Literal, Optional, TypedDict, TypeVar, Union
|
|
13
|
+
|
|
14
|
+
import torch
|
|
15
|
+
import torch.nn as nn
|
|
16
|
+
import torchvision.transforms as T
|
|
17
|
+
from PIL import Image
|
|
18
|
+
from transformers import BatchEncoding, PretrainedConfig, TensorType
|
|
19
|
+
|
|
20
|
+
from vllm.config import VllmConfig
|
|
21
|
+
from vllm.model_executor.layers.linear import ReplicatedLinear
|
|
22
|
+
from vllm.model_executor.layers.quantization import QuantizationConfig
|
|
23
|
+
from vllm.model_executor.layers.quantization.awq import AWQConfig
|
|
24
|
+
from vllm.model_executor.models.intern_vit import (InternVisionModel,
|
|
25
|
+
InternVisionPatchModel)
|
|
26
|
+
from vllm.model_executor.sampling_metadata import SamplingMetadata
|
|
27
|
+
from vllm.multimodal import MULTIMODAL_REGISTRY
|
|
28
|
+
from vllm.multimodal.image import convert_image_mode
|
|
29
|
+
from vllm.multimodal.inputs import (MultiModalDataDict, MultiModalFieldConfig,
|
|
30
|
+
MultiModalKwargs, NestedTensors)
|
|
31
|
+
from vllm.multimodal.parse import (ImageEmbeddingItems, ImageProcessorItems,
|
|
32
|
+
ImageSize, MultiModalDataItems)
|
|
33
|
+
from vllm.multimodal.processing import (BaseMultiModalProcessor,
|
|
34
|
+
BaseProcessingInfo, PromptReplacement,
|
|
35
|
+
PromptUpdate, PromptUpdateDetails)
|
|
36
|
+
from vllm.multimodal.profiling import BaseDummyInputsBuilder
|
|
37
|
+
from vllm.sequence import IntermediateTensors
|
|
38
|
+
from vllm.transformers_utils.tokenizer import AnyTokenizer
|
|
39
|
+
|
|
40
|
+
from .interfaces import MultiModalEmbeddings, SupportsMultiModal, SupportsPP
|
|
41
|
+
from .utils import (AutoWeightsLoader, flatten_bn, init_vllm_registered_model,
|
|
42
|
+
maybe_prefix, merge_multimodal_embeddings)
|
|
43
|
+
|
|
44
|
+
IMG_START = '<img>'
|
|
45
|
+
IMG_END = '</img>'
|
|
46
|
+
IMG_CONTEXT = '<IMG_CONTEXT>'
|
|
47
|
+
|
|
48
|
+
IMAGENET_MEAN = (0.485, 0.456, 0.406)
|
|
49
|
+
IMAGENET_STD = (0.229, 0.224, 0.225)
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
class SkyworkR1VImagePixelInputs(TypedDict):
|
|
53
|
+
type: Literal["pixel_values"]
|
|
54
|
+
pixel_values_flat: torch.Tensor
|
|
55
|
+
"""
|
|
56
|
+
Shape:
|
|
57
|
+
`(batch_size * num_images * (1 + num_patches), num_channels, height, width)`
|
|
58
|
+
"""
|
|
59
|
+
|
|
60
|
+
num_patches: torch.Tensor
|
|
61
|
+
"""Shape: `(batch_size * num_images)`"""
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
class SkyworkR1VImageEmbeddingInputs(TypedDict):
|
|
65
|
+
type: Literal["image_embeds"]
|
|
66
|
+
data: Union[torch.Tensor, list[torch.Tensor]]
|
|
67
|
+
"""
|
|
68
|
+
A tensor of shape `(num_images, total_image_feature_size, hidden_size)`
|
|
69
|
+
or a list of tensors of shape `(total_image_feature_size, hidden_size)`
|
|
70
|
+
|
|
71
|
+
`hidden_size` must match the hidden size of language model backbone.
|
|
72
|
+
"""
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
SkyworkR1VImageInputs = Union[SkyworkR1VImagePixelInputs,
|
|
76
|
+
SkyworkR1VImageEmbeddingInputs]
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
# adapted from https://huggingface.co/Skywork/Skywork-R1V-38B/
|
|
80
|
+
def build_transform(input_size: int):
|
|
81
|
+
MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
|
|
82
|
+
return T.Compose([
|
|
83
|
+
T.Lambda(lambda img: convert_image_mode(img, 'RGB')),
|
|
84
|
+
T.Resize((input_size, input_size),
|
|
85
|
+
interpolation=T.InterpolationMode.BICUBIC),
|
|
86
|
+
T.ToTensor(),
|
|
87
|
+
T.Normalize(mean=MEAN, std=STD)
|
|
88
|
+
])
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
# adapted from https://huggingface.co/Skywork/Skywork-R1V-38B/
|
|
92
|
+
def find_closest_aspect_ratio(
|
|
93
|
+
aspect_ratio: float,
|
|
94
|
+
target_ratios: list[tuple[int, int]],
|
|
95
|
+
*,
|
|
96
|
+
width: int,
|
|
97
|
+
height: int,
|
|
98
|
+
image_size: int,
|
|
99
|
+
) -> tuple[int, int]:
|
|
100
|
+
best_ratio_diff = float('inf')
|
|
101
|
+
best_ratio = (1, 1)
|
|
102
|
+
area = width * height
|
|
103
|
+
for ratio in target_ratios:
|
|
104
|
+
target_aspect_ratio = ratio[0] / ratio[1]
|
|
105
|
+
ratio_diff = abs(aspect_ratio - target_aspect_ratio)
|
|
106
|
+
if ratio_diff < best_ratio_diff:
|
|
107
|
+
best_ratio_diff = ratio_diff
|
|
108
|
+
best_ratio = ratio
|
|
109
|
+
elif ratio_diff == best_ratio_diff:
|
|
110
|
+
if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
|
|
111
|
+
best_ratio = ratio
|
|
112
|
+
return best_ratio
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
def resolve_skyworkr1v_min_max_num(
|
|
116
|
+
*,
|
|
117
|
+
min_dynamic_patch: int,
|
|
118
|
+
max_dynamic_patch: int,
|
|
119
|
+
dynamic_image_size: bool,
|
|
120
|
+
use_thumbnail: bool,
|
|
121
|
+
) -> tuple[int, int]:
|
|
122
|
+
min_dynamic_patch = min_dynamic_patch if dynamic_image_size else 1
|
|
123
|
+
max_dynamic_patch = max_dynamic_patch if dynamic_image_size else 1
|
|
124
|
+
|
|
125
|
+
if use_thumbnail and max_dynamic_patch != 1:
|
|
126
|
+
max_dynamic_patch += 1
|
|
127
|
+
|
|
128
|
+
return min_dynamic_patch, max_dynamic_patch
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
def get_skyworkr1v_target_ratios(
|
|
132
|
+
min_num: int,
|
|
133
|
+
max_num: int,
|
|
134
|
+
) -> list[tuple[int, int]]:
|
|
135
|
+
target_ratios = {(i, j)
|
|
136
|
+
for n in range(min_num, max_num + 1)
|
|
137
|
+
for i in range(1, n + 1)
|
|
138
|
+
for j in range(1, n + 1) if min_num <= i * j <= max_num}
|
|
139
|
+
return sorted(target_ratios, key=lambda x: x[0] * x[1])
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
def calculate_skyworkr1v_targets(
|
|
143
|
+
*,
|
|
144
|
+
orig_width: int,
|
|
145
|
+
orig_height: int,
|
|
146
|
+
target_ratios: list[tuple[int, int]],
|
|
147
|
+
image_size: int,
|
|
148
|
+
use_thumbnail: bool,
|
|
149
|
+
) -> tuple[int, int, int]:
|
|
150
|
+
aspect_ratio = orig_width / orig_height
|
|
151
|
+
|
|
152
|
+
# find the closest aspect ratio to the target
|
|
153
|
+
target_aspect_ratio = find_closest_aspect_ratio(
|
|
154
|
+
aspect_ratio,
|
|
155
|
+
target_ratios,
|
|
156
|
+
width=orig_width,
|
|
157
|
+
height=orig_height,
|
|
158
|
+
image_size=image_size,
|
|
159
|
+
)
|
|
160
|
+
|
|
161
|
+
# calculate the target width and height
|
|
162
|
+
target_width = image_size * target_aspect_ratio[0]
|
|
163
|
+
target_height = image_size * target_aspect_ratio[1]
|
|
164
|
+
blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
|
|
165
|
+
|
|
166
|
+
# add thumbnail image if num_blocks != 1
|
|
167
|
+
if use_thumbnail and blocks != 1:
|
|
168
|
+
blocks += 1
|
|
169
|
+
|
|
170
|
+
return blocks, target_width, target_height
|
|
171
|
+
|
|
172
|
+
|
|
173
|
+
def dynamic_preprocess_skyworkr1v(
|
|
174
|
+
image: Image.Image,
|
|
175
|
+
*,
|
|
176
|
+
target_ratios: list[tuple[int, int]],
|
|
177
|
+
image_size: int,
|
|
178
|
+
use_thumbnail: bool,
|
|
179
|
+
) -> list[Image.Image]:
|
|
180
|
+
orig_width, orig_height = image.size
|
|
181
|
+
|
|
182
|
+
# calculate the number of blocks without thumbnail
|
|
183
|
+
blocks, target_width, target_height = calculate_skyworkr1v_targets(
|
|
184
|
+
orig_width=orig_width,
|
|
185
|
+
orig_height=orig_height,
|
|
186
|
+
target_ratios=target_ratios,
|
|
187
|
+
image_size=image_size,
|
|
188
|
+
use_thumbnail=False,
|
|
189
|
+
)
|
|
190
|
+
|
|
191
|
+
# resize the image
|
|
192
|
+
resized_img = image.resize((target_width, target_height))
|
|
193
|
+
processed_images = []
|
|
194
|
+
for i in range(blocks):
|
|
195
|
+
box = ((i % (target_width // image_size)) * image_size,
|
|
196
|
+
(i // (target_width // image_size)) * image_size,
|
|
197
|
+
((i % (target_width // image_size)) + 1) * image_size,
|
|
198
|
+
((i // (target_width // image_size)) + 1) * image_size)
|
|
199
|
+
# split the image
|
|
200
|
+
split_img = resized_img.crop(box)
|
|
201
|
+
processed_images.append(split_img)
|
|
202
|
+
|
|
203
|
+
assert len(processed_images) == blocks
|
|
204
|
+
|
|
205
|
+
if use_thumbnail and len(processed_images) != 1:
|
|
206
|
+
thumbnail_img = image.resize((image_size, image_size))
|
|
207
|
+
processed_images.append(thumbnail_img)
|
|
208
|
+
|
|
209
|
+
return processed_images
|
|
210
|
+
|
|
211
|
+
|
|
212
|
+
# adapted from https://huggingface.co/Skywork/Skywork-R1V-38B
|
|
213
|
+
def image_to_pixel_values_skyworkr1v(
|
|
214
|
+
image: Image.Image,
|
|
215
|
+
*,
|
|
216
|
+
input_size: int,
|
|
217
|
+
min_num: int,
|
|
218
|
+
max_num: int,
|
|
219
|
+
use_thumbnail: bool,
|
|
220
|
+
) -> torch.Tensor:
|
|
221
|
+
target_ratios = get_skyworkr1v_target_ratios(min_num, max_num)
|
|
222
|
+
|
|
223
|
+
transform = build_transform(input_size=input_size)
|
|
224
|
+
images = dynamic_preprocess_skyworkr1v(
|
|
225
|
+
image,
|
|
226
|
+
target_ratios=target_ratios,
|
|
227
|
+
image_size=input_size,
|
|
228
|
+
use_thumbnail=use_thumbnail,
|
|
229
|
+
)
|
|
230
|
+
|
|
231
|
+
pixel_values = torch.stack([transform(image) for image in images])
|
|
232
|
+
return pixel_values
|
|
233
|
+
|
|
234
|
+
|
|
235
|
+
class BaseSkyworkR1VProcessor(ABC):
|
|
236
|
+
"""
|
|
237
|
+
This model doesn't define its own HF processor,
|
|
238
|
+
so we implement our own one here.
|
|
239
|
+
|
|
240
|
+
The code to insert image tokens is based on:
|
|
241
|
+
https://huggingface.co/Skywork/Skywork-R1V-38B/blob/main/modeling_skywork_chat.py#L252
|
|
242
|
+
"""
|
|
243
|
+
|
|
244
|
+
def __init__(
|
|
245
|
+
self,
|
|
246
|
+
config: PretrainedConfig,
|
|
247
|
+
tokenizer: AnyTokenizer,
|
|
248
|
+
*,
|
|
249
|
+
min_dynamic_patch: Optional[int] = None,
|
|
250
|
+
max_dynamic_patch: Optional[int] = None,
|
|
251
|
+
dynamic_image_size: Optional[bool] = None,
|
|
252
|
+
) -> None:
|
|
253
|
+
super().__init__()
|
|
254
|
+
|
|
255
|
+
self.config = config
|
|
256
|
+
self.tokenizer = tokenizer
|
|
257
|
+
|
|
258
|
+
image_size: int = config.vision_config.image_size
|
|
259
|
+
patch_size: int = config.vision_config.patch_size
|
|
260
|
+
|
|
261
|
+
if min_dynamic_patch is None:
|
|
262
|
+
min_dynamic_patch = config.min_dynamic_patch
|
|
263
|
+
assert isinstance(min_dynamic_patch, int)
|
|
264
|
+
|
|
265
|
+
if max_dynamic_patch is None:
|
|
266
|
+
max_dynamic_patch = config.max_dynamic_patch
|
|
267
|
+
assert isinstance(max_dynamic_patch, int)
|
|
268
|
+
|
|
269
|
+
if dynamic_image_size is None:
|
|
270
|
+
dynamic_image_size = config.dynamic_image_size
|
|
271
|
+
assert isinstance(dynamic_image_size, bool)
|
|
272
|
+
|
|
273
|
+
self.num_image_token = int(
|
|
274
|
+
(image_size // patch_size)**2 * (config.downsample_ratio**2))
|
|
275
|
+
self.image_size = image_size
|
|
276
|
+
self.min_dynamic_patch = min_dynamic_patch
|
|
277
|
+
self.max_dynamic_patch = max_dynamic_patch
|
|
278
|
+
self.dynamic_image_size = dynamic_image_size
|
|
279
|
+
self.use_thumbnail: bool = config.use_thumbnail
|
|
280
|
+
|
|
281
|
+
@property
|
|
282
|
+
@abstractmethod
|
|
283
|
+
def image_token_id(self) -> int:
|
|
284
|
+
raise NotImplementedError
|
|
285
|
+
|
|
286
|
+
@abstractmethod
|
|
287
|
+
def get_image_repl(
|
|
288
|
+
self,
|
|
289
|
+
feature_size: int,
|
|
290
|
+
num_patches: Optional[int],
|
|
291
|
+
) -> PromptUpdateDetails[str]:
|
|
292
|
+
raise NotImplementedError
|
|
293
|
+
|
|
294
|
+
def resolve_min_max_num(
|
|
295
|
+
self,
|
|
296
|
+
*,
|
|
297
|
+
min_dynamic_patch: Optional[int] = None,
|
|
298
|
+
max_dynamic_patch: Optional[int] = None,
|
|
299
|
+
dynamic_image_size: Optional[bool] = None,
|
|
300
|
+
use_thumbnail: Optional[bool] = None,
|
|
301
|
+
) -> tuple[int, int]:
|
|
302
|
+
min_dynamic_patch = (self.min_dynamic_patch if min_dynamic_patch
|
|
303
|
+
is None else min_dynamic_patch)
|
|
304
|
+
max_dynamic_patch = (self.max_dynamic_patch if max_dynamic_patch
|
|
305
|
+
is None else max_dynamic_patch)
|
|
306
|
+
dynamic_image_size = (self.dynamic_image_size if dynamic_image_size
|
|
307
|
+
is None else dynamic_image_size)
|
|
308
|
+
use_thumbnail = (self.use_thumbnail
|
|
309
|
+
if use_thumbnail is None else use_thumbnail)
|
|
310
|
+
|
|
311
|
+
return resolve_skyworkr1v_min_max_num(
|
|
312
|
+
min_dynamic_patch=min_dynamic_patch,
|
|
313
|
+
max_dynamic_patch=max_dynamic_patch,
|
|
314
|
+
dynamic_image_size=dynamic_image_size,
|
|
315
|
+
use_thumbnail=use_thumbnail,
|
|
316
|
+
)
|
|
317
|
+
|
|
318
|
+
def resolve_target_ratios(
|
|
319
|
+
self,
|
|
320
|
+
*,
|
|
321
|
+
min_dynamic_patch: Optional[int] = None,
|
|
322
|
+
max_dynamic_patch: Optional[int] = None,
|
|
323
|
+
dynamic_image_size: Optional[bool] = None,
|
|
324
|
+
use_thumbnail: Optional[bool] = None,
|
|
325
|
+
) -> list[tuple[int, int]]:
|
|
326
|
+
min_num, max_num = self.resolve_min_max_num(
|
|
327
|
+
min_dynamic_patch=min_dynamic_patch,
|
|
328
|
+
max_dynamic_patch=max_dynamic_patch,
|
|
329
|
+
dynamic_image_size=dynamic_image_size,
|
|
330
|
+
use_thumbnail=use_thumbnail,
|
|
331
|
+
)
|
|
332
|
+
|
|
333
|
+
return get_skyworkr1v_target_ratios(min_num, max_num)
|
|
334
|
+
|
|
335
|
+
def get_num_image_tokens(
|
|
336
|
+
self,
|
|
337
|
+
*,
|
|
338
|
+
image_width: int,
|
|
339
|
+
image_height: int,
|
|
340
|
+
) -> int:
|
|
341
|
+
target_ratios = self.resolve_target_ratios(
|
|
342
|
+
use_thumbnail=False, # Applied in calculate_targets
|
|
343
|
+
)
|
|
344
|
+
|
|
345
|
+
num_patches, _, _ = calculate_skyworkr1v_targets(
|
|
346
|
+
orig_width=image_width,
|
|
347
|
+
orig_height=image_height,
|
|
348
|
+
image_size=self.image_size,
|
|
349
|
+
target_ratios=target_ratios,
|
|
350
|
+
use_thumbnail=self.use_thumbnail,
|
|
351
|
+
)
|
|
352
|
+
|
|
353
|
+
return num_patches * self.num_image_token
|
|
354
|
+
|
|
355
|
+
def _images_to_pixel_values_lst(
|
|
356
|
+
self,
|
|
357
|
+
images: list[Image.Image],
|
|
358
|
+
min_dynamic_patch: Optional[int] = None,
|
|
359
|
+
max_dynamic_patch: Optional[int] = None,
|
|
360
|
+
dynamic_image_size: Optional[bool] = None,
|
|
361
|
+
) -> list[torch.Tensor]:
|
|
362
|
+
min_num, max_num = self.resolve_min_max_num(
|
|
363
|
+
min_dynamic_patch=min_dynamic_patch,
|
|
364
|
+
max_dynamic_patch=max_dynamic_patch,
|
|
365
|
+
dynamic_image_size=dynamic_image_size,
|
|
366
|
+
use_thumbnail=False, # Applied in image_to_pixel_values
|
|
367
|
+
)
|
|
368
|
+
|
|
369
|
+
return [
|
|
370
|
+
image_to_pixel_values_skyworkr1v(
|
|
371
|
+
image,
|
|
372
|
+
input_size=self.image_size,
|
|
373
|
+
min_num=min_num,
|
|
374
|
+
max_num=max_num,
|
|
375
|
+
use_thumbnail=self.use_thumbnail,
|
|
376
|
+
) for image in images
|
|
377
|
+
]
|
|
378
|
+
|
|
379
|
+
def __call__(
|
|
380
|
+
self,
|
|
381
|
+
text: Optional[Union[str, list[str]]] = None,
|
|
382
|
+
images: Optional[Union[Image.Image, list[Image.Image]]] = None,
|
|
383
|
+
min_dynamic_patch: Optional[int] = None,
|
|
384
|
+
max_dynamic_patch: Optional[int] = None,
|
|
385
|
+
dynamic_image_size: Optional[bool] = None,
|
|
386
|
+
return_tensors: Optional[Union[str, TensorType]] = None,
|
|
387
|
+
) -> Mapping[str, NestedTensors]:
|
|
388
|
+
if text is None:
|
|
389
|
+
text = []
|
|
390
|
+
if not isinstance(text, list):
|
|
391
|
+
text = [text]
|
|
392
|
+
if images is None:
|
|
393
|
+
images = []
|
|
394
|
+
if not isinstance(images, list):
|
|
395
|
+
images = [images]
|
|
396
|
+
|
|
397
|
+
if len(images) == 0:
|
|
398
|
+
image_inputs = {}
|
|
399
|
+
else:
|
|
400
|
+
pixel_values_lst = self._images_to_pixel_values_lst(
|
|
401
|
+
images,
|
|
402
|
+
min_dynamic_patch=min_dynamic_patch,
|
|
403
|
+
max_dynamic_patch=max_dynamic_patch,
|
|
404
|
+
dynamic_image_size=dynamic_image_size,
|
|
405
|
+
)
|
|
406
|
+
image_inputs: dict[str, NestedTensors] = {
|
|
407
|
+
"pixel_values_flat":
|
|
408
|
+
torch.cat(pixel_values_lst),
|
|
409
|
+
"image_num_patches":
|
|
410
|
+
torch.tensor([len(item) for item in pixel_values_lst]),
|
|
411
|
+
}
|
|
412
|
+
|
|
413
|
+
for pixel_values in pixel_values_lst:
|
|
414
|
+
num_patches = pixel_values.shape[0]
|
|
415
|
+
feature_size = num_patches * self.num_image_token
|
|
416
|
+
|
|
417
|
+
image_repl = self.get_image_repl(feature_size, num_patches)
|
|
418
|
+
|
|
419
|
+
text = [t.replace('<image>', image_repl.full, 1) for t in text]
|
|
420
|
+
|
|
421
|
+
text_inputs = self.tokenizer(text)
|
|
422
|
+
|
|
423
|
+
return {
|
|
424
|
+
**BatchEncoding(text_inputs, tensor_type=return_tensors),
|
|
425
|
+
**image_inputs,
|
|
426
|
+
}
|
|
427
|
+
|
|
428
|
+
|
|
429
|
+
class SkyworkR1VProcessor(BaseSkyworkR1VProcessor):
|
|
430
|
+
|
|
431
|
+
@property
|
|
432
|
+
def image_token_id(self) -> int:
|
|
433
|
+
return self.tokenizer.get_vocab()[IMG_CONTEXT]
|
|
434
|
+
|
|
435
|
+
def get_image_repl(
|
|
436
|
+
self,
|
|
437
|
+
feature_size: int,
|
|
438
|
+
num_patches: Optional[int],
|
|
439
|
+
) -> PromptUpdateDetails[str]:
|
|
440
|
+
repl_features = IMG_CONTEXT * feature_size
|
|
441
|
+
repl_full = IMG_START + repl_features + IMG_END
|
|
442
|
+
|
|
443
|
+
return PromptUpdateDetails.select_text(repl_full, IMG_CONTEXT)
|
|
444
|
+
|
|
445
|
+
|
|
446
|
+
class BaseSkyworkR1VProcessingInfo(BaseProcessingInfo):
|
|
447
|
+
|
|
448
|
+
@abstractmethod
|
|
449
|
+
def get_hf_processor(
|
|
450
|
+
self,
|
|
451
|
+
*,
|
|
452
|
+
min_dynamic_patch: Optional[int] = None,
|
|
453
|
+
max_dynamic_patch: Optional[int] = None,
|
|
454
|
+
dynamic_image_size: Optional[bool] = None,
|
|
455
|
+
**kwargs: object,
|
|
456
|
+
) -> BaseSkyworkR1VProcessor:
|
|
457
|
+
raise NotImplementedError
|
|
458
|
+
|
|
459
|
+
def get_supported_mm_limits(self) -> Mapping[str, Optional[int]]:
|
|
460
|
+
return {"image": None}
|
|
461
|
+
|
|
462
|
+
def get_num_image_tokens(
|
|
463
|
+
self,
|
|
464
|
+
*,
|
|
465
|
+
image_width: int,
|
|
466
|
+
image_height: int,
|
|
467
|
+
processor: Optional[BaseSkyworkR1VProcessor],
|
|
468
|
+
) -> int:
|
|
469
|
+
if processor is None:
|
|
470
|
+
processor = self.get_hf_processor()
|
|
471
|
+
|
|
472
|
+
return processor.get_num_image_tokens(
|
|
473
|
+
image_width=image_width,
|
|
474
|
+
image_height=image_height,
|
|
475
|
+
)
|
|
476
|
+
|
|
477
|
+
def get_image_size_with_most_features(self) -> ImageSize:
|
|
478
|
+
processor = self.get_hf_processor()
|
|
479
|
+
|
|
480
|
+
base_size = processor.image_size
|
|
481
|
+
target_ratios = processor.resolve_target_ratios()
|
|
482
|
+
|
|
483
|
+
largest_feature_size, largest_feature_pinpoint = 0, None
|
|
484
|
+
for wr, hr in target_ratios:
|
|
485
|
+
width, height = base_size * wr, base_size * hr
|
|
486
|
+
|
|
487
|
+
feat_size = self.get_num_image_tokens(
|
|
488
|
+
image_width=width,
|
|
489
|
+
image_height=height,
|
|
490
|
+
processor=processor,
|
|
491
|
+
)
|
|
492
|
+
if feat_size > largest_feature_size:
|
|
493
|
+
largest_feature_size = feat_size
|
|
494
|
+
largest_feature_pinpoint = ImageSize(width=width,
|
|
495
|
+
height=height)
|
|
496
|
+
|
|
497
|
+
if largest_feature_size == 0 or largest_feature_pinpoint is None:
|
|
498
|
+
raise ValueError("Cannot have a largest feature size of 0!")
|
|
499
|
+
|
|
500
|
+
return largest_feature_pinpoint
|
|
501
|
+
|
|
502
|
+
|
|
503
|
+
_I = TypeVar("_I", bound=BaseSkyworkR1VProcessingInfo)
|
|
504
|
+
|
|
505
|
+
|
|
506
|
+
class SkyworkR1VDummyInputsBuilder(BaseDummyInputsBuilder[_I]):
|
|
507
|
+
|
|
508
|
+
def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
|
|
509
|
+
num_images = mm_counts.get("image", 0)
|
|
510
|
+
|
|
511
|
+
return "<image>" * num_images
|
|
512
|
+
|
|
513
|
+
def get_dummy_mm_data(
|
|
514
|
+
self,
|
|
515
|
+
seq_len: int,
|
|
516
|
+
mm_counts: Mapping[str, int],
|
|
517
|
+
) -> MultiModalDataDict:
|
|
518
|
+
target_width, target_height = \
|
|
519
|
+
self.info.get_image_size_with_most_features()
|
|
520
|
+
num_images = mm_counts.get("image", 0)
|
|
521
|
+
|
|
522
|
+
return {
|
|
523
|
+
"image":
|
|
524
|
+
self._get_dummy_images(width=target_width,
|
|
525
|
+
height=target_height,
|
|
526
|
+
num_images=num_images)
|
|
527
|
+
}
|
|
528
|
+
|
|
529
|
+
|
|
530
|
+
class SkyworkR1VMultiModalProcessor(BaseMultiModalProcessor[_I]):
|
|
531
|
+
|
|
532
|
+
def _call_hf_processor(
|
|
533
|
+
self,
|
|
534
|
+
prompt: str,
|
|
535
|
+
mm_data: Mapping[str, object],
|
|
536
|
+
mm_kwargs: Mapping[str, object],
|
|
537
|
+
) -> Mapping[str, NestedTensors]:
|
|
538
|
+
processed_outputs = super()._call_hf_processor(
|
|
539
|
+
prompt=prompt,
|
|
540
|
+
mm_data=mm_data,
|
|
541
|
+
mm_kwargs=mm_kwargs,
|
|
542
|
+
)
|
|
543
|
+
|
|
544
|
+
hf_processor = self.info.get_hf_processor(**mm_kwargs)
|
|
545
|
+
image_token_id = hf_processor.image_token_id
|
|
546
|
+
|
|
547
|
+
# Since there may be extra tokens in the feature placeholders,
|
|
548
|
+
# we need to pass the image token ID to the model to select the
|
|
549
|
+
# tokens to merge from the vision encoder outputs
|
|
550
|
+
processed_outputs["image_token_id"] = torch.tensor(image_token_id)
|
|
551
|
+
|
|
552
|
+
return processed_outputs
|
|
553
|
+
|
|
554
|
+
def _get_mm_fields_config(
|
|
555
|
+
self,
|
|
556
|
+
hf_inputs: Mapping[str, NestedTensors],
|
|
557
|
+
hf_processor_mm_kwargs: Mapping[str, object],
|
|
558
|
+
) -> Mapping[str, MultiModalFieldConfig]:
|
|
559
|
+
image_num_patches = hf_inputs.get("image_num_patches", torch.empty(0))
|
|
560
|
+
num_images = len(image_num_patches)
|
|
561
|
+
|
|
562
|
+
return dict(
|
|
563
|
+
pixel_values_flat=MultiModalFieldConfig.flat_from_sizes(
|
|
564
|
+
"image", image_num_patches),
|
|
565
|
+
image_num_patches=MultiModalFieldConfig.batched("image"),
|
|
566
|
+
image_embeds=MultiModalFieldConfig.batched("image"),
|
|
567
|
+
image_token_id=MultiModalFieldConfig.shared("image", num_images),
|
|
568
|
+
)
|
|
569
|
+
|
|
570
|
+
    def _get_prompt_updates(
        self,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, object],
        out_mm_kwargs: MultiModalKwargs,
    ) -> Sequence[PromptUpdate]:
        hf_processor = self.info.get_hf_processor(**hf_processor_mm_kwargs)

        if "image_num_patches" in out_mm_kwargs:
            image_num_patches = out_mm_kwargs["image_num_patches"]
            assert isinstance(image_num_patches, torch.Tensor)
            image_num_patches = image_num_patches.tolist()
        elif "image_embeds" in out_mm_kwargs:
            # TODO: Use image size information in dictionary embedding inputs
            # to compute num_patches (similar to Qwen2-VL)
            image_num_patches = [None] * len(out_mm_kwargs["image_embeds"])
        else:
            image_num_patches = []

        def get_replacement_skyworkr1v(item_idx: int):
            images = mm_items.get_items(
                "image", (ImageEmbeddingItems, ImageProcessorItems))

            if isinstance(images, ImageEmbeddingItems):
                feature_size = images.get_feature_size(item_idx)
            else:
                image_size = images.get_image_size(item_idx)
                feature_size = self.info.get_num_image_tokens(
                    image_width=image_size.width,
                    image_height=image_size.height,
                    processor=hf_processor,
                )

            num_patches = image_num_patches[item_idx]
            if num_patches is not None:
                assert isinstance(num_patches, int)

            return hf_processor.get_image_repl(feature_size, num_patches)

        return [
            PromptReplacement(
                modality="image",
                target="<image>",
                replacement=get_replacement_skyworkr1v,
            )
        ]

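# The PromptReplacement above substitutes each literal "<image>" with a
# per-item expansion computed by get_replacement_skyworkr1v. A string-level
# sketch of that substitution order; the placeholder text is hypothetical:

_prompt = "Compare <image> with <image>."
_expansions = ["<IMG_CONTEXT>" * 4, "<IMG_CONTEXT>" * 8]

_parts = _prompt.split("<image>")
_result = _parts[0]
for _exp, _tail in zip(_expansions, _parts[1:]):
    _result += _exp + _tail
assert _result.count("<IMG_CONTEXT>") == 12
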
class SkyworkR1VProcessingInfo(BaseSkyworkR1VProcessingInfo):

    def get_hf_processor(
        self,
        *,
        min_dynamic_patch: Optional[int] = None,
        max_dynamic_patch: Optional[int] = None,
        dynamic_image_size: Optional[bool] = None,
        **kwargs: object,
    ) -> SkyworkR1VProcessor:
        if min_dynamic_patch is not None:
            kwargs["min_dynamic_patch"] = min_dynamic_patch
        if max_dynamic_patch is not None:
            kwargs["max_dynamic_patch"] = max_dynamic_patch
        if dynamic_image_size is not None:
            kwargs["dynamic_image_size"] = dynamic_image_size

        return self.ctx.init_processor(
            SkyworkR1VProcessor,
            config=self.get_hf_config(),
            tokenizer=self.get_tokenizer(),
            **kwargs,
        )

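# get_hf_processor above forwards an override into kwargs only when the
# caller actually supplied it, so the processor's own defaults survive.
# The same pattern in miniature (the function and names are illustrative):

def _build_kwargs(max_dynamic_patch=None, **kwargs):
    if max_dynamic_patch is not None:
        kwargs["max_dynamic_patch"] = max_dynamic_patch
    return kwargs

assert _build_kwargs() == {}
assert _build_kwargs(max_dynamic_patch=12) == {"max_dynamic_patch": 12}
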
@MULTIMODAL_REGISTRY.register_processor(
    SkyworkR1VMultiModalProcessor,
    info=SkyworkR1VProcessingInfo,
    dummy_inputs=SkyworkR1VDummyInputsBuilder)
class SkyworkR1VChatModel(nn.Module, SupportsMultiModal, SupportsPP):

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None:
        super().__init__()

        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        multimodal_config = vllm_config.model_config.multimodal_config

        self.config = config
        self.multimodal_config = multimodal_config
        self._patch_quant_config(config, quant_config)

        image_size = config.force_image_size or config.vision_config.image_size
        patch_size = config.vision_config.patch_size
        self.patch_size = patch_size
        self.num_image_token = int(
            (image_size // patch_size)**2 * (config.downsample_ratio**2))
        self.downsample_ratio = config.downsample_ratio
        self.ps_version = config.ps_version

        self.llm_arch_name = config.text_config.architectures[0]
        self.is_mono = self.llm_arch_name == 'SkyworkLM2VEForCausalLM'
        self.vision_model = self._init_vision_model(
            config,
            quant_config=quant_config,
            is_mono=self.is_mono,
            prefix=maybe_prefix(prefix, "vision_model"),
        )

        self.language_model = init_vllm_registered_model(
            vllm_config=vllm_config,
            hf_config=config.text_config,
            prefix=maybe_prefix(prefix, "language_model"),
        )

        self.mlp1 = self._init_mlp1(config)

        self.img_context_token_id = None
        self.visual_token_mask = None
        self.make_empty_intermediate_tensors = (
            self.language_model.make_empty_intermediate_tensors)

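# num_image_token above fixes how many LLM positions each vision patch
# occupies. Worked numbers under an assumed InternViT-style config (448px
# images, 14px patches, 0.5 downsample ratio; the values are illustrative):

_image_size, _patch_size, _downsample_ratio = 448, 14, 0.5
_num_image_token = int((_image_size // _patch_size)**2 * _downsample_ratio**2)
assert _num_image_token == 256  # a 32x32 token grid, merged 4-to-1
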
    def _patch_quant_config(self, config: PretrainedConfig,
                            quant_config: QuantizationConfig):
        # The AWQ models from OpenGVLab are missing `modules_to_not_convert`,
        # so patch the quant_config to add `modules_to_not_convert` back.
        if isinstance(quant_config, AWQConfig):
            text_config = config.text_config
            llm_quant_config = getattr(text_config, "quantization_config",
                                       None)
            if (not quant_config.modules_to_not_convert) and \
                    (llm_quant_config is not None):
                quant_config.modules_to_not_convert.append("vision_model")

    def _init_vision_model(
        self,
        config: PretrainedConfig,
        quant_config: Optional[QuantizationConfig],
        *,
        is_mono: bool,
        prefix: str,
    ):
        if not is_mono:
            vision_feature_layer = config.select_layer
            if vision_feature_layer < 0:
                num_hidden_layers = config.vision_config.num_hidden_layers \
                    + vision_feature_layer + 1
            else:
                num_hidden_layers = vision_feature_layer + 1

            return InternVisionModel(
                config.vision_config,
                quant_config=quant_config,
                num_hidden_layers_override=num_hidden_layers,
                prefix=prefix,
            )
        else:
            return InternVisionPatchModel(config.vision_config)

    def _init_mlp1(self, config: PretrainedConfig) -> nn.Sequential:
        vit_hidden_size = config.vision_config.hidden_size
        llm_hidden_size = config.text_config.hidden_size

        return nn.Sequential(
            nn.LayerNorm(vit_hidden_size * int(1 / self.downsample_ratio)**2),
            ReplicatedLinear(vit_hidden_size *
                             int(1 / self.downsample_ratio)**2,
                             llm_hidden_size,
                             return_bias=False),
            nn.GELU(),
            ReplicatedLinear(llm_hidden_size,
                             llm_hidden_size,
                             return_bias=False),
        )

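# _init_mlp1 above sizes its first layers for pixel-shuffled features, whose
# channel dimension grows by int(1 / downsample_ratio)**2. With the same
# assumed config as before (hidden size 1024, ratio 0.5; illustrative only):

_vit_hidden_size, _downsample_ratio = 1024, 0.5
_proj_in = _vit_hidden_size * int(1 / _downsample_ratio)**2
assert _proj_in == 4096  # four spatially merged ViT channels per LLM token
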
    def pixel_shuffle(self, x, scale_factor=0.5):
        n, w, h, c = x.size()
        # N, W, H, C --> N, W, H * scale, C // scale
        x = x.view(n, w, int(h * scale_factor), int(c / scale_factor))
        # N, W, H * scale, C // scale --> N, H * scale, W, C // scale
        x = x.permute(0, 2, 1, 3).contiguous()
        x = x.view(n, int(h * scale_factor), int(w * scale_factor),
                   int(c / (scale_factor * scale_factor)))
        if self.ps_version == 'v1':
            pass
        else:
            x = x.permute(0, 2, 1, 3).contiguous()
        return x

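# A standalone shape check for pixel_shuffle with scale_factor=0.5, assuming
# a 32x32 grid of 1024-dim tokens (sizes are hypothetical): (N, W, H, C)
# becomes (N, H*s, W*s, C/s^2), trading spatial resolution for channels.

import torch

_x = torch.randn(1, 32, 32, 1024)
_n, _w, _h, _c = _x.size()
_s = 0.5
_x = _x.view(_n, _w, int(_h * _s), int(_c / _s))
_x = _x.permute(0, 2, 1, 3).contiguous()
_x = _x.view(_n, int(_h * _s), int(_w * _s), int(_c / (_s * _s)))
assert _x.shape == (1, 16, 16, 4096)
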
    def extract_feature(self, pixel_values: torch.Tensor) -> torch.Tensor:
        vit_embeds = self.vision_model(pixel_values=pixel_values)
        vit_embeds = vit_embeds[:, 1:, :]

        h = w = int(vit_embeds.shape[1]**0.5)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
        vit_embeds = self.pixel_shuffle(vit_embeds,
                                        scale_factor=self.downsample_ratio)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1,
                                        vit_embeds.shape[-1])
        vit_embeds = self.mlp1(vit_embeds)
        return vit_embeds

    def _validate_pixel_values(self, data: torch.Tensor) -> torch.Tensor:

        h = w = self.config.vision_config.image_size
        expected_dims = (3, h, w)

        def _validate_shape(d: torch.Tensor):
            actual_dims = tuple(d.shape)

            if actual_dims != expected_dims:
                expected_expr = str(expected_dims)
                raise ValueError(
                    "The expected shape of pixel values per image per batch "
                    f"per patch is {expected_expr}. "
                    f"You supplied {actual_dims}.")

        for d in data:
            _validate_shape(d)

        return data

    def _parse_and_validate_image_input(
            self, **kwargs: object) -> Optional[SkyworkR1VImageInputs]:
        pixel_values_flat = kwargs.pop("pixel_values_flat", None)
        image_num_patches = kwargs.pop("image_num_patches", None)
        image_embeds = kwargs.pop("image_embeds", None)

        if pixel_values_flat is None and image_embeds is None:
            return None

        if image_embeds is not None:
            if not isinstance(image_embeds, (torch.Tensor, list)):
                raise ValueError("Incorrect type of image embeddings. "
                                 f"Got type: {type(image_embeds)}")

            return SkyworkR1VImageEmbeddingInputs(
                type="image_embeds",
                data=flatten_bn(image_embeds),
            )

        image_token_id = kwargs["image_token_id"]
        assert isinstance(image_token_id, torch.Tensor)
        self.img_context_token_id = image_token_id.flatten().unique().item()

        if pixel_values_flat is not None:
            if not isinstance(pixel_values_flat, (torch.Tensor, list)):
                raise ValueError("Incorrect type of pixel values. "
                                 f"Got type: {type(pixel_values_flat)}")

            if not isinstance(image_num_patches, (torch.Tensor, list)):
                raise ValueError("Incorrect type of image_num_patches. "
                                 f"Got type: {type(image_num_patches)}")

            pixel_values_flat = flatten_bn(pixel_values_flat, concat=True)
            image_num_patches = flatten_bn(image_num_patches, concat=True)

            return SkyworkR1VImagePixelInputs(
                type="pixel_values",
                pixel_values_flat=self._validate_pixel_values(
                    pixel_values_flat),
                num_patches=image_num_patches,
            )

        raise AssertionError("This line should be unreachable.")

    def _process_image_input(
        self,
        image_input: SkyworkR1VImageInputs,
    ) -> Union[torch.Tensor, list[torch.Tensor], tuple[torch.Tensor, ...]]:
        if image_input["type"] == "image_embeds":
            return image_input["data"]

        assert self.vision_model is not None

        image_embeds = self.extract_feature(image_input["pixel_values_flat"])

        num_patches = image_input["num_patches"]

        # Only one image in the current batch
        if len(num_patches) == 1:
            return image_embeds.view(
                -1, self.config.text_config.hidden_size).unsqueeze(0)

        # NOTE: Image embeddings are split into separate tensors for each image
        # by the size of each embedding.
        feature_size = image_embeds.shape[1]
        image_embeds = image_embeds.view(-1,
                                         self.config.text_config.hidden_size)
        image_feature_sizes = [
            num_patches * feature_size for num_patches in num_patches
        ]
        return image_embeds.split(image_feature_sizes)

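# The multi-image branch of _process_image_input above flattens every image
# embedding and splits the result back by num_patches * feature_size. A
# sketch with hypothetical sizes (256 tokens per patch, hidden size 8):

import torch

_feature_size, _hidden = 256, 8
_num_patches = [2, 3]
_embeds = torch.randn(sum(_num_patches) * _feature_size, _hidden)
_sizes = [n * _feature_size for n in _num_patches]
_per_image = _embeds.split(_sizes)
assert [t.shape[0] for t in _per_image] == [512, 768]
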
    def _set_visual_token_mask(self, input_ids: torch.Tensor) -> None:
        if self.is_mono:
            self.visual_token_mask = (
                input_ids == self.img_context_token_id).reshape(-1, 1)
        else:
            self.visual_token_mask = None

    def get_language_model(self) -> torch.nn.Module:
        return self.language_model

    def get_multimodal_embeddings(
            self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
        image_input = self._parse_and_validate_image_input(**kwargs)
        if image_input is None:
            return None

        return self._process_image_input(image_input)

    def get_input_embeddings(
        self,
        input_ids: torch.Tensor,
        multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
    ) -> torch.Tensor:
        inputs_embeds = self.language_model.get_input_embeddings(input_ids)
        if multimodal_embeddings is not None:
            assert self.img_context_token_id is not None
            self._set_visual_token_mask(input_ids)
            inputs_embeds = merge_multimodal_embeddings(
                input_ids,
                inputs_embeds,
                multimodal_embeddings,
                self.img_context_token_id,
            )
        return inputs_embeds

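# merge_multimodal_embeddings scatters the image embeddings into the rows of
# inputs_embeds whose token is img_context_token_id. The core mechanism in
# miniature (token id 9 and the tensor sizes below are hypothetical):

import torch

_input_ids = torch.tensor([1, 9, 9, 2])
_inputs_embeds = torch.zeros(4, 8)
_mm_embeds = torch.ones(2, 8)
_mask = _input_ids == 9
_inputs_embeds[_mask] = _mm_embeds  # the two placeholder rows are replaced
assert _inputs_embeds.sum().item() == 16.0
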
    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        intermediate_tensors: Optional[IntermediateTensors] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        **kwargs: object,
    ) -> IntermediateTensors:

        if intermediate_tensors is not None:
            input_ids = None
            inputs_embeds = None

        # NOTE: In v1, inputs_embeds is always generated at the model runner;
        # this condition is for v0 compatibility.
        elif inputs_embeds is None:
            vision_embeddings = self.get_multimodal_embeddings(**kwargs)
            inputs_embeds = self.get_input_embeddings(input_ids,
                                                      vision_embeddings)
            input_ids = None

        forward_kwargs = {
            "input_ids": input_ids,
            "positions": positions,
            "intermediate_tensors": intermediate_tensors,
            "inputs_embeds": inputs_embeds,
        }

        # Only required if the model is mono-architecture
        if self.visual_token_mask is not None:
            forward_kwargs.update(
                {"visual_token_mask": self.visual_token_mask})
            self.visual_token_mask = None

        hidden_states = self.language_model.model(**forward_kwargs)
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[torch.Tensor]:
        return self.language_model.compute_logits(hidden_states,
                                                  sampling_metadata)

    def load_weights(self, weights: Iterable[tuple[str,
                                                   torch.Tensor]]) -> set[str]:
        skip_prefixes = [
            "action_embed", "temporal_embed", "track_embed",
            "track_embed_decoder", "box_token", "cg_criterion", "cg_model",
            "loc_encoder", "loc_decoder", "sam", "temporal_token",
            "track_token"
        ]
        loader = AutoWeightsLoader(self, skip_prefixes=skip_prefixes)
        return loader.load_weights(weights)