vllm-cpu-avx512bf16-0.14.0-cp313-cp313-manylinux_2_28_x86_64.whl
This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- vllm/_C.abi3.so +0 -0
- vllm/__init__.py +225 -0
- vllm/_aiter_ops.py +1511 -0
- vllm/_bc_linter.py +54 -0
- vllm/_custom_ops.py +3206 -0
- vllm/_ipex_ops.py +445 -0
- vllm/_version.py +34 -0
- vllm/assets/__init__.py +0 -0
- vllm/assets/audio.py +43 -0
- vllm/assets/base.py +40 -0
- vllm/assets/image.py +62 -0
- vllm/assets/video.py +149 -0
- vllm/attention/__init__.py +0 -0
- vllm/attention/layer.py +913 -0
- vllm/attention/utils/__init__.py +0 -0
- vllm/attention/utils/kv_sharing_utils.py +33 -0
- vllm/attention/utils/kv_transfer_utils.py +60 -0
- vllm/beam_search.py +88 -0
- vllm/benchmarks/__init__.py +0 -0
- vllm/benchmarks/datasets.py +3277 -0
- vllm/benchmarks/latency.py +172 -0
- vllm/benchmarks/lib/__init__.py +3 -0
- vllm/benchmarks/lib/endpoint_request_func.py +777 -0
- vllm/benchmarks/lib/ready_checker.py +72 -0
- vllm/benchmarks/lib/utils.py +79 -0
- vllm/benchmarks/mm_processor.py +363 -0
- vllm/benchmarks/serve.py +1761 -0
- vllm/benchmarks/startup.py +321 -0
- vllm/benchmarks/sweep/__init__.py +0 -0
- vllm/benchmarks/sweep/cli.py +41 -0
- vllm/benchmarks/sweep/param_sweep.py +159 -0
- vllm/benchmarks/sweep/plot.py +675 -0
- vllm/benchmarks/sweep/plot_pareto.py +393 -0
- vllm/benchmarks/sweep/serve.py +450 -0
- vllm/benchmarks/sweep/serve_sla.py +459 -0
- vllm/benchmarks/sweep/server.py +114 -0
- vllm/benchmarks/sweep/sla_sweep.py +138 -0
- vllm/benchmarks/sweep/utils.py +4 -0
- vllm/benchmarks/throughput.py +946 -0
- vllm/collect_env.py +857 -0
- vllm/compilation/__init__.py +0 -0
- vllm/compilation/activation_quant_fusion.py +214 -0
- vllm/compilation/backends.py +840 -0
- vllm/compilation/base_static_graph.py +57 -0
- vllm/compilation/caching.py +196 -0
- vllm/compilation/collective_fusion.py +1224 -0
- vllm/compilation/compiler_interface.py +639 -0
- vllm/compilation/counter.py +50 -0
- vllm/compilation/cuda_graph.py +309 -0
- vllm/compilation/decorators.py +662 -0
- vllm/compilation/fix_functionalization.py +266 -0
- vllm/compilation/fusion.py +570 -0
- vllm/compilation/fusion_attn.py +363 -0
- vllm/compilation/fx_utils.py +92 -0
- vllm/compilation/inductor_pass.py +145 -0
- vllm/compilation/matcher_utils.py +454 -0
- vllm/compilation/monitor.py +62 -0
- vllm/compilation/noop_elimination.py +130 -0
- vllm/compilation/partition_rules.py +75 -0
- vllm/compilation/pass_manager.py +164 -0
- vllm/compilation/piecewise_backend.py +191 -0
- vllm/compilation/post_cleanup.py +21 -0
- vllm/compilation/qk_norm_rope_fusion.py +244 -0
- vllm/compilation/rocm_aiter_fusion.py +401 -0
- vllm/compilation/sequence_parallelism.py +368 -0
- vllm/compilation/torch25_custom_graph_pass.py +44 -0
- vllm/compilation/vllm_inductor_pass.py +180 -0
- vllm/compilation/wrapper.py +329 -0
- vllm/config/__init__.py +112 -0
- vllm/config/attention.py +114 -0
- vllm/config/cache.py +233 -0
- vllm/config/compilation.py +1149 -0
- vllm/config/device.py +75 -0
- vllm/config/ec_transfer.py +110 -0
- vllm/config/kv_events.py +56 -0
- vllm/config/kv_transfer.py +119 -0
- vllm/config/load.py +124 -0
- vllm/config/lora.py +102 -0
- vllm/config/model.py +2026 -0
- vllm/config/model_arch.py +57 -0
- vllm/config/multimodal.py +247 -0
- vllm/config/observability.py +157 -0
- vllm/config/parallel.py +703 -0
- vllm/config/pooler.py +188 -0
- vllm/config/profiler.py +199 -0
- vllm/config/scheduler.py +298 -0
- vllm/config/speculative.py +656 -0
- vllm/config/speech_to_text.py +39 -0
- vllm/config/structured_outputs.py +78 -0
- vllm/config/utils.py +374 -0
- vllm/config/vllm.py +1487 -0
- vllm/connections.py +189 -0
- vllm/device_allocator/__init__.py +0 -0
- vllm/device_allocator/cumem.py +301 -0
- vllm/distributed/__init__.py +6 -0
- vllm/distributed/communication_op.py +43 -0
- vllm/distributed/device_communicators/__init__.py +0 -0
- vllm/distributed/device_communicators/all2all.py +509 -0
- vllm/distributed/device_communicators/all_reduce_utils.py +344 -0
- vllm/distributed/device_communicators/base_device_communicator.py +303 -0
- vllm/distributed/device_communicators/cpu_communicator.py +209 -0
- vllm/distributed/device_communicators/cuda_communicator.py +346 -0
- vllm/distributed/device_communicators/cuda_wrapper.py +190 -0
- vllm/distributed/device_communicators/custom_all_reduce.py +326 -0
- vllm/distributed/device_communicators/mnnvl_compat.py +27 -0
- vllm/distributed/device_communicators/pynccl.py +386 -0
- vllm/distributed/device_communicators/pynccl_allocator.py +191 -0
- vllm/distributed/device_communicators/pynccl_wrapper.py +567 -0
- vllm/distributed/device_communicators/quick_all_reduce.py +290 -0
- vllm/distributed/device_communicators/ray_communicator.py +259 -0
- vllm/distributed/device_communicators/shm_broadcast.py +778 -0
- vllm/distributed/device_communicators/shm_object_storage.py +697 -0
- vllm/distributed/device_communicators/symm_mem.py +156 -0
- vllm/distributed/device_communicators/xpu_communicator.py +98 -0
- vllm/distributed/ec_transfer/__init__.py +14 -0
- vllm/distributed/ec_transfer/ec_connector/__init__.py +0 -0
- vllm/distributed/ec_transfer/ec_connector/base.py +247 -0
- vllm/distributed/ec_transfer/ec_connector/example_connector.py +201 -0
- vllm/distributed/ec_transfer/ec_connector/factory.py +85 -0
- vllm/distributed/ec_transfer/ec_transfer_state.py +42 -0
- vllm/distributed/eplb/__init__.py +3 -0
- vllm/distributed/eplb/async_worker.py +115 -0
- vllm/distributed/eplb/eplb_state.py +1192 -0
- vllm/distributed/eplb/policy/__init__.py +19 -0
- vllm/distributed/eplb/policy/abstract.py +43 -0
- vllm/distributed/eplb/policy/default.py +376 -0
- vllm/distributed/eplb/rebalance_execute.py +699 -0
- vllm/distributed/kv_events.py +505 -0
- vllm/distributed/kv_transfer/README.md +29 -0
- vllm/distributed/kv_transfer/__init__.py +20 -0
- vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
- vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
- vllm/distributed/kv_transfer/kv_connector/factory.py +203 -0
- vllm/distributed/kv_transfer/kv_connector/utils.py +459 -0
- vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +19 -0
- vllm/distributed/kv_transfer/kv_connector/v1/base.py +607 -0
- vllm/distributed/kv_transfer/kv_connector/v1/decode_bench_connector.py +419 -0
- vllm/distributed/kv_transfer/kv_connector/v1/example_connector.py +450 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +344 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/__init__.py +18 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/multi_process_adapter.py +395 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/utils.py +211 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/vllm_v1_adapter.py +1431 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_mp_connector.py +941 -0
- vllm/distributed/kv_transfer/kv_connector/v1/metrics.py +186 -0
- vllm/distributed/kv_transfer/kv_connector/v1/mooncake_connector.py +916 -0
- vllm/distributed/kv_transfer/kv_connector/v1/moriio/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/v1/moriio/moriio_common.py +321 -0
- vllm/distributed/kv_transfer/kv_connector/v1/moriio/moriio_connector.py +1515 -0
- vllm/distributed/kv_transfer/kv_connector/v1/moriio/moriio_engine.py +609 -0
- vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +477 -0
- vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +2688 -0
- vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py +557 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +531 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +632 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +273 -0
- vllm/distributed/kv_transfer/kv_transfer_state.py +78 -0
- vllm/distributed/parallel_state.py +1809 -0
- vllm/distributed/utils.py +545 -0
- vllm/engine/__init__.py +0 -0
- vllm/engine/arg_utils.py +2137 -0
- vllm/engine/async_llm_engine.py +6 -0
- vllm/engine/llm_engine.py +6 -0
- vllm/engine/protocol.py +194 -0
- vllm/entrypoints/__init__.py +0 -0
- vllm/entrypoints/anthropic/__init__.py +0 -0
- vllm/entrypoints/anthropic/protocol.py +162 -0
- vllm/entrypoints/anthropic/serving_messages.py +468 -0
- vllm/entrypoints/api_server.py +186 -0
- vllm/entrypoints/chat_utils.py +1912 -0
- vllm/entrypoints/cli/__init__.py +19 -0
- vllm/entrypoints/cli/benchmark/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/base.py +25 -0
- vllm/entrypoints/cli/benchmark/latency.py +21 -0
- vllm/entrypoints/cli/benchmark/main.py +57 -0
- vllm/entrypoints/cli/benchmark/mm_processor.py +21 -0
- vllm/entrypoints/cli/benchmark/serve.py +21 -0
- vllm/entrypoints/cli/benchmark/startup.py +21 -0
- vllm/entrypoints/cli/benchmark/sweep.py +21 -0
- vllm/entrypoints/cli/benchmark/throughput.py +21 -0
- vllm/entrypoints/cli/collect_env.py +38 -0
- vllm/entrypoints/cli/main.py +79 -0
- vllm/entrypoints/cli/openai.py +260 -0
- vllm/entrypoints/cli/run_batch.py +68 -0
- vllm/entrypoints/cli/serve.py +253 -0
- vllm/entrypoints/cli/types.py +29 -0
- vllm/entrypoints/constants.py +12 -0
- vllm/entrypoints/context.py +898 -0
- vllm/entrypoints/grpc_server.py +531 -0
- vllm/entrypoints/launcher.py +175 -0
- vllm/entrypoints/llm.py +1807 -0
- vllm/entrypoints/logger.py +86 -0
- vllm/entrypoints/openai/__init__.py +0 -0
- vllm/entrypoints/openai/api_server.py +1390 -0
- vllm/entrypoints/openai/cli_args.py +320 -0
- vllm/entrypoints/openai/orca_metrics.py +120 -0
- vllm/entrypoints/openai/parser/__init__.py +0 -0
- vllm/entrypoints/openai/parser/harmony_utils.py +820 -0
- vllm/entrypoints/openai/parser/responses_parser.py +176 -0
- vllm/entrypoints/openai/protocol.py +2566 -0
- vllm/entrypoints/openai/run_batch.py +635 -0
- vllm/entrypoints/openai/serving_chat.py +1897 -0
- vllm/entrypoints/openai/serving_chat_stream_harmony.py +101 -0
- vllm/entrypoints/openai/serving_completion.py +740 -0
- vllm/entrypoints/openai/serving_engine.py +1612 -0
- vllm/entrypoints/openai/serving_models.py +309 -0
- vllm/entrypoints/openai/serving_responses.py +2552 -0
- vllm/entrypoints/openai/serving_transcription.py +168 -0
- vllm/entrypoints/openai/speech_to_text.py +711 -0
- vllm/entrypoints/openai/utils.py +49 -0
- vllm/entrypoints/pooling/__init__.py +16 -0
- vllm/entrypoints/pooling/classify/__init__.py +0 -0
- vllm/entrypoints/pooling/classify/api_router.py +48 -0
- vllm/entrypoints/pooling/classify/protocol.py +181 -0
- vllm/entrypoints/pooling/classify/serving.py +233 -0
- vllm/entrypoints/pooling/embed/__init__.py +0 -0
- vllm/entrypoints/pooling/embed/api_router.py +65 -0
- vllm/entrypoints/pooling/embed/conftest.py +28 -0
- vllm/entrypoints/pooling/embed/protocol.py +217 -0
- vllm/entrypoints/pooling/embed/serving.py +684 -0
- vllm/entrypoints/pooling/pooling/__init__.py +0 -0
- vllm/entrypoints/pooling/pooling/api_router.py +62 -0
- vllm/entrypoints/pooling/pooling/protocol.py +146 -0
- vllm/entrypoints/pooling/pooling/serving.py +354 -0
- vllm/entrypoints/pooling/score/__init__.py +0 -0
- vllm/entrypoints/pooling/score/api_router.py +147 -0
- vllm/entrypoints/pooling/score/protocol.py +146 -0
- vllm/entrypoints/pooling/score/serving.py +511 -0
- vllm/entrypoints/renderer.py +411 -0
- vllm/entrypoints/responses_utils.py +218 -0
- vllm/entrypoints/sagemaker/__init__.py +4 -0
- vllm/entrypoints/sagemaker/routes.py +118 -0
- vllm/entrypoints/score_utils.py +271 -0
- vllm/entrypoints/serve/__init__.py +94 -0
- vllm/entrypoints/serve/cache/__init__.py +0 -0
- vllm/entrypoints/serve/cache/api_router.py +61 -0
- vllm/entrypoints/serve/disagg/__init__.py +0 -0
- vllm/entrypoints/serve/disagg/api_router.py +109 -0
- vllm/entrypoints/serve/disagg/protocol.py +90 -0
- vllm/entrypoints/serve/disagg/serving.py +285 -0
- vllm/entrypoints/serve/elastic_ep/__init__.py +0 -0
- vllm/entrypoints/serve/elastic_ep/api_router.py +96 -0
- vllm/entrypoints/serve/elastic_ep/middleware.py +49 -0
- vllm/entrypoints/serve/instrumentator/__init__.py +0 -0
- vllm/entrypoints/serve/instrumentator/health.py +33 -0
- vllm/entrypoints/serve/instrumentator/metrics.py +45 -0
- vllm/entrypoints/serve/instrumentator/offline_docs.py +50 -0
- vllm/entrypoints/serve/instrumentator/server_info.py +56 -0
- vllm/entrypoints/serve/instrumentator/static/swagger-ui-bundle.js +2 -0
- vllm/entrypoints/serve/instrumentator/static/swagger-ui.css +3 -0
- vllm/entrypoints/serve/lora/__init__.py +0 -0
- vllm/entrypoints/serve/lora/api_router.py +70 -0
- vllm/entrypoints/serve/profile/__init__.py +0 -0
- vllm/entrypoints/serve/profile/api_router.py +46 -0
- vllm/entrypoints/serve/rlhf/__init__.py +0 -0
- vllm/entrypoints/serve/rlhf/api_router.py +102 -0
- vllm/entrypoints/serve/rpc/__init__.py +0 -0
- vllm/entrypoints/serve/rpc/api_router.py +61 -0
- vllm/entrypoints/serve/sleep/__init__.py +0 -0
- vllm/entrypoints/serve/sleep/api_router.py +56 -0
- vllm/entrypoints/serve/tokenize/__init__.py +0 -0
- vllm/entrypoints/serve/tokenize/api_router.py +112 -0
- vllm/entrypoints/serve/tokenize/serving.py +204 -0
- vllm/entrypoints/ssl.py +78 -0
- vllm/entrypoints/tool.py +187 -0
- vllm/entrypoints/tool_server.py +234 -0
- vllm/entrypoints/utils.py +336 -0
- vllm/env_override.py +402 -0
- vllm/envs.py +1791 -0
- vllm/exceptions.py +36 -0
- vllm/forward_context.py +375 -0
- vllm/grpc/__init__.py +17 -0
- vllm/grpc/compile_protos.py +94 -0
- vllm/grpc/vllm_engine.proto +195 -0
- vllm/grpc/vllm_engine_pb2.py +77 -0
- vllm/grpc/vllm_engine_pb2.pyi +213 -0
- vllm/grpc/vllm_engine_pb2_grpc.py +330 -0
- vllm/inputs/__init__.py +44 -0
- vllm/inputs/data.py +359 -0
- vllm/inputs/parse.py +147 -0
- vllm/inputs/preprocess.py +716 -0
- vllm/logger.py +303 -0
- vllm/logging_utils/__init__.py +13 -0
- vllm/logging_utils/dump_input.py +83 -0
- vllm/logging_utils/formatter.py +127 -0
- vllm/logging_utils/lazy.py +20 -0
- vllm/logging_utils/log_time.py +34 -0
- vllm/logits_process.py +121 -0
- vllm/logprobs.py +206 -0
- vllm/lora/__init__.py +0 -0
- vllm/lora/layers/__init__.py +43 -0
- vllm/lora/layers/base.py +66 -0
- vllm/lora/layers/base_linear.py +172 -0
- vllm/lora/layers/column_parallel_linear.py +577 -0
- vllm/lora/layers/fused_moe.py +739 -0
- vllm/lora/layers/logits_processor.py +203 -0
- vllm/lora/layers/replicated_linear.py +70 -0
- vllm/lora/layers/row_parallel_linear.py +176 -0
- vllm/lora/layers/utils.py +115 -0
- vllm/lora/layers/vocal_parallel_embedding.py +140 -0
- vllm/lora/lora_model.py +221 -0
- vllm/lora/lora_weights.py +227 -0
- vllm/lora/model_manager.py +858 -0
- vllm/lora/ops/__init__.py +0 -0
- vllm/lora/ops/ipex_ops/__init__.py +6 -0
- vllm/lora/ops/ipex_ops/lora_ops.py +57 -0
- vllm/lora/ops/torch_ops/__init__.py +20 -0
- vllm/lora/ops/torch_ops/lora_ops.py +128 -0
- vllm/lora/ops/triton_ops/README_TUNING.md +60 -0
- vllm/lora/ops/triton_ops/__init__.py +21 -0
- vllm/lora/ops/triton_ops/fused_moe_lora_op.py +677 -0
- vllm/lora/ops/triton_ops/kernel_utils.py +340 -0
- vllm/lora/ops/triton_ops/lora_expand_op.py +310 -0
- vllm/lora/ops/triton_ops/lora_kernel_metadata.py +154 -0
- vllm/lora/ops/triton_ops/lora_shrink_op.py +287 -0
- vllm/lora/ops/triton_ops/utils.py +313 -0
- vllm/lora/peft_helper.py +128 -0
- vllm/lora/punica_wrapper/__init__.py +10 -0
- vllm/lora/punica_wrapper/punica_base.py +493 -0
- vllm/lora/punica_wrapper/punica_cpu.py +351 -0
- vllm/lora/punica_wrapper/punica_gpu.py +413 -0
- vllm/lora/punica_wrapper/punica_selector.py +21 -0
- vllm/lora/punica_wrapper/punica_xpu.py +276 -0
- vllm/lora/punica_wrapper/utils.py +150 -0
- vllm/lora/request.py +60 -0
- vllm/lora/resolver.py +88 -0
- vllm/lora/utils.py +281 -0
- vllm/lora/worker_manager.py +278 -0
- vllm/model_executor/__init__.py +9 -0
- vllm/model_executor/custom_op.py +203 -0
- vllm/model_executor/layers/__init__.py +0 -0
- vllm/model_executor/layers/activation.py +628 -0
- vllm/model_executor/layers/attention/__init__.py +0 -0
- vllm/model_executor/layers/attention/chunked_local_attention.py +130 -0
- vllm/model_executor/layers/attention/cross_attention.py +182 -0
- vllm/model_executor/layers/attention/encoder_only_attention.py +103 -0
- vllm/model_executor/layers/attention/mm_encoder_attention.py +234 -0
- vllm/model_executor/layers/attention/static_sink_attention.py +254 -0
- vllm/model_executor/layers/attention_layer_base.py +34 -0
- vllm/model_executor/layers/batch_invariant.py +1063 -0
- vllm/model_executor/layers/conv.py +262 -0
- vllm/model_executor/layers/fla/__init__.py +8 -0
- vllm/model_executor/layers/fla/ops/__init__.py +17 -0
- vllm/model_executor/layers/fla/ops/chunk.py +240 -0
- vllm/model_executor/layers/fla/ops/chunk_delta_h.py +344 -0
- vllm/model_executor/layers/fla/ops/chunk_o.py +183 -0
- vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +154 -0
- vllm/model_executor/layers/fla/ops/cumsum.py +280 -0
- vllm/model_executor/layers/fla/ops/fused_recurrent.py +390 -0
- vllm/model_executor/layers/fla/ops/index.py +41 -0
- vllm/model_executor/layers/fla/ops/kda.py +1351 -0
- vllm/model_executor/layers/fla/ops/l2norm.py +146 -0
- vllm/model_executor/layers/fla/ops/layernorm_guard.py +396 -0
- vllm/model_executor/layers/fla/ops/op.py +60 -0
- vllm/model_executor/layers/fla/ops/solve_tril.py +556 -0
- vllm/model_executor/layers/fla/ops/utils.py +194 -0
- vllm/model_executor/layers/fla/ops/wy_fast.py +158 -0
- vllm/model_executor/layers/fused_moe/__init__.py +120 -0
- vllm/model_executor/layers/fused_moe/all2all_utils.py +173 -0
- vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +411 -0
- vllm/model_executor/layers/fused_moe/config.py +1111 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H100,dtype=fp8_w8a8.json +123 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_L40S.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_RTX_PRO_6000_Blackwell_Workstation_Edition,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI308X.json +213 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_L40S.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=129,N=704,device_name=NVIDIA_RTX_PRO_6000_Blackwell_Workstation_Edition,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI300X.json +201 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_B300_SXM6_AC,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI355_OAM,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=NVIDIA_B300_SXM6_AC,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=768,device_name=NVIDIA_B300_SXM6_AC,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=1536,device_name=NVIDIA_RTX_PRO_6000_Blackwell_Server_Edition,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=32,N=1408,device_name=NVIDIA_B200.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=128,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1408,device_name=NVIDIA_B200.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H100_PCIe,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=192,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/README +12 -0
- vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +444 -0
- vllm/model_executor/layers/fused_moe/cutlass_moe.py +1086 -0
- vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +364 -0
- vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +427 -0
- vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +420 -0
- vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +436 -0
- vllm/model_executor/layers/fused_moe/fallback.py +127 -0
- vllm/model_executor/layers/fused_moe/flashinfer_cutedsl_moe.py +338 -0
- vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +310 -0
- vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +371 -0
- vllm/model_executor/layers/fused_moe/flashinfer_trtllm_moe.py +192 -0
- vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1018 -0
- vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +824 -0
- vllm/model_executor/layers/fused_moe/fused_moe.py +2638 -0
- vllm/model_executor/layers/fused_moe/fused_moe_method_base.py +119 -0
- vllm/model_executor/layers/fused_moe/fused_moe_modular_method.py +117 -0
- vllm/model_executor/layers/fused_moe/fused_moe_router.py +40 -0
- vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +531 -0
- vllm/model_executor/layers/fused_moe/layer.py +2169 -0
- vllm/model_executor/layers/fused_moe/modular_kernel.py +1251 -0
- vllm/model_executor/layers/fused_moe/moe_align_block_size.py +192 -0
- vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +229 -0
- vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
- vllm/model_executor/layers/fused_moe/oracle/__init__.py +2 -0
- vllm/model_executor/layers/fused_moe/oracle/fp8.py +358 -0
- vllm/model_executor/layers/fused_moe/oracle/nvfp4.py +280 -0
- vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +362 -0
- vllm/model_executor/layers/fused_moe/prepare_finalize.py +87 -0
- vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +347 -0
- vllm/model_executor/layers/fused_moe/routed_experts_capturer.py +324 -0
- vllm/model_executor/layers/fused_moe/routing_simulator.py +310 -0
- vllm/model_executor/layers/fused_moe/shared_fused_moe.py +96 -0
- vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +171 -0
- vllm/model_executor/layers/fused_moe/triton_cutlass_moe.py +78 -0
- vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +75 -0
- vllm/model_executor/layers/fused_moe/trtllm_moe.py +144 -0
- vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py +403 -0
- vllm/model_executor/layers/fused_moe/utils.py +382 -0
- vllm/model_executor/layers/fused_moe/zero_expert_fused_moe.py +189 -0
- vllm/model_executor/layers/kda.py +442 -0
- vllm/model_executor/layers/layernorm.py +451 -0
- vllm/model_executor/layers/lightning_attn.py +735 -0
- vllm/model_executor/layers/linear.py +1478 -0
- vllm/model_executor/layers/logits_processor.py +109 -0
- vllm/model_executor/layers/mamba/__init__.py +0 -0
- vllm/model_executor/layers/mamba/abstract.py +68 -0
- vllm/model_executor/layers/mamba/linear_attn.py +410 -0
- vllm/model_executor/layers/mamba/mamba_mixer.py +541 -0
- vllm/model_executor/layers/mamba/mamba_mixer2.py +936 -0
- vllm/model_executor/layers/mamba/mamba_utils.py +225 -0
- vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
- vllm/model_executor/layers/mamba/ops/causal_conv1d.py +1240 -0
- vllm/model_executor/layers/mamba/ops/layernorm_gated.py +172 -0
- vllm/model_executor/layers/mamba/ops/mamba_ssm.py +586 -0
- vllm/model_executor/layers/mamba/ops/ssd_bmm.py +211 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +456 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +700 -0
- vllm/model_executor/layers/mamba/ops/ssd_combined.py +230 -0
- vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +157 -0
- vllm/model_executor/layers/mamba/short_conv.py +254 -0
- vllm/model_executor/layers/mla.py +179 -0
- vllm/model_executor/layers/pooler/__init__.py +5 -0
- vllm/model_executor/layers/pooler/abstract.py +39 -0
- vllm/model_executor/layers/pooler/activations.py +162 -0
- vllm/model_executor/layers/pooler/common.py +32 -0
- vllm/model_executor/layers/pooler/seqwise/__init__.py +45 -0
- vllm/model_executor/layers/pooler/seqwise/heads.py +151 -0
- vllm/model_executor/layers/pooler/seqwise/methods.py +93 -0
- vllm/model_executor/layers/pooler/seqwise/poolers.py +127 -0
- vllm/model_executor/layers/pooler/special.py +128 -0
- vllm/model_executor/layers/pooler/tokwise/__init__.py +39 -0
- vllm/model_executor/layers/pooler/tokwise/heads.py +133 -0
- vllm/model_executor/layers/pooler/tokwise/methods.py +122 -0
- vllm/model_executor/layers/pooler/tokwise/poolers.py +127 -0
- vllm/model_executor/layers/quantization/__init__.py +195 -0
- vllm/model_executor/layers/quantization/auto_round.py +454 -0
- vllm/model_executor/layers/quantization/awq.py +277 -0
- vllm/model_executor/layers/quantization/awq_marlin.py +795 -0
- vllm/model_executor/layers/quantization/awq_triton.py +337 -0
- vllm/model_executor/layers/quantization/base_config.py +170 -0
- vllm/model_executor/layers/quantization/bitblas.py +502 -0
- vllm/model_executor/layers/quantization/bitsandbytes.py +631 -0
- vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +3 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +982 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +2368 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +37 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +392 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +176 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_mxfp4.py +106 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +124 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +218 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +176 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +153 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +138 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +203 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +125 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +230 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/__init__.py +0 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +260 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +173 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/__init__.py +0 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +64 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
- vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +224 -0
- vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
- vllm/model_executor/layers/quantization/cpu_wna16.py +299 -0
- vllm/model_executor/layers/quantization/deepspeedfp.py +218 -0
- vllm/model_executor/layers/quantization/experts_int8.py +209 -0
- vllm/model_executor/layers/quantization/fbgemm_fp8.py +195 -0
- vllm/model_executor/layers/quantization/fp8.py +1224 -0
- vllm/model_executor/layers/quantization/fp_quant.py +420 -0
- vllm/model_executor/layers/quantization/gguf.py +682 -0
- vllm/model_executor/layers/quantization/gptq.py +393 -0
- vllm/model_executor/layers/quantization/gptq_bitblas.py +482 -0
- vllm/model_executor/layers/quantization/gptq_marlin.py +934 -0
- vllm/model_executor/layers/quantization/gptq_marlin_24.py +320 -0
- vllm/model_executor/layers/quantization/hqq_marlin.py +372 -0
- vllm/model_executor/layers/quantization/inc.py +65 -0
- vllm/model_executor/layers/quantization/input_quant_fp8.py +212 -0
- vllm/model_executor/layers/quantization/ipex_quant.py +403 -0
- vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +94 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +113 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +323 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +98 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/cpu.py +126 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +130 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +111 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +168 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +159 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +200 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/xpu.py +97 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +76 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +77 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +128 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +220 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +147 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +88 -0
- vllm/model_executor/layers/quantization/kv_cache.py +153 -0
- vllm/model_executor/layers/quantization/modelopt.py +1665 -0
- vllm/model_executor/layers/quantization/moe_wna16.py +518 -0
- vllm/model_executor/layers/quantization/mxfp4.py +1145 -0
- vllm/model_executor/layers/quantization/petit.py +319 -0
- vllm/model_executor/layers/quantization/ptpc_fp8.py +140 -0
- vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
- vllm/model_executor/layers/quantization/quark/quark.py +570 -0
- vllm/model_executor/layers/quantization/quark/quark_moe.py +797 -0
- vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_ocp_mx.py +343 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +179 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +139 -0
- vllm/model_executor/layers/quantization/quark/utils.py +105 -0
- vllm/model_executor/layers/quantization/qutlass_utils.py +185 -0
- vllm/model_executor/layers/quantization/rtn.py +626 -0
- vllm/model_executor/layers/quantization/schema.py +90 -0
- vllm/model_executor/layers/quantization/torchao.py +380 -0
- vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
- vllm/model_executor/layers/quantization/utils/allspark_utils.py +67 -0
- vllm/model_executor/layers/quantization/utils/bitblas_utils.py +229 -0
- vllm/model_executor/layers/quantization/utils/configs/N=10240,K=5120,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=5120,K=25600,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=5120,K=8192,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=51200,K=5120,device_name=NVIDIA_L40S,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
- vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +514 -0
- vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +370 -0
- vllm/model_executor/layers/quantization/utils/fp8_utils.py +1658 -0
- vllm/model_executor/layers/quantization/utils/gptq_utils.py +158 -0
- vllm/model_executor/layers/quantization/utils/int8_utils.py +477 -0
- vllm/model_executor/layers/quantization/utils/layer_utils.py +41 -0
- vllm/model_executor/layers/quantization/utils/machete_utils.py +56 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils.py +720 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +565 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +378 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +219 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +467 -0
- vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +189 -0
- vllm/model_executor/layers/quantization/utils/mxfp6_utils.py +142 -0
- vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +24 -0
- vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +142 -0
- vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +67 -0
- vllm/model_executor/layers/quantization/utils/ocp_mx_utils.py +51 -0
- vllm/model_executor/layers/quantization/utils/petit_utils.py +124 -0
- vllm/model_executor/layers/quantization/utils/quant_utils.py +767 -0
- vllm/model_executor/layers/quantization/utils/w8a8_utils.py +519 -0
- vllm/model_executor/layers/resampler.py +283 -0
- vllm/model_executor/layers/rotary_embedding/__init__.py +291 -0
- vllm/model_executor/layers/rotary_embedding/base.py +282 -0
- vllm/model_executor/layers/rotary_embedding/common.py +289 -0
- vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +184 -0
- vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +218 -0
- vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +43 -0
- vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +68 -0
- vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +82 -0
- vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
- vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
- vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +83 -0
- vllm/model_executor/layers/rotary_embedding/mrope.py +412 -0
- vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +47 -0
- vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +159 -0
- vllm/model_executor/layers/rotary_embedding/xdrope.py +160 -0
- vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +84 -0
- vllm/model_executor/layers/utils.py +251 -0
- vllm/model_executor/layers/vocab_parallel_embedding.py +564 -0
- vllm/model_executor/model_loader/__init__.py +150 -0
- vllm/model_executor/model_loader/base_loader.py +71 -0
- vllm/model_executor/model_loader/bitsandbytes_loader.py +821 -0
- vllm/model_executor/model_loader/default_loader.py +304 -0
- vllm/model_executor/model_loader/dummy_loader.py +28 -0
- vllm/model_executor/model_loader/gguf_loader.py +371 -0
- vllm/model_executor/model_loader/online_quantization.py +275 -0
- vllm/model_executor/model_loader/runai_streamer_loader.py +115 -0
- vllm/model_executor/model_loader/sharded_state_loader.py +214 -0
- vllm/model_executor/model_loader/tensorizer.py +793 -0
- vllm/model_executor/model_loader/tensorizer_loader.py +151 -0
- vllm/model_executor/model_loader/utils.py +299 -0
- vllm/model_executor/model_loader/weight_utils.py +1183 -0
- vllm/model_executor/models/__init__.py +44 -0
- vllm/model_executor/models/adapters.py +592 -0
- vllm/model_executor/models/afmoe.py +697 -0
- vllm/model_executor/models/aimv2.py +248 -0
- vllm/model_executor/models/apertus.py +567 -0
- vllm/model_executor/models/arcee.py +428 -0
- vllm/model_executor/models/arctic.py +633 -0
- vllm/model_executor/models/aria.py +663 -0
- vllm/model_executor/models/audioflamingo3.py +639 -0
- vllm/model_executor/models/aya_vision.py +448 -0
- vllm/model_executor/models/bagel.py +591 -0
- vllm/model_executor/models/baichuan.py +493 -0
- vllm/model_executor/models/bailing_moe.py +643 -0
- vllm/model_executor/models/bamba.py +511 -0
- vllm/model_executor/models/bee.py +157 -0
- vllm/model_executor/models/bert.py +911 -0
- vllm/model_executor/models/bert_with_rope.py +729 -0
- vllm/model_executor/models/blip.py +350 -0
- vllm/model_executor/models/blip2.py +736 -0
- vllm/model_executor/models/bloom.py +390 -0
- vllm/model_executor/models/chameleon.py +1095 -0
- vllm/model_executor/models/chatglm.py +502 -0
- vllm/model_executor/models/clip.py +1045 -0
- vllm/model_executor/models/cohere2_vision.py +470 -0
- vllm/model_executor/models/commandr.py +469 -0
- vllm/model_executor/models/config.py +571 -0
- vllm/model_executor/models/dbrx.py +484 -0
- vllm/model_executor/models/deepencoder.py +679 -0
- vllm/model_executor/models/deepseek_eagle.py +253 -0
- vllm/model_executor/models/deepseek_mtp.py +447 -0
- vllm/model_executor/models/deepseek_ocr.py +601 -0
- vllm/model_executor/models/deepseek_v2.py +1727 -0
- vllm/model_executor/models/deepseek_vl2.py +642 -0
- vllm/model_executor/models/dots1.py +566 -0
- vllm/model_executor/models/dots_ocr.py +830 -0
- vllm/model_executor/models/ernie45.py +53 -0
- vllm/model_executor/models/ernie45_moe.py +755 -0
- vllm/model_executor/models/ernie45_vl.py +1702 -0
- vllm/model_executor/models/ernie45_vl_moe.py +801 -0
- vllm/model_executor/models/ernie_mtp.py +278 -0
- vllm/model_executor/models/exaone.py +524 -0
- vllm/model_executor/models/exaone4.py +518 -0
- vllm/model_executor/models/exaone_moe.py +579 -0
- vllm/model_executor/models/exaone_moe_mtp.py +255 -0
- vllm/model_executor/models/fairseq2_llama.py +154 -0
- vllm/model_executor/models/falcon.py +543 -0
- vllm/model_executor/models/falcon_h1.py +675 -0
- vllm/model_executor/models/flex_olmo.py +155 -0
- vllm/model_executor/models/fuyu.py +371 -0
- vllm/model_executor/models/gemma.py +425 -0
- vllm/model_executor/models/gemma2.py +435 -0
- vllm/model_executor/models/gemma3.py +520 -0
- vllm/model_executor/models/gemma3_mm.py +664 -0
- vllm/model_executor/models/gemma3n.py +1166 -0
- vllm/model_executor/models/gemma3n_audio_utils.py +57 -0
- vllm/model_executor/models/gemma3n_mm.py +820 -0
- vllm/model_executor/models/glm.py +24 -0
- vllm/model_executor/models/glm4.py +295 -0
- vllm/model_executor/models/glm4_1v.py +1823 -0
- vllm/model_executor/models/glm4_moe.py +725 -0
- vllm/model_executor/models/glm4_moe_mtp.py +365 -0
- vllm/model_executor/models/glm4v.py +783 -0
- vllm/model_executor/models/glmasr.py +1154 -0
- vllm/model_executor/models/glmasr_utils.py +188 -0
- vllm/model_executor/models/gpt2.py +385 -0
- vllm/model_executor/models/gpt_bigcode.py +339 -0
- vllm/model_executor/models/gpt_j.py +346 -0
- vllm/model_executor/models/gpt_neox.py +340 -0
- vllm/model_executor/models/gpt_oss.py +745 -0
- vllm/model_executor/models/granite.py +475 -0
- vllm/model_executor/models/granite_speech.py +919 -0
- vllm/model_executor/models/granitemoe.py +561 -0
- vllm/model_executor/models/granitemoehybrid.py +703 -0
- vllm/model_executor/models/granitemoeshared.py +328 -0
- vllm/model_executor/models/gritlm.py +242 -0
- vllm/model_executor/models/grok1.py +803 -0
- vllm/model_executor/models/h2ovl.py +554 -0
- vllm/model_executor/models/hunyuan_v1.py +1042 -0
- vllm/model_executor/models/hunyuan_vision.py +1034 -0
- vllm/model_executor/models/hyperclovax_vision.py +1163 -0
- vllm/model_executor/models/idefics2_vision_model.py +427 -0
- vllm/model_executor/models/idefics3.py +734 -0
- vllm/model_executor/models/interfaces.py +1180 -0
- vllm/model_executor/models/interfaces_base.py +252 -0
- vllm/model_executor/models/intern_vit.py +454 -0
- vllm/model_executor/models/internlm2.py +451 -0
- vllm/model_executor/models/internlm2_ve.py +139 -0
- vllm/model_executor/models/interns1.py +828 -0
- vllm/model_executor/models/interns1_vit.py +433 -0
- vllm/model_executor/models/internvl.py +1436 -0
- vllm/model_executor/models/iquest_loopcoder.py +595 -0
- vllm/model_executor/models/isaac.py +1503 -0
- vllm/model_executor/models/jais.py +397 -0
- vllm/model_executor/models/jais2.py +508 -0
- vllm/model_executor/models/jamba.py +599 -0
- vllm/model_executor/models/jina_vl.py +145 -0
- vllm/model_executor/models/kanana_v.py +756 -0
- vllm/model_executor/models/keye.py +1709 -0
- vllm/model_executor/models/keye_vl1_5.py +726 -0
- vllm/model_executor/models/kimi_linear.py +659 -0
- vllm/model_executor/models/kimi_vl.py +577 -0
- vllm/model_executor/models/lfm2.py +515 -0
- vllm/model_executor/models/lfm2_moe.py +746 -0
- vllm/model_executor/models/lfm2_vl.py +732 -0
- vllm/model_executor/models/lightonocr.py +197 -0
- vllm/model_executor/models/llama.py +724 -0
- vllm/model_executor/models/llama4.py +860 -0
- vllm/model_executor/models/llama4_eagle.py +225 -0
- vllm/model_executor/models/llama_eagle.py +213 -0
- vllm/model_executor/models/llama_eagle3.py +375 -0
- vllm/model_executor/models/llava.py +879 -0
- vllm/model_executor/models/llava_next.py +583 -0
- vllm/model_executor/models/llava_next_video.py +467 -0
- vllm/model_executor/models/llava_onevision.py +922 -0
- vllm/model_executor/models/longcat_flash.py +767 -0
- vllm/model_executor/models/longcat_flash_mtp.py +348 -0
- vllm/model_executor/models/mamba.py +276 -0
- vllm/model_executor/models/mamba2.py +288 -0
- vllm/model_executor/models/medusa.py +179 -0
- vllm/model_executor/models/midashenglm.py +826 -0
- vllm/model_executor/models/mimo.py +188 -0
- vllm/model_executor/models/mimo_mtp.py +294 -0
- vllm/model_executor/models/mimo_v2_flash.py +718 -0
- vllm/model_executor/models/minicpm.py +660 -0
- vllm/model_executor/models/minicpm3.py +233 -0
- vllm/model_executor/models/minicpm_eagle.py +386 -0
- vllm/model_executor/models/minicpmo.py +768 -0
- vllm/model_executor/models/minicpmv.py +1742 -0
- vllm/model_executor/models/minimax_m2.py +552 -0
- vllm/model_executor/models/minimax_text_01.py +1008 -0
- vllm/model_executor/models/minimax_vl_01.py +395 -0
- vllm/model_executor/models/mistral3.py +638 -0
- vllm/model_executor/models/mistral_large_3.py +63 -0
- vllm/model_executor/models/mistral_large_3_eagle.py +137 -0
- vllm/model_executor/models/mixtral.py +599 -0
- vllm/model_executor/models/mllama4.py +1170 -0
- vllm/model_executor/models/mlp_speculator.py +235 -0
- vllm/model_executor/models/modernbert.py +458 -0
- vllm/model_executor/models/module_mapping.py +74 -0
- vllm/model_executor/models/molmo.py +1592 -0
- vllm/model_executor/models/moonvit.py +601 -0
- vllm/model_executor/models/mpt.py +335 -0
- vllm/model_executor/models/nano_nemotron_vl.py +1725 -0
- vllm/model_executor/models/nemotron.py +499 -0
- vllm/model_executor/models/nemotron_h.py +902 -0
- vllm/model_executor/models/nemotron_nas.py +474 -0
- vllm/model_executor/models/nemotron_parse.py +958 -0
- vllm/model_executor/models/nemotron_vl.py +651 -0
- vllm/model_executor/models/nvlm_d.py +216 -0
- vllm/model_executor/models/olmo.py +412 -0
- vllm/model_executor/models/olmo2.py +454 -0
- vllm/model_executor/models/olmoe.py +498 -0
- vllm/model_executor/models/opencua.py +262 -0
- vllm/model_executor/models/openpangu.py +1378 -0
- vllm/model_executor/models/openpangu_mtp.py +265 -0
- vllm/model_executor/models/opt.py +426 -0
- vllm/model_executor/models/orion.py +365 -0
- vllm/model_executor/models/ouro.py +507 -0
- vllm/model_executor/models/ovis.py +557 -0
- vllm/model_executor/models/ovis2_5.py +661 -0
- vllm/model_executor/models/paddleocr_vl.py +1261 -0
- vllm/model_executor/models/paligemma.py +429 -0
- vllm/model_executor/models/persimmon.py +373 -0
- vllm/model_executor/models/phi.py +363 -0
- vllm/model_executor/models/phi3.py +18 -0
- vllm/model_executor/models/phi3v.py +729 -0
- vllm/model_executor/models/phi4mm.py +1250 -0
- vllm/model_executor/models/phi4mm_audio.py +1296 -0
- vllm/model_executor/models/phi4mm_utils.py +1907 -0
- vllm/model_executor/models/phimoe.py +671 -0
- vllm/model_executor/models/pixtral.py +1437 -0
- vllm/model_executor/models/plamo2.py +993 -0
- vllm/model_executor/models/plamo3.py +437 -0
- vllm/model_executor/models/qwen.py +377 -0
- vllm/model_executor/models/qwen2.py +600 -0
- vllm/model_executor/models/qwen2_5_omni_thinker.py +1200 -0
- vllm/model_executor/models/qwen2_5_vl.py +1598 -0
- vllm/model_executor/models/qwen2_audio.py +478 -0
- vllm/model_executor/models/qwen2_moe.py +604 -0
- vllm/model_executor/models/qwen2_rm.py +120 -0
- vllm/model_executor/models/qwen2_vl.py +1588 -0
- vllm/model_executor/models/qwen3.py +331 -0
- vllm/model_executor/models/qwen3_moe.py +752 -0
- vllm/model_executor/models/qwen3_next.py +1410 -0
- vllm/model_executor/models/qwen3_next_mtp.py +293 -0
- vllm/model_executor/models/qwen3_omni_moe_thinker.py +1814 -0
- vllm/model_executor/models/qwen3_vl.py +2120 -0
- vllm/model_executor/models/qwen3_vl_moe.py +474 -0
- vllm/model_executor/models/qwen_vl.py +821 -0
- vllm/model_executor/models/radio.py +573 -0
- vllm/model_executor/models/registry.py +1218 -0
- vllm/model_executor/models/roberta.py +239 -0
- vllm/model_executor/models/rvl.py +107 -0
- vllm/model_executor/models/seed_oss.py +492 -0
- vllm/model_executor/models/siglip.py +1259 -0
- vllm/model_executor/models/siglip2.py +495 -0
- vllm/model_executor/models/siglip2navit.py +660 -0
- vllm/model_executor/models/skyworkr1v.py +951 -0
- vllm/model_executor/models/smolvlm.py +38 -0
- vllm/model_executor/models/solar.py +484 -0
- vllm/model_executor/models/stablelm.py +354 -0
- vllm/model_executor/models/starcoder2.py +365 -0
- vllm/model_executor/models/step3_text.py +554 -0
- vllm/model_executor/models/step3_vl.py +1147 -0
- vllm/model_executor/models/swin.py +500 -0
- vllm/model_executor/models/tarsier.py +624 -0
- vllm/model_executor/models/telechat2.py +153 -0
- vllm/model_executor/models/teleflm.py +78 -0
- vllm/model_executor/models/terratorch.py +318 -0
- vllm/model_executor/models/transformers/__init__.py +127 -0
- vllm/model_executor/models/transformers/base.py +523 -0
- vllm/model_executor/models/transformers/causal.py +65 -0
- vllm/model_executor/models/transformers/legacy.py +90 -0
- vllm/model_executor/models/transformers/moe.py +329 -0
- vllm/model_executor/models/transformers/multimodal.py +441 -0
- vllm/model_executor/models/transformers/pooling.py +102 -0
- vllm/model_executor/models/transformers/utils.py +253 -0
- vllm/model_executor/models/ultravox.py +786 -0
- vllm/model_executor/models/utils.py +832 -0
- vllm/model_executor/models/vision.py +546 -0
- vllm/model_executor/models/voxtral.py +867 -0
- vllm/model_executor/models/voxtral_streaming.py +304 -0
- vllm/model_executor/models/whisper.py +993 -0
- vllm/model_executor/models/whisper_utils.py +299 -0
- vllm/model_executor/models/zamba2.py +986 -0
- vllm/model_executor/parameter.py +642 -0
- vllm/model_executor/utils.py +113 -0
- vllm/model_executor/warmup/__init__.py +0 -0
- vllm/model_executor/warmup/deep_gemm_warmup.py +371 -0
- vllm/model_executor/warmup/kernel_warmup.py +97 -0
- vllm/model_inspection.py +136 -0
- vllm/multimodal/__init__.py +38 -0
- vllm/multimodal/audio.py +287 -0
- vllm/multimodal/base.py +60 -0
- vllm/multimodal/cache.py +829 -0
- vllm/multimodal/evs.py +294 -0
- vllm/multimodal/hasher.py +123 -0
- vllm/multimodal/image.py +155 -0
- vllm/multimodal/inputs.py +1027 -0
- vllm/multimodal/parse.py +674 -0
- vllm/multimodal/processing.py +2469 -0
- vllm/multimodal/profiling.py +351 -0
- vllm/multimodal/registry.py +375 -0
- vllm/multimodal/utils.py +550 -0
- vllm/multimodal/video.py +512 -0
- vllm/outputs.py +347 -0
- vllm/platforms/__init__.py +277 -0
- vllm/platforms/cpu.py +423 -0
- vllm/platforms/cuda.py +618 -0
- vllm/platforms/interface.py +707 -0
- vllm/platforms/rocm.py +586 -0
- vllm/platforms/tpu.py +20 -0
- vllm/platforms/xpu.py +262 -0
- vllm/plugins/__init__.py +81 -0
- vllm/plugins/io_processors/__init__.py +68 -0
- vllm/plugins/io_processors/interface.py +77 -0
- vllm/plugins/lora_resolvers/__init__.py +0 -0
- vllm/plugins/lora_resolvers/filesystem_resolver.py +52 -0
- vllm/pooling_params.py +229 -0
- vllm/profiler/__init__.py +0 -0
- vllm/profiler/layerwise_profile.py +392 -0
- vllm/profiler/utils.py +151 -0
- vllm/profiler/wrapper.py +241 -0
- vllm/py.typed +2 -0
- vllm/ray/__init__.py +0 -0
- vllm/ray/lazy_utils.py +30 -0
- vllm/ray/ray_env.py +79 -0
- vllm/reasoning/__init__.py +96 -0
- vllm/reasoning/abs_reasoning_parsers.py +318 -0
- vllm/reasoning/basic_parsers.py +175 -0
- vllm/reasoning/deepseek_r1_reasoning_parser.py +67 -0
- vllm/reasoning/deepseek_v3_reasoning_parser.py +69 -0
- vllm/reasoning/ernie45_reasoning_parser.py +165 -0
- vllm/reasoning/glm4_moe_reasoning_parser.py +13 -0
- vllm/reasoning/gptoss_reasoning_parser.py +173 -0
- vllm/reasoning/granite_reasoning_parser.py +363 -0
- vllm/reasoning/holo2_reasoning_parser.py +89 -0
- vllm/reasoning/hunyuan_a13b_reasoning_parser.py +237 -0
- vllm/reasoning/identity_reasoning_parser.py +63 -0
- vllm/reasoning/minimax_m2_reasoning_parser.py +110 -0
- vllm/reasoning/mistral_reasoning_parser.py +154 -0
- vllm/reasoning/olmo3_reasoning_parser.py +302 -0
- vllm/reasoning/qwen3_reasoning_parser.py +67 -0
- vllm/reasoning/seedoss_reasoning_parser.py +27 -0
- vllm/reasoning/step3_reasoning_parser.py +113 -0
- vllm/sampling_params.py +629 -0
- vllm/scalar_type.py +355 -0
- vllm/scripts.py +17 -0
- vllm/sequence.py +64 -0
- vllm/tasks.py +13 -0
- vllm/third_party/__init__.py +0 -0
- vllm/third_party/pynvml.py +6140 -0
- vllm/tokenizers/__init__.py +18 -0
- vllm/tokenizers/deepseek_v32.py +187 -0
- vllm/tokenizers/deepseek_v32_encoding.py +463 -0
- vllm/tokenizers/detokenizer_utils.py +198 -0
- vllm/tokenizers/grok2.py +443 -0
- vllm/tokenizers/hf.py +119 -0
- vllm/tokenizers/mistral.py +543 -0
- vllm/tokenizers/protocol.py +123 -0
- vllm/tokenizers/registry.py +238 -0
- vllm/tool_parsers/__init__.py +158 -0
- vllm/tool_parsers/abstract_tool_parser.py +274 -0
- vllm/tool_parsers/deepseekv31_tool_parser.py +388 -0
- vllm/tool_parsers/deepseekv32_tool_parser.py +591 -0
- vllm/tool_parsers/deepseekv3_tool_parser.py +390 -0
- vllm/tool_parsers/ernie45_tool_parser.py +210 -0
- vllm/tool_parsers/functiongemma_tool_parser.py +321 -0
- vllm/tool_parsers/gigachat3_tool_parser.py +190 -0
- vllm/tool_parsers/glm47_moe_tool_parser.py +23 -0
- vllm/tool_parsers/glm4_moe_tool_parser.py +215 -0
- vllm/tool_parsers/granite_20b_fc_tool_parser.py +273 -0
- vllm/tool_parsers/granite_tool_parser.py +253 -0
- vllm/tool_parsers/hermes_tool_parser.py +495 -0
- vllm/tool_parsers/hunyuan_a13b_tool_parser.py +420 -0
- vllm/tool_parsers/internlm2_tool_parser.py +227 -0
- vllm/tool_parsers/jamba_tool_parser.py +323 -0
- vllm/tool_parsers/kimi_k2_tool_parser.py +598 -0
- vllm/tool_parsers/llama4_pythonic_tool_parser.py +341 -0
- vllm/tool_parsers/llama_tool_parser.py +324 -0
- vllm/tool_parsers/longcat_tool_parser.py +37 -0
- vllm/tool_parsers/minimax_m2_tool_parser.py +776 -0
- vllm/tool_parsers/minimax_tool_parser.py +849 -0
- vllm/tool_parsers/mistral_tool_parser.py +612 -0
- vllm/tool_parsers/olmo3_tool_parser.py +366 -0
- vllm/tool_parsers/openai_tool_parser.py +111 -0
- vllm/tool_parsers/phi4mini_tool_parser.py +120 -0
- vllm/tool_parsers/pythonic_tool_parser.py +332 -0
- vllm/tool_parsers/qwen3coder_tool_parser.py +781 -0
- vllm/tool_parsers/qwen3xml_tool_parser.py +1316 -0
- vllm/tool_parsers/seed_oss_tool_parser.py +744 -0
- vllm/tool_parsers/step3_tool_parser.py +303 -0
- vllm/tool_parsers/utils.py +229 -0
- vllm/tool_parsers/xlam_tool_parser.py +556 -0
- vllm/tracing.py +135 -0
- vllm/transformers_utils/__init__.py +26 -0
- vllm/transformers_utils/chat_templates/__init__.py +5 -0
- vllm/transformers_utils/chat_templates/registry.py +73 -0
- vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
- vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
- vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
- vllm/transformers_utils/chat_templates/template_deepseek_ocr.jinja +14 -0
- vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
- vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
- vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
- vllm/transformers_utils/config.py +1169 -0
- vllm/transformers_utils/config_parser_base.py +20 -0
- vllm/transformers_utils/configs/__init__.py +106 -0
- vllm/transformers_utils/configs/afmoe.py +87 -0
- vllm/transformers_utils/configs/arctic.py +216 -0
- vllm/transformers_utils/configs/bagel.py +53 -0
- vllm/transformers_utils/configs/chatglm.py +75 -0
- vllm/transformers_utils/configs/deepseek_vl2.py +126 -0
- vllm/transformers_utils/configs/dotsocr.py +71 -0
- vllm/transformers_utils/configs/eagle.py +90 -0
- vllm/transformers_utils/configs/falcon.py +89 -0
- vllm/transformers_utils/configs/flex_olmo.py +82 -0
- vllm/transformers_utils/configs/hunyuan_vl.py +322 -0
- vllm/transformers_utils/configs/isaac.py +100 -0
- vllm/transformers_utils/configs/jais.py +243 -0
- vllm/transformers_utils/configs/kimi_linear.py +148 -0
- vllm/transformers_utils/configs/kimi_vl.py +38 -0
- vllm/transformers_utils/configs/lfm2_moe.py +163 -0
- vllm/transformers_utils/configs/medusa.py +65 -0
- vllm/transformers_utils/configs/midashenglm.py +103 -0
- vllm/transformers_utils/configs/mistral.py +263 -0
- vllm/transformers_utils/configs/mlp_speculator.py +69 -0
- vllm/transformers_utils/configs/moonvit.py +33 -0
- vllm/transformers_utils/configs/nemotron.py +220 -0
- vllm/transformers_utils/configs/nemotron_h.py +284 -0
- vllm/transformers_utils/configs/olmo3.py +83 -0
- vllm/transformers_utils/configs/ovis.py +182 -0
- vllm/transformers_utils/configs/qwen3_next.py +277 -0
- vllm/transformers_utils/configs/radio.py +98 -0
- vllm/transformers_utils/configs/speculators/__init__.py +2 -0
- vllm/transformers_utils/configs/speculators/algos.py +38 -0
- vllm/transformers_utils/configs/speculators/base.py +114 -0
- vllm/transformers_utils/configs/step3_vl.py +178 -0
- vllm/transformers_utils/configs/tarsier2.py +24 -0
- vllm/transformers_utils/configs/ultravox.py +120 -0
- vllm/transformers_utils/dynamic_module.py +70 -0
- vllm/transformers_utils/gguf_utils.py +280 -0
- vllm/transformers_utils/model_arch_config_convertor.py +402 -0
- vllm/transformers_utils/processor.py +424 -0
- vllm/transformers_utils/processors/__init__.py +25 -0
- vllm/transformers_utils/processors/bagel.py +78 -0
- vllm/transformers_utils/processors/deepseek_ocr.py +438 -0
- vllm/transformers_utils/processors/deepseek_vl2.py +406 -0
- vllm/transformers_utils/processors/hunyuan_vl.py +233 -0
- vllm/transformers_utils/processors/hunyuan_vl_image.py +477 -0
- vllm/transformers_utils/processors/ovis.py +453 -0
- vllm/transformers_utils/processors/ovis2_5.py +468 -0
- vllm/transformers_utils/repo_utils.py +287 -0
- vllm/transformers_utils/runai_utils.py +102 -0
- vllm/transformers_utils/s3_utils.py +95 -0
- vllm/transformers_utils/tokenizer.py +19 -0
- vllm/transformers_utils/utils.py +112 -0
- vllm/triton_utils/__init__.py +20 -0
- vllm/triton_utils/importing.py +103 -0
- vllm/usage/__init__.py +0 -0
- vllm/usage/usage_lib.py +278 -0
- vllm/utils/__init__.py +36 -0
- vllm/utils/argparse_utils.py +491 -0
- vllm/utils/async_utils.py +310 -0
- vllm/utils/cache.py +214 -0
- vllm/utils/collection_utils.py +112 -0
- vllm/utils/counter.py +45 -0
- vllm/utils/deep_gemm.py +424 -0
- vllm/utils/flashinfer.py +602 -0
- vllm/utils/func_utils.py +236 -0
- vllm/utils/gc_utils.py +151 -0
- vllm/utils/hashing.py +117 -0
- vllm/utils/import_utils.py +438 -0
- vllm/utils/jsontree.py +158 -0
- vllm/utils/math_utils.py +32 -0
- vllm/utils/mem_constants.py +13 -0
- vllm/utils/mem_utils.py +285 -0
- vllm/utils/nccl.py +64 -0
- vllm/utils/network_utils.py +331 -0
- vllm/utils/nvtx_pytorch_hooks.py +286 -0
- vllm/utils/platform_utils.py +59 -0
- vllm/utils/profiling.py +56 -0
- vllm/utils/registry.py +51 -0
- vllm/utils/serial_utils.py +214 -0
- vllm/utils/system_utils.py +296 -0
- vllm/utils/tensor_schema.py +255 -0
- vllm/utils/torch_utils.py +781 -0
- vllm/v1/__init__.py +0 -0
- vllm/v1/attention/__init__.py +0 -0
- vllm/v1/attention/backend.py +736 -0
- vllm/v1/attention/backends/__init__.py +0 -0
- vllm/v1/attention/backends/cpu_attn.py +501 -0
- vllm/v1/attention/backends/fa_utils.py +126 -0
- vllm/v1/attention/backends/flash_attn.py +1092 -0
- vllm/v1/attention/backends/flash_attn_diffkv.py +277 -0
- vllm/v1/attention/backends/flashinfer.py +1713 -0
- vllm/v1/attention/backends/flex_attention.py +1024 -0
- vllm/v1/attention/backends/gdn_attn.py +382 -0
- vllm/v1/attention/backends/linear_attn.py +77 -0
- vllm/v1/attention/backends/mamba1_attn.py +28 -0
- vllm/v1/attention/backends/mamba2_attn.py +256 -0
- vllm/v1/attention/backends/mamba_attn.py +313 -0
- vllm/v1/attention/backends/mla/__init__.py +0 -0
- vllm/v1/attention/backends/mla/aiter_triton_mla.py +66 -0
- vllm/v1/attention/backends/mla/common.py +2156 -0
- vllm/v1/attention/backends/mla/cutlass_mla.py +278 -0
- vllm/v1/attention/backends/mla/flashattn_mla.py +348 -0
- vllm/v1/attention/backends/mla/flashinfer_mla.py +175 -0
- vllm/v1/attention/backends/mla/flashmla.py +321 -0
- vllm/v1/attention/backends/mla/flashmla_sparse.py +1021 -0
- vllm/v1/attention/backends/mla/indexer.py +345 -0
- vllm/v1/attention/backends/mla/rocm_aiter_mla.py +284 -0
- vllm/v1/attention/backends/mla/rocm_aiter_mla_sparse.py +321 -0
- vllm/v1/attention/backends/mla/triton_mla.py +171 -0
- vllm/v1/attention/backends/registry.py +258 -0
- vllm/v1/attention/backends/rocm_aiter_fa.py +1000 -0
- vllm/v1/attention/backends/rocm_aiter_unified_attn.py +206 -0
- vllm/v1/attention/backends/rocm_attn.py +405 -0
- vllm/v1/attention/backends/short_conv_attn.py +26 -0
- vllm/v1/attention/backends/tree_attn.py +430 -0
- vllm/v1/attention/backends/triton_attn.py +578 -0
- vllm/v1/attention/backends/utils.py +978 -0
- vllm/v1/attention/ops/__init__.py +0 -0
- vllm/v1/attention/ops/chunked_prefill_paged_decode.py +459 -0
- vllm/v1/attention/ops/common.py +469 -0
- vllm/v1/attention/ops/flashmla.py +254 -0
- vllm/v1/attention/ops/merge_attn_states.py +47 -0
- vllm/v1/attention/ops/paged_attn.py +51 -0
- vllm/v1/attention/ops/pallas_kv_cache_update.py +130 -0
- vllm/v1/attention/ops/prefix_prefill.py +862 -0
- vllm/v1/attention/ops/rocm_aiter_mla_sparse.py +210 -0
- vllm/v1/attention/ops/triton_decode_attention.py +709 -0
- vllm/v1/attention/ops/triton_merge_attn_states.py +116 -0
- vllm/v1/attention/ops/triton_prefill_attention.py +272 -0
- vllm/v1/attention/ops/triton_reshape_and_cache_flash.py +395 -0
- vllm/v1/attention/ops/triton_unified_attention.py +1088 -0
- vllm/v1/attention/ops/vit_attn_wrappers.py +185 -0
- vllm/v1/attention/selector.py +145 -0
- vllm/v1/core/__init__.py +0 -0
- vllm/v1/core/block_pool.py +489 -0
- vllm/v1/core/encoder_cache_manager.py +402 -0
- vllm/v1/core/kv_cache_coordinator.py +560 -0
- vllm/v1/core/kv_cache_manager.py +485 -0
- vllm/v1/core/kv_cache_metrics.py +96 -0
- vllm/v1/core/kv_cache_utils.py +1642 -0
- vllm/v1/core/sched/__init__.py +0 -0
- vllm/v1/core/sched/async_scheduler.py +66 -0
- vllm/v1/core/sched/interface.py +205 -0
- vllm/v1/core/sched/output.py +261 -0
- vllm/v1/core/sched/request_queue.py +208 -0
- vllm/v1/core/sched/scheduler.py +1936 -0
- vllm/v1/core/sched/utils.py +64 -0
- vllm/v1/core/single_type_kv_cache_manager.py +926 -0
- vllm/v1/cudagraph_dispatcher.py +183 -0
- vllm/v1/engine/__init__.py +224 -0
- vllm/v1/engine/async_llm.py +874 -0
- vllm/v1/engine/coordinator.py +396 -0
- vllm/v1/engine/core.py +1614 -0
- vllm/v1/engine/core_client.py +1422 -0
- vllm/v1/engine/detokenizer.py +351 -0
- vllm/v1/engine/exceptions.py +18 -0
- vllm/v1/engine/input_processor.py +713 -0
- vllm/v1/engine/llm_engine.py +415 -0
- vllm/v1/engine/logprobs.py +245 -0
- vllm/v1/engine/output_processor.py +715 -0
- vllm/v1/engine/parallel_sampling.py +150 -0
- vllm/v1/engine/utils.py +1086 -0
- vllm/v1/executor/__init__.py +6 -0
- vllm/v1/executor/abstract.py +352 -0
- vllm/v1/executor/multiproc_executor.py +888 -0
- vllm/v1/executor/ray_distributed_executor.py +8 -0
- vllm/v1/executor/ray_executor.py +623 -0
- vllm/v1/executor/ray_utils.py +468 -0
- vllm/v1/executor/uniproc_executor.py +186 -0
- vllm/v1/kv_cache_interface.py +485 -0
- vllm/v1/kv_offload/__init__.py +0 -0
- vllm/v1/kv_offload/abstract.py +161 -0
- vllm/v1/kv_offload/arc_manager.py +237 -0
- vllm/v1/kv_offload/backend.py +97 -0
- vllm/v1/kv_offload/backends/__init__.py +0 -0
- vllm/v1/kv_offload/backends/cpu.py +62 -0
- vllm/v1/kv_offload/cpu.py +109 -0
- vllm/v1/kv_offload/factory.py +58 -0
- vllm/v1/kv_offload/lru_manager.py +139 -0
- vllm/v1/kv_offload/mediums.py +39 -0
- vllm/v1/kv_offload/spec.py +70 -0
- vllm/v1/kv_offload/worker/__init__.py +0 -0
- vllm/v1/kv_offload/worker/cpu_gpu.py +287 -0
- vllm/v1/kv_offload/worker/worker.py +163 -0
- vllm/v1/metrics/__init__.py +0 -0
- vllm/v1/metrics/loggers.py +1320 -0
- vllm/v1/metrics/perf.py +1244 -0
- vllm/v1/metrics/prometheus.py +82 -0
- vllm/v1/metrics/ray_wrappers.py +194 -0
- vllm/v1/metrics/reader.py +257 -0
- vllm/v1/metrics/stats.py +440 -0
- vllm/v1/outputs.py +242 -0
- vllm/v1/pool/__init__.py +0 -0
- vllm/v1/pool/metadata.py +124 -0
- vllm/v1/request.py +281 -0
- vllm/v1/sample/__init__.py +0 -0
- vllm/v1/sample/logits_processor/__init__.py +352 -0
- vllm/v1/sample/logits_processor/builtin.py +278 -0
- vllm/v1/sample/logits_processor/interface.py +106 -0
- vllm/v1/sample/logits_processor/state.py +165 -0
- vllm/v1/sample/metadata.py +44 -0
- vllm/v1/sample/ops/__init__.py +0 -0
- vllm/v1/sample/ops/bad_words.py +57 -0
- vllm/v1/sample/ops/logprobs.py +25 -0
- vllm/v1/sample/ops/penalties.py +57 -0
- vllm/v1/sample/ops/topk_topp_sampler.py +388 -0
- vllm/v1/sample/rejection_sampler.py +822 -0
- vllm/v1/sample/sampler.py +319 -0
- vllm/v1/sample/tpu/__init__.py +0 -0
- vllm/v1/sample/tpu/metadata.py +120 -0
- vllm/v1/sample/tpu/sampler.py +215 -0
- vllm/v1/serial_utils.py +514 -0
- vllm/v1/spec_decode/__init__.py +0 -0
- vllm/v1/spec_decode/eagle.py +1346 -0
- vllm/v1/spec_decode/medusa.py +73 -0
- vllm/v1/spec_decode/metadata.py +66 -0
- vllm/v1/spec_decode/metrics.py +225 -0
- vllm/v1/spec_decode/ngram_proposer.py +281 -0
- vllm/v1/spec_decode/suffix_decoding.py +95 -0
- vllm/v1/spec_decode/utils.py +109 -0
- vllm/v1/structured_output/__init__.py +337 -0
- vllm/v1/structured_output/backend_guidance.py +291 -0
- vllm/v1/structured_output/backend_lm_format_enforcer.py +177 -0
- vllm/v1/structured_output/backend_outlines.py +324 -0
- vllm/v1/structured_output/backend_types.py +136 -0
- vllm/v1/structured_output/backend_xgrammar.py +378 -0
- vllm/v1/structured_output/request.py +91 -0
- vllm/v1/structured_output/utils.py +457 -0
- vllm/v1/utils.py +466 -0
- vllm/v1/worker/__init__.py +0 -0
- vllm/v1/worker/block_table.py +343 -0
- vllm/v1/worker/cp_utils.py +42 -0
- vllm/v1/worker/cpu_model_runner.py +122 -0
- vllm/v1/worker/cpu_worker.py +192 -0
- vllm/v1/worker/dp_utils.py +240 -0
- vllm/v1/worker/ec_connector_model_runner_mixin.py +85 -0
- vllm/v1/worker/gpu/README.md +4 -0
- vllm/v1/worker/gpu/__init__.py +0 -0
- vllm/v1/worker/gpu/async_utils.py +98 -0
- vllm/v1/worker/gpu/attn_utils.py +183 -0
- vllm/v1/worker/gpu/block_table.py +222 -0
- vllm/v1/worker/gpu/buffer_utils.py +224 -0
- vllm/v1/worker/gpu/cudagraph_utils.py +264 -0
- vllm/v1/worker/gpu/dp_utils.py +31 -0
- vllm/v1/worker/gpu/input_batch.py +526 -0
- vllm/v1/worker/gpu/metrics/__init__.py +0 -0
- vllm/v1/worker/gpu/metrics/logits.py +42 -0
- vllm/v1/worker/gpu/mm/__init__.py +0 -0
- vllm/v1/worker/gpu/mm/mrope_utils.py +127 -0
- vllm/v1/worker/gpu/model_runner.py +1005 -0
- vllm/v1/worker/gpu/sample/__init__.py +0 -0
- vllm/v1/worker/gpu/sample/gumbel.py +106 -0
- vllm/v1/worker/gpu/sample/logit_bias.py +270 -0
- vllm/v1/worker/gpu/sample/logprob.py +167 -0
- vllm/v1/worker/gpu/sample/metadata.py +79 -0
- vllm/v1/worker/gpu/sample/min_p.py +58 -0
- vllm/v1/worker/gpu/sample/output.py +14 -0
- vllm/v1/worker/gpu/sample/penalties.py +155 -0
- vllm/v1/worker/gpu/sample/sampler.py +88 -0
- vllm/v1/worker/gpu/spec_decode/__init__.py +18 -0
- vllm/v1/worker/gpu/spec_decode/eagle.py +566 -0
- vllm/v1/worker/gpu/spec_decode/eagle_cudagraph.py +115 -0
- vllm/v1/worker/gpu/spec_decode/rejection_sample.py +71 -0
- vllm/v1/worker/gpu/states.py +282 -0
- vllm/v1/worker/gpu/structured_outputs.py +100 -0
- vllm/v1/worker/gpu_input_batch.py +1030 -0
- vllm/v1/worker/gpu_model_runner.py +5761 -0
- vllm/v1/worker/gpu_ubatch_wrapper.py +475 -0
- vllm/v1/worker/gpu_worker.py +968 -0
- vllm/v1/worker/kv_connector_model_runner_mixin.py +300 -0
- vllm/v1/worker/lora_model_runner_mixin.py +225 -0
- vllm/v1/worker/tpu_input_batch.py +574 -0
- vllm/v1/worker/tpu_worker.py +18 -0
- vllm/v1/worker/ubatch_utils.py +112 -0
- vllm/v1/worker/ubatching.py +242 -0
- vllm/v1/worker/utils.py +400 -0
- vllm/v1/worker/worker_base.py +372 -0
- vllm/v1/worker/workspace.py +253 -0
- vllm/v1/worker/xpu_model_runner.py +48 -0
- vllm/v1/worker/xpu_worker.py +174 -0
- vllm/version.py +39 -0
- vllm/vllm_flash_attn/.gitkeep +0 -0
- vllm_cpu_avx512bf16-0.14.0.dist-info/METADATA +348 -0
- vllm_cpu_avx512bf16-0.14.0.dist-info/RECORD +1712 -0
- vllm_cpu_avx512bf16-0.14.0.dist-info/WHEEL +5 -0
- vllm_cpu_avx512bf16-0.14.0.dist-info/entry_points.txt +5 -0
- vllm_cpu_avx512bf16-0.14.0.dist-info/top_level.txt +1 -0

@@ -0,0 +1,2469 @@
+# SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+import contextvars
+import threading
+import time
+from abc import ABC, abstractmethod
+from collections import defaultdict
+from collections.abc import Callable, Generator, ItemsView, Iterable, Mapping, Sequence
+from contextlib import contextmanager
+from dataclasses import dataclass, field, replace
+from enum import Enum
+from functools import lru_cache
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Generic,
+    NamedTuple,
+    Protocol,
+    TypeAlias,
+    cast,
+    overload,
+)
+
+import regex as re
+import torch
+from typing_extensions import TypeVar, assert_never
+
+from vllm.logger import init_logger
+from vllm.tokenizers import TokenizerLike
+from vllm.transformers_utils.processor import cached_processor_from_config
+from vllm.utils.collection_utils import flatten_2d_lists, full_groupby
+from vllm.utils.func_utils import get_allowed_kwarg_only_overrides
+from vllm.utils.jsontree import JSONTree, json_map_leaves
+
+from .hasher import MultiModalHasher
+from .inputs import (
+    MultiModalDataDict,
+    MultiModalEncDecInputs,
+    MultiModalFieldConfig,
+    MultiModalInputs,
+    MultiModalKwargsItem,
+    MultiModalKwargsItems,
+    MultiModalKwargsOptionalItems,
+    MultiModalUUIDDict,
+    PlaceholderRange,
+)
+from .parse import (
+    DictEmbeddingItems,
+    EmbeddingItems,
+    MultiModalDataItems,
+    MultiModalDataParser,
+)
+
+if TYPE_CHECKING:
+    from transformers.configuration_utils import PretrainedConfig
+    from transformers.feature_extraction_utils import BatchFeature
+    from transformers.processing_utils import ProcessorMixin
+
+    from vllm.config import ModelConfig, ObservabilityConfig
+
+    from .cache import BaseMultiModalProcessorCache
+    from .profiling import BaseDummyInputsBuilder
+else:
+    PretrainedConfig = object
+    BatchFeature = object
+    ProcessorMixin = object
+
+    ModelConfig = object
+    ObservabilityConfig = object
+
+    BaseMultiModalProcessorCache = object
+
+logger = init_logger(__name__)
+
+_S = TypeVar("_S", str, list[int])
+
+_request_id_context: contextvars.ContextVar[str | None] = contextvars.ContextVar(
+    "_request_id_context", default=None
+)
+
+
+def get_current_request_id() -> str | None:
+    """Get the current request_id from the context, if available."""
+    return _request_id_context.get()
+
+
+@contextmanager
+def set_request_id(request_id: str) -> Generator[None, None, None]:
+    """Context manager to set the request_id for the current context."""
+    token = _request_id_context.set(request_id)
+    try:
+        yield
+    finally:
+        _request_id_context.reset(token)
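
Since the request-id plumbing above relies only on the standard `contextvars` module, its save/restore behavior can be checked in isolation. A minimal, self-contained sketch of the same pattern (the names below are stand-ins, not the vLLM helpers themselves):

```python
import contextvars
from contextlib import contextmanager

_req_id: contextvars.ContextVar[str | None] = contextvars.ContextVar("_req_id", default=None)

@contextmanager
def scoped_request_id(request_id: str):
    # set() returns a token that restores the previous value on reset()
    token = _req_id.set(request_id)
    try:
        yield
    finally:
        _req_id.reset(token)

assert _req_id.get() is None
with scoped_request_id("req-123"):
    assert _req_id.get() == "req-123"  # visible to any code called in this context
assert _req_id.get() is None  # previous value restored on exit
```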
+
+
+@dataclass
+class MultiModalProcessorTimingStats:
+    """Per-request timing statistics for multimodal processor stages."""
+
+    hf_processor_time: float = 0.0
+    """Time spent in HuggingFace processor calls (seconds)."""
+
+    hashing_time: float = 0.0
+    """Time spent computing multimodal item hashes (seconds)."""
+
+    cache_lookup_time: float = 0.0
+    """Time spent in cache lookups and merges (seconds)."""
+
+    prompt_update_time: float = 0.0
+    """Time spent applying prompt updates and finding placeholders (seconds)."""
+
+    total_time: float = 0.0
+    """Total processing time (seconds)."""
+
+    def to_dict(self) -> dict[str, float]:
+        """Convert stats to a dictionary for JSON serialization."""
+        return {
+            "hf_processor_time": self.hf_processor_time,
+            "hashing_time": self.hashing_time,
+            "cache_lookup_time": self.cache_lookup_time,
+            "prompt_update_time": self.prompt_update_time,
+            "total_time": self.total_time,
+        }
+
+
+def get_timing_stats_from_engine_client(
+    engine_client: Any,
+) -> dict[str, dict[str, float]]:
+    """
+    Get all timing stats from the context associated with the engine client.
+
+    Args:
+        engine_client: The engine client that has input_processor.
+
+    Returns:
+        A dictionary mapping request_id to stats dict.
+    """
+    try:
+        if not engine_client.vllm_config.observability_config.enable_mm_processor_stats:
+            return {}
+    except (AttributeError, RuntimeError):
+        return {}
+
+    try:
+        input_processor = engine_client.input_processor
+        input_preprocessor = input_processor.input_preprocessor
+
+        if hasattr(input_preprocessor, "_get_mm_processor"):
+            mm_processor = input_preprocessor._get_mm_processor()
+            if mm_processor is not None and hasattr(mm_processor, "info"):
+                ctx = mm_processor.info.ctx
+                return ctx.get_all_timing_stats()
+    except (AttributeError, RuntimeError):
+        pass
+
+    return {}
+
+
+@contextmanager
+def _timed_operation(ctx: "InputProcessingContext", stage_name: str):
+    """
+    Context manager to time an operation using the context's timing stats.
+
+    The request_id is automatically retrieved from the context variable,
+    so it doesn't need to be passed as a parameter.
+
+    Args:
+        ctx: The InputProcessingContext containing the timing stats registry.
+        stage_name: Name of the stage being timed.
+    """
+    request_id = get_current_request_id()
+    if ctx is None or request_id is None:
+        yield
+        return
+
+    stats = ctx.get_timing_stats(request_id)
+    if stats is None:
+        yield
+        return
+
+    start_time = time.perf_counter()
+    try:
+        yield
+    finally:
+        elapsed = time.perf_counter() - start_time
+        if stage_name == "hf_processor":
+            stats.hf_processor_time += elapsed
+        elif stage_name == "hashing":
+            stats.hashing_time += elapsed
+        elif stage_name == "cache_lookup":
+            stats.cache_lookup_time += elapsed
+        elif stage_name == "prompt_update":
+            stats.prompt_update_time += elapsed
+        stats.total_time += elapsed
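
`_timed_operation` accumulates wall-clock time with `time.perf_counter()` inside `try`/`finally`, so a stage is charged even if it raises, and every stage also counts toward `total_time`. A standalone sketch of that accumulation pattern, using a hypothetical `Stats` class in place of the registry lookup:

```python
import time
from contextlib import contextmanager
from dataclasses import dataclass

@dataclass
class Stats:
    hf_processor_time: float = 0.0
    total_time: float = 0.0

@contextmanager
def timed(stats: Stats, stage: str):
    start = time.perf_counter()
    try:
        yield
    finally:
        # Runs on success and on exception alike
        elapsed = time.perf_counter() - start
        if stage == "hf_processor":
            stats.hf_processor_time += elapsed
        stats.total_time += elapsed

stats = Stats()
with timed(stats, "hf_processor"):
    time.sleep(0.01)
assert stats.total_time >= stats.hf_processor_time > 0
```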
+
+
+PromptSeq: TypeAlias = str | list[int]
+"""A token sequence (list of token IDs) or text."""
+
+
+@lru_cache(maxsize=2048)
+def _cached_encode(
+    tokenizer: TokenizerLike,
+    text: str,
+    *,
+    add_special_tokens: bool = True,
+) -> list[int]:
+    return tokenizer.encode(text, add_special_tokens=add_special_tokens)
+
+
+@lru_cache(maxsize=2048)
+def _cached_decode(
+    tokenizer: TokenizerLike,
+    token_ids: tuple[int, ...],
+    *,
+    skip_special_tokens: bool = False,
+) -> str:
+    return tokenizer.decode(list(token_ids), skip_special_tokens=skip_special_tokens)
+
+
+def _seq2text(
+    tokenizer: TokenizerLike | None,
+    seq: PromptSeq,
+    *,
+    use_cache: bool = True,
+) -> str:
+    if isinstance(seq, str):
+        return seq
+
+    if tokenizer is None:
+        raise ValueError("You cannot decode tokens when `skip_tokenizer_init=True`")
+
+    if not use_cache:
+        return tokenizer.decode(seq)
+
+    return _cached_decode(tokenizer, tuple(seq))
+
+
+def _seq2tokens(
+    tokenizer: TokenizerLike | None,
+    seq: PromptSeq,
+    *,
+    use_cache: bool = True,
+) -> list[int]:
+    if isinstance(seq, str):
+        if tokenizer is None:
+            raise ValueError("You cannot encode text when `skip_tokenizer_init=True`")
+
+        if not use_cache:
+            return tokenizer.encode(seq, add_special_tokens=False)
+
+        return _cached_encode(tokenizer, seq, add_special_tokens=False)
+
+    return seq
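
Note that `lru_cache` keys on every argument, so the tokenizer itself must be hashable, and the cached list is returned by reference, so callers must not mutate it. A self-contained sketch of that caching behavior with a made-up `ToyTokenizer` (not a real vLLM class):

```python
from functools import lru_cache

class ToyTokenizer:
    # Hashable by object identity, like typical tokenizer instances
    def encode(self, text: str) -> list[int]:
        return [ord(c) for c in text]

@lru_cache(maxsize=2048)
def cached_encode(tokenizer: ToyTokenizer, text: str) -> list[int]:
    return tokenizer.encode(text)

tok = ToyTokenizer()
first = cached_encode(tok, "abc")
second = cached_encode(tok, "abc")  # served from the cache
assert first is second  # same list object: do not mutate it
assert cached_encode.cache_info().hits == 1
```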
+
+
+class _GetMatchIndex(Protocol):
+    def __call__(
+        self,
+        tokenizer: TokenizerLike | None,
+        prompt: PromptSeq,
+        start_idx: int = 0,
+    ) -> int | None: ...
+
+
+@dataclass
+class PromptIndex:
+    """Resolves to an index in the prompt."""
+
+    get_match_index: _GetMatchIndex
+
+
+class PromptIndexTargets:
+    @staticmethod
+    def start() -> PromptIndex:
+        """
+        Resolves to the start of the prompt (before the first token).
+
+        This results in a match even if the prompt is empty.
+        """
+        return PromptIndex(lambda tokenizer, prompt, start_idx=0: 0)
+
+    @staticmethod
+    def prefix(seq: PromptSeq) -> PromptIndex:
+        """
+        Resolves to a location in the prompt after the given prefix.
+        """
+
+        def get_match_index(
+            tokenizer: TokenizerLike | None,
+            prompt: PromptSeq,
+            start_idx: int = 0,
+        ) -> int | None:
+            if start_idx != 0:
+                return None
+
+            prefix = seq
+
+            if isinstance(prompt, str):
+                # Make both `str`
+                prefix = _seq2text(tokenizer, prefix, use_cache=False)
+            else:
+                # Make both `list[int]`
+                prefix = _seq2tokens(tokenizer, prefix, use_cache=False)
+
+            match_idx = len(prefix)
+            return match_idx if prompt[:match_idx] == prefix else None
+
+        return PromptIndex(get_match_index)
+
+    @staticmethod
+    def end() -> PromptIndex:
+        """
+        Resolves to the end of the prompt (after the last token).
+
+        This results in a match even if the prompt is empty.
+        """
+        return PromptIndex(lambda tokenizer, prompt, start_idx=0: len(prompt))
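
The three targets differ only in the index they resolve to: `start()` is always `0`, `end()` is always `len(prompt)`, and `prefix(seq)` resolves to `len(prefix)` only when the prompt actually begins with the prefix and the scan has not advanced (`start_idx == 0`). For example, on text prompts (assuming the import path `vllm.multimodal.processing` that the docstrings above reference; no tokenizer is needed when both sides are `str`):

```python
from vllm.multimodal.processing import PromptIndexTargets

prompt = "Images: describe them"

assert PromptIndexTargets.start().get_match_index(None, prompt) == 0
assert PromptIndexTargets.end().get_match_index(None, prompt) == len(prompt)

prefix = PromptIndexTargets.prefix("Images:")
assert prefix.get_match_index(None, prompt) == len("Images:")  # prompt starts with it
assert prefix.get_match_index(None, "No match here") is None
assert prefix.get_match_index(None, prompt, start_idx=3) is None  # only matches at position 0
```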
+
+
+UpdateTarget: TypeAlias = PromptSeq | PromptIndex
+"""
+The token sequence or text to update.
+"""
+
+PromptUpdateTarget: TypeAlias = Callable[[int], UpdateTarget] | UpdateTarget
+"""
+Given the index of the processed item within
+[`modality`][vllm.multimodal.processing.PromptUpdate.modality],
+output the corresponding token sequence (or text).
+
+For convenience, you can directly pass in the token sequence (or text)
+instead of a function if it does not depend on the input.
+"""
+
+
+@dataclass
+class PromptUpdateDetails(Generic[_S]):
+    """Details about the token sequence or text that are part of the update."""
+
+    full: _S
+    """The full content."""
+
+    is_embed: Callable[[TokenizerLike | None, PromptSeq], torch.Tensor] | None = None
+    """
+    Given [`full`][vllm.multimodal.processing.PromptUpdateDetails.full],
+    return a boolean mask of shape `(len(full),)` indicating which positions
+    of `full` to assign embeddings to.
+
+    `None` (default) means to assign embeddings to all positions of `full`.
+
+    The embeddings are obtained by calling
+    [`SupportsMultiModal.embed_multimodal`][vllm.model_executor.models.interfaces.SupportsMultiModal.embed_multimodal].
+    """
+
+    @staticmethod
+    def from_seq(seq: _S) -> "PromptUpdateDetails[_S]":
+        return PromptUpdateDetails(full=seq)
+
+    @staticmethod
+    def select_text(
+        seq: _S,
+        embed_text: str,
+    ) -> "PromptUpdateDetails[_S]":
+        def is_embed(tokenizer: TokenizerLike | None, full: PromptSeq) -> torch.Tensor:
+            embed_token_ids = _seq2tokens(tokenizer, embed_text, use_cache=False)
+            token_ids = _seq2tokens(tokenizer, full)
+
+            return torch.isin(
+                torch.tensor(token_ids),
+                torch.tensor(embed_token_ids),
+            )
+
+        return PromptUpdateDetails(full=seq, is_embed=is_embed)
+
+    @staticmethod
+    def select_token_id(
+        seq: _S,
+        embed_token_id: int,
+    ) -> "PromptUpdateDetails[_S]":
+        def is_embed(tokenizer: TokenizerLike | None, full: PromptSeq) -> torch.Tensor:
+            token_ids = _seq2tokens(tokenizer, full)
+
+            return torch.tensor(token_ids) == embed_token_id
+
+        return PromptUpdateDetails(full=seq, is_embed=is_embed)
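
`select_token_id` marks exactly the positions holding the given token id, which is how feature placeholders are separated from surrounding markers such as `<image_bos>`/`<image_eos>` in the mask handed back to the model. The mask itself is a plain elementwise comparison; for example, with made-up token ids:

```python
import torch

image_bos_id, image_token_id, image_eos_id = 5, 7, 6
full = [image_bos_id, image_token_id, image_token_id, image_eos_id]

# Equivalent to the mask computed by select_token_id's inner is_embed
mask = torch.tensor(full) == image_token_id
assert mask.tolist() == [False, True, True, False]
```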
+
+
+PromptUpdateInfo: TypeAlias = PromptSeq | PromptUpdateDetails
+"""
+The token sequence or text that are part of the update.
+
+If only part of the content corresponds to feature placeholders, you can
+use [`PromptUpdateDetails`][vllm.multimodal.processing.PromptUpdateDetails] to
+specify which part.
+"""
+
+PromptUpdateContent: TypeAlias = Callable[[int], PromptUpdateInfo] | PromptUpdateInfo
+"""
+Given the index of the processed item within
+[`modality`][vllm.multimodal.processing.PromptUpdate.modality],
+output the corresponding token sequence (or text).
+
+For convenience, you can directly pass in the token sequence (or text)
+instead of a function if it does not depend on the input.
+"""
+
+
+class UpdateMode(str, Enum):
+    INSERT = "insert"
+    REPLACE = "replace"
+
+
+@dataclass
+class PromptUpdate(ABC):
+    """
+    Defines how to update a prompt with placeholder tokens.
+    """
+
+    modality: str
+    """The modality for which the update is made."""
+
+    target: PromptUpdateTarget
+    """The token sequence (or text) to update."""
+
+    @property
+    @abstractmethod
+    def content(self) -> PromptUpdateContent:
+        """The placeholder tokens that are part of the update."""
+        raise NotImplementedError
+
+    @property
+    @abstractmethod
+    def mode(self) -> UpdateMode:
+        """Defines how to update the prompt."""
+        raise NotImplementedError
+
+    def _resolve_target(self, item_idx: int) -> UpdateTarget:
+        target = self.target
+        if callable(target):
+            target = target(item_idx)
+
+        return target
+
+    def _resolve_content(self, item_idx: int) -> PromptUpdateDetails:
+        content = self.content
+        if callable(content):
+            content = content(item_idx)
+
+        if not isinstance(content, PromptUpdateDetails):
+            content = PromptUpdateDetails.from_seq(content)
+
+        return content
+
+    def resolve(self, item_idx: int) -> "ResolvedPromptUpdate":
+        """
+        Given the index of the processed item within
+        [`modality`][vllm.multimodal.processing.PromptUpdate.modality],
+        output a copy of this object with its lazy attributes resolved.
+        """
+        return ResolvedPromptUpdate(
+            modality=self.modality,
+            item_idx=item_idx,
+            mode=self.mode,
+            target=self._resolve_target(item_idx),
+            content=self._resolve_content(item_idx),
+        )
+
+
+@dataclass
+class PromptInsertion(PromptUpdate):
+    """
+    Defines how to insert placeholder tokens into a prompt.
+
+    Example:
+
+        For each image, insert a number of `<image>` feature placeholders
+        equal to the feature size of the vision encoder after the `<s>` token:
+
+        ```python
+        PromptInsertion(
+            modality="image",
+            target="<s>",
+            insertion="<image>" * image_feature_size,
+        )
+        ```
+
+        Insert these tokens at the start of the prompt:
+
+        ```python
+        PromptInsertion(
+            modality="image",
+            target=PromptIndexTargets.start(),
+            insertion="<image>" * image_feature_size,
+        )
+        ```
+
+        Insert these tokens after a prefix `Images:`:
+
+        ```python
+        PromptInsertion(
+            modality="image",
+            target=PromptIndexTargets.prefix("Images:"),
+            insertion="<image>" * image_feature_size,
+        )
+        ```
+
+        Insert these tokens at the end of the prompt:
+
+        ```python
+        PromptInsertion(
+            modality="image",
+            target=PromptIndexTargets.end(),
+            insertion="<image>" * image_feature_size,
+        )
+        ```
+    """
+
+    insertion: PromptUpdateContent = field(repr=False)
+    """
+    Given the index of the processed item within
+    [`modality`][vllm.multimodal.processing.PromptUpdate.modality],
+    output the token sequence (or text) to insert right after
+    [`target`][vllm.multimodal.processing.PromptUpdate.target].
+
+    For convenience, you can directly pass in the token sequence (or text)
+    instead of a function if it does not depend on the input.
+    """
+
+    @property
+    def content(self) -> PromptUpdateContent:
+        return self.insertion
+
+    @property
+    def mode(self) -> UpdateMode:
+        return UpdateMode.INSERT
+
+
+@dataclass
+class PromptReplacement(PromptUpdate):
+    """
+    Defines how to replace portions of an input prompt with placeholder tokens.
+
+    Example:
+
+        For each image, replace one `<image>` input placeholder in the prompt
+        with a number of `<image>` feature placeholders
+        equal to the feature size of the vision encoder:
+
+        ```python
+        PromptReplacement(
+            modality="image",
+            target="<image>",
+            replacement="<image>" * image_feature_size,
+        )
+        ```
+
+        As above, but further pad the feature placeholders with `<image_bos>`
+        and `<image_eos>`, which are not supposed to be passed to the vision
+        encoder:
+
+        ```python
+        PromptReplacement(
+            modality="image",
+            target="<image>",
+            replacement=PromptUpdateDetails.select_text(
+                "".join(
+                    [
+                        "<image_bos>",
+                        "<image>" * image_feature_size,
+                        "<image_eos>",
+                    ]
+                ),
+                embed_text="<image>",
+            ),
+        )
+        ```
+
+        To avoid unnecessary tokenization during prompt replacement,
+        we recommend passing token sequences instead of text:
+
+        ```python
+        PromptReplacement(
+            modality="image",
+            target=[image_token_id],
+            replacement=PromptUpdateDetails.select_token_id(
+                [image_bos_id] + [image_token_id] * image_feature_size + [image_eos_id],
+                embed_token_id=image_token_id,
+            ),
+        )
+        ```
+    """
+
+    replacement: PromptUpdateContent = field(repr=False)
+    """
+    Given the index of the processed item within
+    [`modality`][vllm.multimodal.processing.PromptUpdate.modality],
+    output the token sequence (or text) to replace
+    [`target`][vllm.multimodal.processing.PromptUpdate.target].
+
+    For convenience, you can directly pass in the token sequence (or text)
+    instead of a function if it does not depend on the input.
+    """
+
+    @property
+    def content(self) -> PromptUpdateContent:
+        return self.replacement
+
+    @property
+    def mode(self) -> UpdateMode:
+        return UpdateMode.REPLACE
+
+
+class _HasModalityAttr(Protocol):
+    modality: str
+
+
+class _HasModalityProp(Protocol):
+    @property
+    def modality(self) -> str: ...
+
+
+_M = TypeVar("_M", bound=_HasModalityAttr | _HasModalityProp)
+
+
+def full_groupby_modality(values: Iterable[_M]) -> ItemsView[str, list[_M]]:
+    """
+    Convenience function to apply
+    [`full_groupby`][vllm.utils.collection_utils.full_groupby]
+    based on modality.
+    """
+    return full_groupby(values, key=lambda x: x.modality)
+
+
+class PromptTargetMatch(NamedTuple):
+    start_idx: int
+    end_idx: int
+
+
+@dataclass(frozen=True)
+class ResolvedPromptUpdate:
+    """
+    A [`PromptUpdate`][vllm.multimodal.processing.PromptUpdate] with its
+    lazy attributes resolved, apart from those related to tokenization.
+    """
+
+    modality: str
+    """The modality for which the update is made."""
+
+    item_idx: int
+    """The index within `modality` of the item this update pertains to."""
+
+    mode: UpdateMode
+    """Defines how to update the prompt."""
+
+    target: UpdateTarget
+    """The token sequence (or text) to update."""
+
+    content: PromptUpdateDetails = field(repr=False)
+    """The placeholder tokens that are part of the update."""
+
+    def iter_token_matches(
+        self,
+        prompt: list[int],
+        tokenizer: TokenizerLike | None,
+        *,
+        start_idx: int = 0,
+    ) -> Generator[PromptTargetMatch]:
+        """Yield each instance of `self.target` found in `prompt`."""
+        target = self.target
+
+        if isinstance(target, PromptIndex):
+            match_idx = target.get_match_index(tokenizer, prompt, start_idx)
+            if match_idx is not None:
+                yield PromptTargetMatch(match_idx, match_idx)
+
+            return
+
+        target_token_ids = _seq2tokens(tokenizer, target)
+
+        for match in iter_token_matches(prompt, target_token_ids, start_idx=start_idx):
+            yield PromptTargetMatch(match.start_idx, match.end_idx)
+
+    def iter_text_matches(
+        self,
+        prompt: str,
+        tokenizer: TokenizerLike | None,
+        *,
+        start_idx: int = 0,
+    ) -> Generator[PromptTargetMatch]:
+        """Yield each instance of `self.target` found in `prompt`."""
+        target = self.target
+
+        if isinstance(target, PromptIndex):
+            match_idx = target.get_match_index(tokenizer, prompt, start_idx)
+            if match_idx is not None:
+                yield PromptTargetMatch(match_idx, match_idx)
+
+            return
+
+        target_text = _seq2text(tokenizer, target)
+
+        for match in re.finditer(re.escape(target_text), prompt, pos=start_idx):
+            yield PromptTargetMatch(match.start(), match.end())
+
+    def iter_matches(
+        self,
+        prompt: list[int] | str,
+        tokenizer: TokenizerLike | None,
+        *,
+        start_idx: int = 0,
+    ) -> Generator[PromptTargetMatch]:
+        """Yield each instance of `self.target` found in `prompt`."""
+        if isinstance(prompt, str):
+            return self.iter_text_matches(prompt, tokenizer, start_idx=start_idx)
+
+        return self.iter_token_matches(prompt, tokenizer, start_idx=start_idx)
+
+    def with_target(self, target: UpdateTarget):
+        return replace(self, target=target)
+
+    def with_content(self, content: PromptUpdateInfo):
+        if not isinstance(content, PromptUpdateDetails):
+            content = PromptUpdateDetails.from_seq(content)
+
+        return replace(self, content=content)
+
+
+class _TokenMatch(NamedTuple):
+    start_idx: int
+    end_idx: int
+
+
+def iter_token_matches(
+    token_ids: list[int],
+    match_ids: list[int],
+    *,
+    start_idx: int = 0,
+) -> Generator[_TokenMatch]:
+    """
+    Yield each occurrence of `match_ids` in `token_ids`.
+
+    Note that empty matches are ignored.
+    """
+    prompt_len = len(token_ids)
+    match_len = len(match_ids)
+
+    if match_len == 0:
+        return
+
+    while start_idx < prompt_len - match_len + 1:
+        end_idx = start_idx + match_len
+
+        if token_ids[start_idx:end_idx] == match_ids:
+            yield _TokenMatch(start_idx=start_idx, end_idx=end_idx)
+
+            # Exclude overlapping matches
+            start_idx = end_idx
+        else:
+            start_idx += 1
+
+
+def replace_token_matches(
+    token_ids: list[int],
+    match_ids: list[int],
+    new_ids: list[int],
+) -> list[int]:
+    """
+    Replace each occurrence of `match_ids` in `token_ids`
+    with `new_ids`.
+
+    Note that empty matches are ignored.
+    """
+    out_seqs = list[list[int]]()
+    prev_end_idx = 0
+
+    for match in iter_token_matches(token_ids, match_ids):
+        start_idx = match.start_idx
+        end_idx = match.end_idx
+
+        out_seqs.append(token_ids[prev_end_idx:start_idx])
+        out_seqs.append(new_ids)
+        prev_end_idx = end_idx
+
+    out_seqs.append(token_ids[prev_end_idx:])
+
+    return flatten_2d_lists(out_seqs)
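
`iter_token_matches` and `replace_token_matches` are pure functions over token-id lists, so their left-to-right, non-overlapping semantics can be checked directly. For example (assuming the import path `vllm.multimodal.processing`; the token ids are arbitrary):

```python
from vllm.multimodal.processing import iter_token_matches, replace_token_matches

tokens = [1, 9, 9, 9, 2]

# [9, 9] matches at 1..3; scanning resumes at index 3, so the overlap at 2..4 is skipped
matches = [(m.start_idx, m.end_idx) for m in iter_token_matches(tokens, [9, 9])]
assert matches == [(1, 3)]

# Each match is spliced out and replaced with new_ids
assert replace_token_matches(tokens, [9, 9], [7, 7, 7]) == [1, 7, 7, 7, 9, 2]

# Empty patterns never match
assert list(iter_token_matches(tokens, [])) == []
```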
+
+
+@dataclass
+class PlaceholderFeaturesInfo:
+    modality: str
+    item_idx: int
+    start_idx: int
+    tokens: list[int]
+    is_embed: torch.Tensor | None
+
+    @property
+    def length(self) -> int:
+        return len(self.tokens)
+
+    def to_range(self) -> PlaceholderRange:
+        # TODO: Is it worth it to optimize this by stripping the
+        # leading and ending positions where `is_embed=False`?
+        return PlaceholderRange(
+            offset=self.start_idx,
+            length=self.length,
+            is_embed=self.is_embed,
+        )
+
+
+_MatchToApply = tuple[tuple[str, int], tuple[PromptTargetMatch, int]]
+
+
+def _find_matches(
+    prompt: _S,
+    mm_prompt_updates: "MultiModalPromptUpdates",
+    tokenizer: TokenizerLike | None,
+    *,
+    prev_end_idx: int = 0,
+    current_result: "MultiModalPromptUpdatesApplyResult",
+) -> tuple[UpdateMode | None, list[_MatchToApply]]:
+    mode: UpdateMode | None = None
+    mm_matches = dict[tuple[str, int], tuple[PromptTargetMatch, int]]()
+
+    for modality, modality_updates in mm_prompt_updates.items():
+        for item_idx, item_updates in enumerate(modality_updates):
+            if current_result[modality][item_idx] is not None:
+                continue  # Updates have already been applied for this item
+
+            for update_idx, update in enumerate(item_updates):
+                if (modality, item_idx) in mm_matches:
+                    break  # Already found a match for this item
+
+                for match in update.iter_matches(
+                    prompt,
+                    tokenizer,
+                    start_idx=prev_end_idx,
+                ):
+                    # All matches should share the same mode
+                    if mode is None:
+                        mode = update.mode
+                    elif mode != update.mode:
+                        continue
+
+                    mm_matches[(modality, item_idx)] = match, update_idx
+                    break  # Get only the first valid match per item
+
+    # Prioritize earlier matches
+    matches_to_apply = sorted(mm_matches.items(), key=lambda item: item[1][0])
+
+    # To avoid conflicts, only replace one non-empty item at a time
+    if mode == UpdateMode.REPLACE:
+        matches_to_apply_ = list[_MatchToApply]()
+        has_non_empty_matches = False
+
+        for item in matches_to_apply:
+            _, (match, _) = item
+            if match.start_idx == match.end_idx:
+                matches_to_apply_.append(item)
+            elif not has_non_empty_matches:
+                has_non_empty_matches = True
+                matches_to_apply_.append(item)
+
+        matches_to_apply = matches_to_apply_
+
+    return mode, matches_to_apply
+
+
+def _all_items_found(
+    mm_item_counts: dict[str, int],
+    mm_found_counts: dict[str, int],
+) -> bool:
+    return all(
+        item_idx >= mm_item_counts[modality]
+        for modality, item_idx in mm_found_counts.items()
+    )
+
+
+def _apply_matches(
+    prompt: _S,
+    mm_prompt_updates: "MultiModalPromptUpdates",
+    tokenizer: TokenizerLike | None,
+) -> tuple[list[_S], "MultiModalPromptUpdatesApplyResult"]:
+    mm_item_counts = {m: len(items) for m, items in mm_prompt_updates.items()}
+
+    out_seqs = list[str | list[int]]()
+    out_result: MultiModalPromptUpdatesApplyResult = {
+        m: [None] * len(items) for m, items in mm_prompt_updates.items()
+    }
+
+    # Early exit if no items to find
+    mm_found_counts = {
+        m: sum(r is not None for r in res) for m, res in out_result.items()
+    }
+    if _all_items_found(mm_item_counts, mm_found_counts):
+        return [prompt], out_result
+
+    prev_end_idx = 0
+    while True:
+        mode, matches_to_apply = _find_matches(
+            prompt,
+            mm_prompt_updates,
+            tokenizer,
+            prev_end_idx=prev_end_idx,
+            current_result=out_result,
+        )
+
+        if mode is None:
+            break  # No more matches to find
+
+        for (modality, item_idx), (match, update_idx) in matches_to_apply:
+            matched_update = mm_prompt_updates[modality][item_idx][update_idx]
+            matched_content = matched_update.content.full
+
+            if mode == UpdateMode.INSERT:
+                end_idx_to_insert = match.end_idx
+            elif mode == UpdateMode.REPLACE:
+                end_idx_to_insert = match.start_idx
+            else:
+                assert_never(mode)
+
+            out_seqs.append(prompt[prev_end_idx:end_idx_to_insert])
+            out_seqs.append(
+                _seq2text(tokenizer, matched_content)
+                if isinstance(prompt, str)
+                else _seq2tokens(tokenizer, matched_content)
+            )
+            out_result[modality][item_idx] = update_idx
+
+            # Exclude overlapping matches
+            prev_end_idx = match.end_idx
+
+        # Early exit if all items found
+        mm_found_counts = {
+            m: sum(r is not None for r in res) for m, res in out_result.items()
+        }
+        if _all_items_found(mm_item_counts, mm_found_counts):
+            break
+
+    out_seqs.append(prompt[prev_end_idx:])
+
+    return cast(list[_S], out_seqs), out_result
+
+
+def apply_token_matches(
+    prompt: list[int],
+    mm_prompt_updates: "MultiModalPromptUpdates",
+    tokenizer: TokenizerLike | None,
+) -> tuple[list[int], "MultiModalPromptUpdatesApplyResult"]:
+    """
+    Apply the updates in `mm_prompt_updates` to `prompt`.
+
+    Matches are exclusive even when multiple modalities share
+    the same placeholder tokens. In that case, the modality that
+    appears earlier in `mm_prompt_updates` takes priority.
+    """
+    token_id_seqs, result = _apply_matches(prompt, mm_prompt_updates, tokenizer)
+
+    return flatten_2d_lists(token_id_seqs), result
+
+
+def apply_text_matches(
+    prompt: str,
+    mm_prompt_updates: "MultiModalPromptUpdates",
+    tokenizer: TokenizerLike | None,
+) -> tuple[str, "MultiModalPromptUpdatesApplyResult"]:
+    """
+    Apply the updates in `mm_prompt_updates` to `prompt`.
+
+    Matches are exclusive even when multiple modalities share
+    the same placeholder tokens. In that case, the modality that
+    appears earlier in `mm_prompt_updates` takes priority.
+    """
+    texts, result = _apply_matches(prompt, mm_prompt_updates, tokenizer)
+
+    return "".join(texts), result
+
+
+def _iter_placeholders(
+    prompt: list[int],
+    mm_prompt_updates: "MultiModalPromptUpdates",
+    tokenizer: TokenizerLike | None,
+) -> Iterable[PlaceholderFeaturesInfo]:
+    """
+    Yield each set of placeholder tokens found in `prompt`.
+
+    Matches are exclusive even when multiple modalities share
+    the same placeholder tokens. In that case, the modality that
+    appears earlier in `mm_prompt_updates` takes priority.
+
+    Note that empty matches are ignored.
+    """
+    mm_item_counts = {m: len(items) for m, items in mm_prompt_updates.items()}
+    item_idx_by_modality = {modality: 0 for modality in mm_prompt_updates}
+
+    if _all_items_found(mm_item_counts, item_idx_by_modality):
+        return
+
+    prompt_len = len(prompt)
+    start_idx = 0
+
+    while start_idx < prompt_len:
+        found = False
+
+        for modality, modality_updates in mm_prompt_updates.items():
+            item_idx = item_idx_by_modality[modality]
+            if item_idx >= mm_item_counts.get(modality, 0):
+                continue
+
+            for update in modality_updates[item_idx]:
+                content = update.content
+                content_tokens_full = _seq2tokens(tokenizer, content.full)
+                content_len_full = len(content_tokens_full)
+                end_idx_full = start_idx + content_len_full
+
+                if content_len_full == 0 or end_idx_full > prompt_len:
+                    continue
+
+                if prompt[start_idx:end_idx_full] == content_tokens_full:
+                    content_is_embed = content.is_embed
+                    if content_is_embed is not None:
+                        content_is_embed = content_is_embed(tokenizer, content.full)
+
+                    yield PlaceholderFeaturesInfo(
+                        modality=modality,
+                        item_idx=item_idx,
+                        start_idx=start_idx,
+                        tokens=content_tokens_full,
+                        is_embed=content_is_embed,
+                    )
+
+                    # Exclude overlapping matches
+                    start_idx = end_idx_full
+                    item_idx_by_modality[modality] += 1
+                    found = True
+                    break
+
+            if found:
+                if _all_items_found(mm_item_counts, item_idx_by_modality):
+                    return
+
+                break  # Go back to the outer while loop
+
+        if not found:
+            start_idx += 1
+
+
+def find_mm_placeholders(
+    prompt: list[int],
+    mm_prompt_updates: "MultiModalPromptUpdates",
+    tokenizer: TokenizerLike | None,
+) -> Mapping[str, list[PlaceholderFeaturesInfo]]:
+    it = _iter_placeholders(prompt, mm_prompt_updates, tokenizer)
+    return dict(full_groupby_modality(it))
+
+
+_T = TypeVar("_T")
+_C = TypeVar("_C", bound=PretrainedConfig, default=PretrainedConfig)
+_P = TypeVar("_P", bound=ProcessorMixin, default=ProcessorMixin)
+
+
+@dataclass(frozen=True)
+class InputProcessingContext:
+    """
+    Contains information about the model which may be used to
+    modify the inputs.
+    """
+
+    model_config: ModelConfig
+    """The configuration of the model."""
+
+    tokenizer: TokenizerLike | None
+    """The tokenizer used to tokenize the inputs."""
+
+    observability_config: "ObservabilityConfig | None" = field(
+        default=None, compare=False, repr=False
+    )
+    """Configuration for observability features."""
+
+    timing_stats_registry: dict[str, MultiModalProcessorTimingStats] = field(
+        default_factory=dict, compare=False, repr=False
+    )
+    """Registry for storing timing stats keyed by request_id."""
+
+    _timing_stats_registry_lock: threading.Lock = field(
+        default_factory=threading.Lock, compare=False, repr=False
+    )
+    """Lock for thread-safe access to timing_stats_registry."""
+
+    def get_tokenizer(self) -> TokenizerLike:
+        if self.tokenizer is None:
+            raise ValueError(
+                "You cannot pass text prompts when `skip_tokenizer_init=True`"
+            )
+
+        return self.tokenizer
+
+    @overload
+    def get_hf_config(self, /) -> PretrainedConfig: ...
+
+    @overload
+    def get_hf_config(
+        self,
+        typ: type[_C] | tuple[type[_C], ...],
+        /,
+    ) -> _C: ...
+
+    def get_hf_config(
+        self,
+        typ: type[Any] | tuple[type[Any], ...] | None = None,
+        /,
+    ) -> Any:
+        """
+        Get the HuggingFace configuration
+        (`transformers.PretrainedConfig`) of the model,
+        additionally checking its type.
+
+        Raises:
+            TypeError: If the configuration is not of the specified type.
+        """
+        if typ is None:
+            from transformers.configuration_utils import PretrainedConfig
+
+            typ = PretrainedConfig
+
+        hf_config = self.model_config.hf_config
+        if not isinstance(hf_config, typ):
+            raise TypeError(
+                "Invalid type of HuggingFace config. "
+                f"Expected type: {typ}, but "
+                f"found type: {type(hf_config)}"
+            )
+
+        return hf_config
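
The `@overload` pair on `get_hf_config` gives callers a statically narrowed return type, and the `isinstance` check enforces the same constraint at runtime. A self-contained sketch of that pattern with made-up config classes (not the transformers ones):

```python
from typing import Any, TypeVar, overload

class BaseConfig: ...
class VisionConfig(BaseConfig): ...

_C = TypeVar("_C", bound=BaseConfig)

@overload
def get_config(cfg: BaseConfig) -> BaseConfig: ...
@overload
def get_config(cfg: BaseConfig, typ: type[_C]) -> _C: ...
def get_config(cfg: BaseConfig, typ: type[Any] | None = None) -> Any:
    if typ is None:
        typ = BaseConfig
    # The runtime check backs up the static narrowing from the overloads
    if not isinstance(cfg, typ):
        raise TypeError(f"Expected type: {typ}, but found type: {type(cfg)}")
    return cfg

vision_cfg = get_config(VisionConfig(), VisionConfig)  # statically typed as VisionConfig
try:
    get_config(BaseConfig(), VisionConfig)  # wrong class is rejected at runtime
except TypeError:
    pass
```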
|
|
1139
|
+
|
|
1140
|
+
def get_hf_image_processor_config(self) -> dict[str, Any]:
|
|
1141
|
+
"""
|
|
1142
|
+
Get the HuggingFace image processor configuration of the model.
|
|
1143
|
+
"""
|
|
1144
|
+
return self.model_config.hf_image_processor_config
|
|
1145
|
+
|
|
1146
|
+
def get_mm_config(self):
|
|
1147
|
+
"""
|
|
1148
|
+
Get the multimodal config of the model.
|
|
1149
|
+
|
|
1150
|
+
Raises:
|
|
1151
|
+
RuntimeError: If the model is not a multimodal model.
|
|
1152
|
+
"""
|
|
1153
|
+
mm_config = self.model_config.multimodal_config
|
|
1154
|
+
if mm_config is None:
|
|
1155
|
+
raise RuntimeError("Not a multimodal model")
|
|
1156
|
+
|
|
1157
|
+
return mm_config
|
|
1158
|
+
|
|
1159
|
+
@overload
|
|
1160
|
+
def get_hf_processor(self, /, **kwargs: object) -> ProcessorMixin: ...
|
|
1161
|
+
|
|
1162
|
+
@overload
|
|
1163
|
+
def get_hf_processor(
|
|
1164
|
+
self,
|
|
1165
|
+
typ: type[_P] | tuple[type[_P], ...],
|
|
1166
|
+
/,
|
|
1167
|
+
**kwargs: object,
|
|
1168
|
+
) -> _P: ...
|
|
1169
|
+
|
|
1170
|
+
def get_hf_processor(
|
|
1171
|
+
self,
|
|
1172
|
+
typ: type[Any] | tuple[type[Any], ...] | None = None,
|
|
1173
|
+
/,
|
|
1174
|
+
**kwargs: object,
|
|
1175
|
+
) -> Any:
|
|
1176
|
+
"""
|
|
1177
|
+
Get the HuggingFace processor
|
|
1178
|
+
(`transformers.ProcessorMixin`) of the model,
|
|
1179
|
+
additionally checking its type.
|
|
1180
|
+
|
|
1181
|
+
Raises:
|
|
1182
|
+
TypeError: If the processor is not of the specified type.
|
|
1183
|
+
"""
|
|
1184
|
+
if typ is None:
|
|
1185
|
+
from transformers.processing_utils import ProcessorMixin
|
|
1186
|
+
|
|
1187
|
+
typ = ProcessorMixin
|
|
1188
|
+
|
|
1189
|
+
from vllm.tokenizers.mistral import MistralTokenizer
|
|
1190
|
+
|
|
1191
|
+
tokenizer = self.tokenizer
|
|
1192
|
+
if isinstance(tokenizer, MistralTokenizer):
|
|
1193
|
+
tokenizer = tokenizer.transformers_tokenizer
|
|
1194
|
+
|
|
1195
|
+
return cached_processor_from_config(
|
|
1196
|
+
self.model_config,
|
|
1197
|
+
processor_cls=typ,
|
|
1198
|
+
tokenizer=tokenizer,
|
|
1199
|
+
**kwargs,
|
|
1200
|
+
)
|
|
1201
|
+
|
|
1202
|
+
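    # Illustrative usage (not part of the original source): the two getters
    # above narrow the returned type and raise TypeError on a mismatch, e.g.
    #
    #     cfg = ctx.get_hf_config()                 # any PretrainedConfig
    #     proc = ctx.get_hf_processor(MyProcessor)  # must be a MyProcessor
    #
    # where `ctx` is this InputProcessingContext and `MyProcessor` is a
    # hypothetical ProcessorMixin subclass used only for illustration.
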
    def init_processor(
        self,
        typ: type[_T],
        /,
        **kwargs: object,
    ) -> _T:
        """
        Initialize a HuggingFace-like processor class, merging the
        keyword arguments with those in the model's configuration.
        """
        mm_config = self.model_config.get_multimodal_config()
        base_kwargs = mm_config.mm_processor_kwargs
        if base_kwargs is None:
            base_kwargs = {}

        merged_kwargs = {**base_kwargs, **kwargs}

        return typ(**merged_kwargs)

    def _postprocess_output(
        self,
        output: JSONTree,
    ) -> JSONTree:
        def _postprocess_one(x: object):
            if isinstance(x, torch.Tensor):  # noqa: SIM102
                # This mimics the behavior of transformers.BatchFeature
                if x.is_floating_point():
                    x = x.to(dtype=self.model_config.dtype)

            return x

        return json_map_leaves(_postprocess_one, output)

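    # A minimal sketch (not in the original source) of the kwargs precedence
    # in `init_processor` above: call-site kwargs win over the model config's
    # `mm_processor_kwargs` because they are unpacked last:
    #
    #     merged = {**{"num_crops": 4}, **{"num_crops": 16}}
    #     assert merged == {"num_crops": 16}
    #
    # (`num_crops` is only an example key, not a required parameter.)
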
    def call_hf_processor(
        self,
        hf_processor: ProcessorMixin,
        data: Mapping[str, object],
        kwargs: Mapping[str, object] = {},
        *,
        num_tries: int = 1,
        max_tries: int = 5,
    ) -> BatchFeature | JSONTree:
        """
        Call `hf_processor` on the prompt `data`
        (text, image, audio...) with configurable options `kwargs`.
        """
        assert callable(hf_processor)

        mm_config = self.model_config.get_multimodal_config()
        merged_kwargs = mm_config.merge_mm_processor_kwargs(kwargs)

        allowed_kwargs = get_allowed_kwarg_only_overrides(
            hf_processor,
            merged_kwargs,
            requires_kw_only=False,
            allow_var_kwargs=True,
        )

        try:
            output = hf_processor(**data, **allowed_kwargs, return_tensors="pt")
        except Exception as exc:
            # See https://github.com/huggingface/tokenizers/issues/537
            if (
                isinstance(exc, RuntimeError)
                and exc.args
                and exc.args[0] == "Already borrowed"
                and num_tries < max_tries
            ):
                logger.warning(
                    "Failed to acquire tokenizer in current thread. "
                    "Retrying (%d/%d)...",
                    num_tries,
                    max_tries,
                )
                time.sleep(0.5)
                return self.call_hf_processor(
                    hf_processor,
                    data,
                    kwargs,
                    num_tries=num_tries + 1,
                    max_tries=max_tries,
                )

            msg = (
                f"Failed to apply {type(hf_processor).__name__} "
                f"on data={data} with kwargs={allowed_kwargs}"
            )

            raise ValueError(msg) from exc

        # this emulates output.to(dtype=self.model_config.dtype)
        from transformers.feature_extraction_utils import BatchFeature

        if isinstance(output, BatchFeature):
            output_ = self._postprocess_output(output.data)
            return BatchFeature(output_)

        logger.warning_once(
            "%s did not return `BatchFeature`. "
            "Make sure to match the behaviour of `ProcessorMixin` when "
            "implementing custom processors.",
            type(hf_processor).__name__,
        )

        return self._postprocess_output(output)

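    # Note on the retry loop above (editorial comment, not original source):
    # fast tokenizers can raise RuntimeError("Already borrowed") under
    # concurrent access (see the linked tokenizers issue). With the defaults
    # num_tries=1 and max_tries=5, a call retries up to 4 times with a 0.5 s
    # sleep each, so it waits at most ~2 s before surfacing a ValueError.
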
    def get_timing_stats(
        self, request_id: str
    ) -> MultiModalProcessorTimingStats | None:
        """
        Get timing stats for a request.
        """
        if (
            self.observability_config is None
            or not self.observability_config.enable_mm_processor_stats
        ):
            return None
        with self._timing_stats_registry_lock:
            return self.timing_stats_registry.get(request_id)

    def create_timing_stats(self, request_id: str) -> MultiModalProcessorTimingStats:
        """
        Create and store timing stats in the registry for a request.

        This should be called at the start of processing for a request.
        The stats object is created immediately and stored in the registry.
        """
        if (
            self.observability_config is None
            or not self.observability_config.enable_mm_processor_stats
        ):
            return MultiModalProcessorTimingStats()

        with self._timing_stats_registry_lock:
            if request_id in self.timing_stats_registry:
                raise ValueError(
                    f"Timing stats already exist for request_id: {request_id}"
                )
            stats = MultiModalProcessorTimingStats()
            self.timing_stats_registry[request_id] = stats
            return stats

    def clear_timing_stats_registry(self) -> int:
        """
        Clear all stats from the registry. Returns the number of stats cleared.
        """
        if (
            self.observability_config is None
            or not self.observability_config.enable_mm_processor_stats
        ):
            return 0
        with self._timing_stats_registry_lock:
            count = len(self.timing_stats_registry)
            self.timing_stats_registry.clear()
            return count

    def get_all_timing_stats(self) -> dict[str, dict[str, float]]:
        """
        Get all timing stats as a dictionary for API endpoints.
        """
        if (
            self.observability_config is None
            or not self.observability_config.enable_mm_processor_stats
        ):
            return {}
        with self._timing_stats_registry_lock:
            return {
                rid: stats.to_dict()
                for rid, stats in self.timing_stats_registry.items()
            }

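# Illustrative timing-stats lifecycle (a sketch, not part of the original
# source); stats are only recorded when
# `observability_config.enable_mm_processor_stats` is enabled:
#
#     stats = ctx.create_timing_stats("req-0")  # at the start of a request
#     ...                                       # processing happens here
#     ctx.get_timing_stats("req-0")             # per-request lookup
#     ctx.get_all_timing_stats()                # e.g. for an API endpoint
#     ctx.clear_timing_stats_registry()         # returns the number cleared
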
class BaseProcessingInfo:
    """Base class to provide the information necessary for data processing."""

    def __init__(self, ctx: InputProcessingContext) -> None:
        super().__init__()

        self.ctx = ctx

    @property
    def model_id(self) -> str:
        return self.ctx.model_config.model

    def get_tokenizer(self) -> TokenizerLike:
        return self.ctx.get_tokenizer()

    def get_hf_config(self) -> PretrainedConfig:
        return self.ctx.get_hf_config()

    def get_hf_processor(self, **kwargs: object) -> ProcessorMixin:
        """
        Subclasses can override this method to handle
        specific kwargs from model config or user inputs.
        """
        return self.ctx.get_hf_processor(**kwargs)

    @abstractmethod
    def get_supported_mm_limits(self) -> Mapping[str, int | None]:
        """
        Return the maximum supported number of items for each modality.

        A value of `None` means unlimited number of items.

        Omitting a modality from the returned dictionary means that
        it is not supported at all.
        """
        raise NotImplementedError

    def get_allowed_mm_limits(self) -> Mapping[str, int]:
        """Return the maximum allowed number of items for each modality."""
        supported_mm_limits = self.get_supported_mm_limits()
        mm_config = self.ctx.get_mm_config()

        allowed_limits = dict[str, int]()
        for modality, supported_limit in supported_mm_limits.items():
            user_limit = mm_config.get_limit_per_prompt(modality)

            allowed_limits[modality] = (
                user_limit
                if supported_limit is None
                else min(user_limit, supported_limit)
            )

        return allowed_limits

    def get_mm_max_tokens_per_item(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
    ) -> Mapping[str, int] | None:
        """
        Return the maximum number of tokens per item for each modality.

        When `None` (the default) is returned, vLLM will generate dummy inputs
        (images/videos) at maximum possible sizes and process them to determine
        the maximum token count per modality.

        This approach works but can be very slow for certain models (e.g.,
        Qwen2.5-VL), leading to very long startup time. For better performance,
        each model can override this method to return pre-computed maximum token
        counts, avoiding the need for dummy input generation and processing.

        Note:
            The maximum number of tokens per item of each modality returned
            from this function should respect the model's maximum sequence
            length and the maximum number of items of each modality allowed,
            and agree with dummy inputs (images/videos) at maximum possible
            sizes.
        """
        return None

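# A minimal subclass sketch (illustrative only) of the two hooks above:
#
#     class MyProcessingInfo(BaseProcessingInfo):
#         def get_supported_mm_limits(self):
#             return {"image": None, "video": 1}  # unlimited images, 1 video
#
#         def get_mm_max_tokens_per_item(self, seq_len, mm_counts):
#             return {"image": 576, "video": 1024}  # hypothetical counts
#
# Returning pre-computed counts skips the slow dummy-input profiling path.
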
_I = TypeVar("_I", bound=BaseProcessingInfo)

MultiModalHashes = dict[str, list[str]]
"""
A collection of the multi-modal hash for each item, with a similar structure as
[`MultiModalKwargsItems`][vllm.multimodal.inputs.MultiModalKwargsItems].
"""

MultiModalIsCached = dict[str, list[bool]]
"""
A collection of the `is_cached` flag for each item, with a similar structure as
[`MultiModalKwargsItems`][vllm.multimodal.inputs.MultiModalKwargsItems].
"""

MultiModalPromptUpdates = Mapping[str, list[Sequence[ResolvedPromptUpdate]]]
"""
A collection of prompt updates with a similar structure as
[`MultiModalKwargsItems`][vllm.multimodal.inputs.MultiModalKwargsItems].
"""

MultiModalPromptUpdatesApplyResult = Mapping[str, list[int | None]]
"""
For an item `MultiModalPromptUpdates[k][i]`,
`MultiModalPromptUpdatesApplyResult[k][i]` represents the index of the
`ResolvedPromptUpdate` instance that has been applied, or `None` if none of the
`ResolvedPromptUpdate` instances have been applied.
"""


class MultiModalProcessingInfo(NamedTuple):
    kwargs: MultiModalKwargsOptionalItems
    hashes: MultiModalHashes
    prompt_updates: MultiModalPromptUpdates


class BaseMultiModalProcessor(ABC, Generic[_I]):
    """
    Abstract base class to process multi-modal inputs to be used in vLLM.

    Not to be confused with `transformers.ProcessorMixin`.
    """

    def __init__(
        self,
        info: _I,
        dummy_inputs: "BaseDummyInputsBuilder[_I]",
        *,
        cache: BaseMultiModalProcessorCache | None = None,
    ) -> None:
        super().__init__()

        self.info = info
        self.dummy_inputs = dummy_inputs
        self.cache = cache

        self.data_parser = self._get_data_parser()

        # Avoid unnecessary recomputation
        self._supported_mm_limits = self.info.get_supported_mm_limits()
        self._allowed_mm_limits = self.info.get_allowed_mm_limits()

    @property
    def supported_mm_limits(self):
        return self._supported_mm_limits

    @property
    def allowed_mm_limits(self):
        return self._allowed_mm_limits

    def __call__(
        self,
        prompt: str,
        mm_data: MultiModalDataDict,
        hf_processor_mm_kwargs: Mapping[str, object],
        *,
        mm_uuids: MultiModalUUIDDict | None = None,
    ) -> MultiModalInputs:
        return self.apply(prompt, mm_data, hf_processor_mm_kwargs, mm_uuids=mm_uuids)

    def _get_data_parser(self) -> MultiModalDataParser:
        """
        Construct a parser to preprocess multi-modal data items
        before passing them to
        [`_get_hf_mm_data`][vllm.multimodal.processing.BaseMultiModalProcessor._get_hf_mm_data].

        You can support additional modalities by creating a subclass
        of [`MultiModalDataParser`][vllm.multimodal.parse.MultiModalDataParser]
        that has additional subparsers.
        """
        # Get expected hidden size for embedding validation if mm_embeds enabled
        # This validates hidden dimensions to prevent vulnerabilities: embeddings
        # with correct ndim but wrong shape could cause crashes at inference time
        mm_config = self.info.ctx.model_config.get_multimodal_config()
        expected_hidden_size = None
        if mm_config.enable_mm_embeds:
            expected_hidden_size = self.info.ctx.model_config.get_inputs_embeds_size()

        return MultiModalDataParser(expected_hidden_size=expected_hidden_size)

    def validate_num_items(
        self,
        modality: str,
        num_items: int,
    ) -> None:
        supported_limit = self.supported_mm_limits.get(modality, 0)
        allowed_limit = self.allowed_mm_limits.get(modality, 0)

        if supported_limit is None:
            supported_limit = allowed_limit

        limit = min(supported_limit, allowed_limit)

        if num_items > limit:
            msg = f"At most {limit} {modality}(s) may be provided in one prompt."

            if num_items <= supported_limit:
                msg += " Set `--limit-mm-per-prompt` to increase this limit."

            raise ValueError(msg)

    def _to_mm_items(
        self,
        mm_data: MultiModalDataDict,
    ) -> MultiModalDataItems:
        """
        Normalize
        [`MultiModalDataDict`][vllm.multimodal.inputs.MultiModalDataDict]
        to [`MultiModalDataItems`][vllm.multimodal.parse.MultiModalDataItems]
        before passing them to
        [`_get_hf_mm_data`][vllm.multimodal.processing.BaseMultiModalProcessor._get_hf_mm_data].
        """
        mm_items = self.data_parser.parse_mm_data(mm_data)

        mm_config = self.info.ctx.model_config.get_multimodal_config()
        if not mm_config.enable_mm_embeds:
            for modality, items in mm_items.items():
                if isinstance(items, (EmbeddingItems, DictEmbeddingItems)):
                    raise ValueError(
                        f"You must set `--enable-mm-embeds` to input "
                        f"`{modality}_embeds`"
                    )

        for modality, items in mm_items.items():
            self.validate_num_items(modality, len(items))

        return mm_items

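    # Worked example for `validate_num_items` above (illustrative): if the
    # model supports up to 5 images but `--limit-mm-per-prompt` allows 3,
    # the effective limit is min(5, 3) == 3. Passing 4 images then raises
    # ValueError, and the hint about `--limit-mm-per-prompt` is included
    # because 4 <= 5, i.e. the model itself could support more.
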
    @abstractmethod
    def _get_mm_fields_config(
        self,
        hf_inputs: BatchFeature,
        hf_processor_mm_kwargs: Mapping[str, object],
    ) -> Mapping[str, MultiModalFieldConfig]:
        """Given the HF-processed data, output the metadata of each field."""
        raise NotImplementedError

    @abstractmethod
    def _get_prompt_updates(
        self,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, object],
        out_mm_kwargs: MultiModalKwargsItems,
    ) -> Sequence[PromptUpdate]:
        """
        Given the original multi-modal items for this modality
        and HF-processed data, output the updates to perform.

        The information returned by this method is used to update token inputs
        which bypass the HF processor. It is also used to update the output of
        the HF processor if the HF processor does not apply prompt updates to
        text inputs.

        Moreover, this information is critical to determine the token positions
        in order to construct
        [`PlaceholderRange`][vllm.multimodal.inputs.PlaceholderRange]
        for each multi-modal item.
        """
        raise NotImplementedError

    def _bind_and_group_updates(
        self,
        prompt_updates: Sequence[PromptUpdate],
        mm_item_counts: Mapping[str, int],
    ) -> MultiModalPromptUpdates:
        return {
            modality: [
                [update.resolve(item_idx) for update in updates]
                for item_idx in range(mm_item_counts.get(modality, 0))
            ]
            for modality, updates in full_groupby_modality(prompt_updates)
        }

    def _get_mm_prompt_updates(
        self,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, object],
        out_mm_kwargs: MultiModalKwargsItems,
    ) -> MultiModalPromptUpdates:
        unbound_prompt_updates = self._get_prompt_updates(
            mm_items=mm_items,
            hf_processor_mm_kwargs=hf_processor_mm_kwargs,
            out_mm_kwargs=out_mm_kwargs,
        )

        mm_prompt_updates = self._bind_and_group_updates(
            unbound_prompt_updates,
            mm_items.get_all_counts(),
        )

        for modality, prompt_updates in mm_prompt_updates.items():
            for item_idx, item_prompt_updates in enumerate(prompt_updates):
                if len(item_prompt_updates) > 1:
                    logger.warning_once(
                        "Detected %d prompt updates for `mm_items[%r][%s]`. "
                        "Multiple prompt updates per item is now "
                        "deprecated and may be removed in v0.13. "
                        "Instead, please specify dynamic update targets "
                        "in the same prompt update definition by passing "
                        "a function to `PromptUpdate.target`.",
                        len(prompt_updates),
                        modality,
                        item_idx,
                    )

        return mm_prompt_updates

    def _find_mm_placeholders(
        self,
        new_token_ids: list[int],
        mm_prompt_updates: MultiModalPromptUpdates,
    ) -> Mapping[str, list[PlaceholderFeaturesInfo]]:
        tokenizer = self.info.get_tokenizer()

        return find_mm_placeholders(new_token_ids, mm_prompt_updates, tokenizer)

    def _get_hf_mm_data(
        self,
        mm_items: MultiModalDataItems,
    ) -> tuple[Mapping[str, object], Mapping[str, object]]:
        processor_data = dict[str, object]()
        passthrough_data = dict[str, object]()

        for items in mm_items.values():
            processor_data.update(items.get_processor_data())
            passthrough_data.update(items.get_passthrough_data())

        return processor_data, passthrough_data

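    # Shape of the `MultiModalPromptUpdates` mapping built by
    # `_bind_and_group_updates` above (an illustrative sketch): one inner
    # list per item of a modality, each holding that item's resolved
    # updates, e.g.
    #
    #     {"image": [[<update for image 0>], [<update for image 1>]]}
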
    def _call_hf_processor(
        self,
        prompt: str,
        # Not to be confused with `mm_data` in `self.apply`.
        # This refers to the data to be passed to HF processor.
        mm_data: Mapping[str, object],
        mm_kwargs: Mapping[str, object],
        tok_kwargs: Mapping[str, object],
    ) -> BatchFeature:
        """
        Call the HF processor on the prompt text and
        associated multi-modal data.
        """
        with _timed_operation(self.info.ctx, "hf_processor"):
            return self.info.ctx.call_hf_processor(
                self.info.get_hf_processor(**mm_kwargs),
                dict(text=prompt, **mm_data),
                dict(**mm_kwargs, **tok_kwargs),
            )

    def _hf_processor_applies_updates(
        self,
        prompt_text: str,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, object],
        tokenization_kwargs: Mapping[str, object],
    ) -> bool:
        """
        Return whether the HF processor applies prompt updates.

        For most HF processors, this should be `True` when multi-modal
        data items are passed, but `False` when multi-modal embeddings
        are passed.
        """
        return not any(
            isinstance(items, (EmbeddingItems, DictEmbeddingItems))
            for items in mm_items.values()
        )

    def _apply_hf_processor_text_mm(
        self,
        prompt_text: str,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, object],
        tokenization_kwargs: Mapping[str, object],
    ) -> tuple[list[int], BatchFeature, bool]:
        """
        Apply the HF processor on the prompt text and multi-modal data
        together.

        In addition, return whether prompt updates have been applied.
        """
        processor_data, passthrough_data = self._get_hf_mm_data(mm_items)

        processed_data = self._call_hf_processor(
            prompt=prompt_text,
            mm_data=processor_data,
            mm_kwargs=hf_processor_mm_kwargs,
            tok_kwargs=tokenization_kwargs,
        )
        processed_data.update(passthrough_data)

        (prompt_ids,) = processed_data.pop("input_ids").tolist()

        is_update_applied = self._hf_processor_applies_updates(
            prompt_text=prompt_text,
            mm_items=mm_items,
            hf_processor_mm_kwargs=hf_processor_mm_kwargs,
            tokenization_kwargs=tokenization_kwargs,
        )

        return prompt_ids, processed_data, is_update_applied

    def _apply_hf_processor_text_only(
        self,
        prompt_text: str,
        tokenization_kwargs: Mapping[str, object],
    ) -> list[int]:
        """
        Apply the HF processor on the prompt text only.

        Since HF processor requires that text and multi-modal items
        correspond to each other, we create dummy multi-modal items
        to go along with the text.
        """
        prompt_ids, _, _ = self._apply_hf_processor_text_mm(
            prompt_text=prompt_text,
            mm_items=MultiModalDataItems({}),
            hf_processor_mm_kwargs={},
            tokenization_kwargs=tokenization_kwargs,
        )

        return prompt_ids

    def _apply_hf_processor_tokens_only(
        self,
        prompt_tokens: list[int],
    ) -> list[int]:
        """
        Apply the HF processor on the prompt tokens only.

        Most HF processors accept prompt text but not prompt tokens.
        If the HF processor adds or removes tokens that are not related to
        multi-modal data, you should override this method so it is consistent
        with the output of
        [`_apply_hf_processor_text_only`][vllm.multimodal.processing.BaseMultiModalProcessor._apply_hf_processor_text_only]
        on the corresponding text.
        """
        return prompt_tokens

    def _apply_hf_processor_mm_only(
        self,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, object],
        tokenization_kwargs: Mapping[str, object],
    ) -> BatchFeature:
        """
        Apply the HF processor on the multi-modal data only.

        Since HF processor requires that text and multi-modal items
        correspond to each other, we generate dummy text using
        [`DummyInputsBuilder`][vllm.multimodal.profiling.BaseDummyInputsBuilder]
        to go along with the multi-modal data.
        """
        mm_counts = mm_items.get_all_counts()

        _, mm_processed_data, _ = self._apply_hf_processor_text_mm(
            prompt_text=self.dummy_inputs.get_dummy_text(mm_counts),
            mm_items=mm_items,
            hf_processor_mm_kwargs=hf_processor_mm_kwargs,
            tokenization_kwargs=tokenization_kwargs,
        )

        return mm_processed_data

    def _apply_hf_processor_main(
        self,
        prompt: str | list[int],
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, object],
        tokenization_kwargs: Mapping[str, object],
        *,
        enable_hf_prompt_update: bool,
    ) -> tuple[list[int], BatchFeature, bool]:
        """
        Apply the HF processor on the prompt text and multi-modal data.

        In addition, return whether prompt updates have been applied
        (for most HF processors, this should be `True`).

        Note:
            If `enable_hf_prompt_update=True`, we use the HF processor
            to perform prompt updates on text prompts; the HF processor
            requires that the prompt corresponds to the multi-modal items.
        """
        if isinstance(prompt, str):
            if enable_hf_prompt_update:
                return self._apply_hf_processor_text_mm(
                    prompt_text=prompt,
                    mm_items=mm_items,
                    hf_processor_mm_kwargs=hf_processor_mm_kwargs,
                    tokenization_kwargs=tokenization_kwargs,
                )

            prompt_ids = self._apply_hf_processor_text_only(prompt, tokenization_kwargs)
        else:
            prompt_ids = self._apply_hf_processor_tokens_only(prompt)

        mm_processed_data = self._apply_hf_processor_mm_only(
            mm_items=mm_items,
            hf_processor_mm_kwargs=hf_processor_mm_kwargs,
            tokenization_kwargs=tokenization_kwargs,
        )

        return prompt_ids, mm_processed_data, False

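    # Dispatch summary for `_apply_hf_processor_main` above (editorial
    # sketch, not original source):
    #
    #     text prompt,  enable_hf_prompt_update=True  -> text+mm together,
    #                                                    returns True
    #     text prompt,  enable_hf_prompt_update=False -> text-only pass,
    #                                                    then mm-only; False
    #     token prompt, (any)                         -> tokens-only pass,
    #                                                    then mm-only; False
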
    def _hash_mm_items(
        self,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, object],
        tokenization_kwargs: Mapping[str, object],
        *,
        mm_uuids: MultiModalUUIDDict | None = None,
    ) -> MultiModalHashes:
        """Create MM hashes to be returned.

        Note: When overrides are provided via callers of `apply`,
        `_hash_mm_items` will be bypassed and the overrides will be used.
        """
        model_id = self.info.model_id

        hashes: MultiModalHashes = {}
        mm_uuids = mm_uuids or {}

        for modality, items in mm_items.items():
            if modality in mm_uuids:
                mm_uuids_per_modality = mm_uuids[modality]
                if isinstance(mm_uuids_per_modality, str):
                    mm_uuids_per_modality = [mm_uuids_per_modality]

                # For None entries, compute a hash; otherwise, use provided ID.
                computed: list[str] = []
                for i, item in enumerate(items.get_all_items_for_hash()):
                    item_uuid = mm_uuids_per_modality[i]

                    # NOTE: Even if an item_uuid is provided, we still compute a
                    # hash if `hf_processor_mm_kwargs` or `tokenization_kwargs`
                    # are provided. This is because the processed multimodal
                    # inputs can be different depending on the processor kwargs.
                    if (
                        item_uuid is None
                        or hf_processor_mm_kwargs
                        or tokenization_kwargs
                    ):
                        # NOTE: use provided hash string to hash with kwargs
                        # if available for better performance.
                        item = item_uuid if item_uuid is not None else item
                        computed.append(
                            MultiModalHasher.hash_kwargs(
                                model_id=model_id,
                                **{modality: item},
                                **hf_processor_mm_kwargs,
                                **tokenization_kwargs,
                            )
                        )
                    else:
                        computed.append(item_uuid)
                hashes[modality] = computed
            else:
                hashes[modality] = [
                    MultiModalHasher.hash_kwargs(
                        model_id=model_id,
                        **{modality: item},
                        **hf_processor_mm_kwargs,
                        **tokenization_kwargs,
                    )
                    for item in items
                ]

        return hashes

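    # Illustrative `mm_uuids` behavior for `_hash_mm_items` above: when both
    # kwarg mappings are empty, a provided UUID is used verbatim as the item
    # hash; once kwargs are present, the UUID is folded into a fresh hash,
    # since processor kwargs change the processed outputs:
    #
    #     processor._hash_mm_items(items, {}, {}, mm_uuids={"image": ["id-0"]})
    #     # -> {"image": ["id-0"]}
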
    def _get_cache_missing_items(
        self,
        cache: BaseMultiModalProcessorCache,
        mm_data_items: MultiModalDataItems,
        mm_hashes: MultiModalHashes,
    ) -> tuple[MultiModalIsCached, MultiModalDataItems]:
        mm_is_cached = {
            modality: cache.is_cached(hashes) for modality, hashes in mm_hashes.items()
        }

        mm_missing_idxs = {
            modality: [
                idx
                for idx, item_is_cached in enumerate(items_is_cached)
                if not item_is_cached
            ]
            for modality, items_is_cached in mm_is_cached.items()
        }
        mm_missing_data = {}
        for modality, idxs in mm_missing_idxs.items():
            missing_modality_data = []
            for idx in idxs:
                data = mm_data_items[modality][idx]
                if data is None:
                    raise ValueError(
                        f"Cache miss for {modality} at index {idx} "
                        "but data is not provided."
                    )
                else:
                    missing_modality_data.append(data)
            mm_missing_data[modality] = missing_modality_data

        return mm_is_cached, self._to_mm_items(mm_missing_data)

    def _recompute_cached_prompt_update(
        self,
        cached_update: ResolvedPromptUpdate,
        new_item_idx: int,
    ) -> ResolvedPromptUpdate:
        """
        Override this if other attributes of `ResolvedPromptUpdate`
        also need to be recomputed after retrieving from the cache.
        """
        return replace(cached_update, item_idx=new_item_idx)

    def _merge_mm_kwargs(
        self,
        cache: BaseMultiModalProcessorCache,
        mm_hashes: MultiModalHashes,
        mm_is_cached: MultiModalIsCached,
        mm_missing_kwargs: MultiModalKwargsItems,
        mm_missing_prompt_updates: MultiModalPromptUpdates,
    ) -> tuple[MultiModalKwargsOptionalItems, MultiModalPromptUpdates]:
        # Touch all mm hashes before updating so that hashes still in the
        # updated list are not evicted mid-update.
        for hashes in mm_hashes.values():
            for item_hash in hashes:
                cache.touch_sender_cache_item(item_hash)

        mm_missing_next_idx = defaultdict[str, int](lambda: 0)

        merged_kwargs = defaultdict[str, list[MultiModalKwargsItem | None]](list)
        merged_prompt_updates = defaultdict[str, list[Sequence[ResolvedPromptUpdate]]](
            list
        )
        for modality, hashes in mm_hashes.items():
            missing_kwargs = mm_missing_kwargs.get(modality, [])
            missing_prompt_updates = mm_missing_prompt_updates.get(modality, [])

            for item_idx, item_hash in enumerate(hashes):
                if not mm_is_cached[modality][item_idx]:
                    missing_next_idx = mm_missing_next_idx[modality]
                    missing_kwargs_item = missing_kwargs[missing_next_idx]
                    missing_updates_item = missing_prompt_updates[missing_next_idx]

                    mm_missing_next_idx[modality] += 1

                    item = missing_kwargs_item, missing_updates_item
                else:
                    item = None

                kwargs, updates = cache.get_and_update_item(item, item_hash)

                merged_kwargs[modality].append(kwargs)
                merged_prompt_updates[modality].append(
                    [
                        self._recompute_cached_prompt_update(update, item_idx)
                        for update in updates
                    ]
                )

        mm_kwargs = MultiModalKwargsItems(merged_kwargs)
        mm_prompt_updates = dict(merged_prompt_updates)

        return mm_kwargs, mm_prompt_updates

    def _apply_hf_processor(
        self,
        prompt: str | list[int],
        mm_data_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, object],
        tokenization_kwargs: Mapping[str, object],
        *,
        mm_uuids: MultiModalUUIDDict | None = None,
    ) -> tuple[list[int], MultiModalProcessingInfo, bool]:
        (
            prompt_ids,
            mm_processed_data,
            is_update_applied,
        ) = self._apply_hf_processor_main(
            prompt=prompt,
            mm_items=mm_data_items,
            hf_processor_mm_kwargs=hf_processor_mm_kwargs,
            tokenization_kwargs=tokenization_kwargs,
            enable_hf_prompt_update=True,
        )

        mm_kwargs = MultiModalKwargsItems.from_hf_inputs(
            mm_processed_data,
            self._get_mm_fields_config(mm_processed_data, hf_processor_mm_kwargs),
        )

        # Use overrides if provided; fallback to data-dependent hashing.
        with _timed_operation(self.info.ctx, "hashing"):
            mm_hashes = self._hash_mm_items(
                mm_data_items,
                hf_processor_mm_kwargs,
                tokenization_kwargs,
                mm_uuids=mm_uuids,
            )

        mm_prompt_updates = self._get_mm_prompt_updates(
            mm_data_items,
            hf_processor_mm_kwargs,
            mm_kwargs,
        )

        mm_info = MultiModalProcessingInfo(
            kwargs=mm_kwargs,
            hashes=mm_hashes,
            prompt_updates=mm_prompt_updates,
        )

        return prompt_ids, mm_info, is_update_applied

    def _cached_apply_hf_processor(
        self,
        prompt: str | list[int],
        mm_data_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, object],
        tokenization_kwargs: Mapping[str, object],
        *,
        mm_uuids: MultiModalUUIDDict | None = None,
    ) -> tuple[list[int], MultiModalProcessingInfo, bool]:
        """
        Apply the HF processor on the full prompt text,
        caching the results and reusing cached results.
        """
        cache = self.cache

        _, passthrough_data = self._get_hf_mm_data(mm_data_items)
        if cache is None or passthrough_data:
            return self._apply_hf_processor(
                prompt=prompt,
                mm_data_items=mm_data_items,
                hf_processor_mm_kwargs=hf_processor_mm_kwargs,
                tokenization_kwargs=tokenization_kwargs,
                mm_uuids=mm_uuids,
            )

        with _timed_operation(self.info.ctx, "hashing"):
            mm_hashes = self._hash_mm_items(
                mm_data_items,
                hf_processor_mm_kwargs,
                tokenization_kwargs,
                mm_uuids=mm_uuids,
            )

        with _timed_operation(self.info.ctx, "cache_lookup"):
            mm_is_cached, mm_missing_data_items = self._get_cache_missing_items(
                cache=cache,
                mm_data_items=mm_data_items,
                mm_hashes=mm_hashes,
            )

        # NOTE: `prompt` does not correspond to `mm_missing_data_items`,
        # so we can't apply prompt updates until the new multimodal
        # items are combined with the cached multimodal items
        (
            prompt_ids,
            mm_missing_processed_data,
            is_update_applied,
        ) = self._apply_hf_processor_main(
            prompt=prompt,
            mm_items=mm_missing_data_items,
            hf_processor_mm_kwargs=hf_processor_mm_kwargs,
            tokenization_kwargs=tokenization_kwargs,
            enable_hf_prompt_update=False,
        )

        mm_missing_kwargs = MultiModalKwargsItems.from_hf_inputs(
            mm_missing_processed_data,
            self._get_mm_fields_config(
                mm_missing_processed_data, hf_processor_mm_kwargs
            ),
        )

        mm_missing_prompt_updates = self._get_mm_prompt_updates(
            mm_missing_data_items,
            hf_processor_mm_kwargs,
            mm_missing_kwargs,
        )

        with _timed_operation(self.info.ctx, "cache_lookup"):
            mm_kwargs, mm_prompt_updates = self._merge_mm_kwargs(
                cache,
                mm_hashes=mm_hashes,
                mm_is_cached=mm_is_cached,
                mm_missing_kwargs=mm_missing_kwargs,
                mm_missing_prompt_updates=mm_missing_prompt_updates,
            )

        mm_info = MultiModalProcessingInfo(
            kwargs=mm_kwargs,
            hashes=mm_hashes,
            prompt_updates=mm_prompt_updates,
        )

        return prompt_ids, mm_info, is_update_applied

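    # Cache flow of `_cached_apply_hf_processor` above (a summary sketch):
    # hash all items -> split into cached vs. missing -> run the HF processor
    # only on the missing items -> merge both sets back in their original
    # order via `_merge_mm_kwargs`. `is_update_applied` comes back False
    # here because the prompt corresponds to *all* items, so prompt updates
    # can only be applied after the merge.
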
    def _apply_token_matches(
        self,
        prompt: list[int],
        mm_prompt_updates: MultiModalPromptUpdates,
    ) -> tuple[list[int], MultiModalPromptUpdatesApplyResult]:
        tokenizer = self.info.get_tokenizer()
        return apply_token_matches(prompt, mm_prompt_updates, tokenizer)

    def _apply_text_matches(
        self,
        prompt: str,
        mm_prompt_updates: MultiModalPromptUpdates,
    ) -> tuple[str, MultiModalPromptUpdatesApplyResult]:
        tokenizer = self.info.get_tokenizer()
        return apply_text_matches(prompt, mm_prompt_updates, tokenizer)

    def _apply_prompt_updates(
        self,
        token_ids: list[int],
        mm_prompt_updates: MultiModalPromptUpdates,
    ) -> tuple[list[int], Mapping[str, list[PlaceholderFeaturesInfo]]]:
        tokenizer = self.info.get_tokenizer()

        new_token_ids, match_result = self._apply_token_matches(
            token_ids,
            mm_prompt_updates,
        )

        # If the search text does not represent a special token,
        # it may have different token IDs in the prompt, because
        # the tokens may go across the boundaries of the search text.
        # ----
        # e.g. when searching for "foo" in "food", if "food" itself makes
        # up a token, then the token ID of "foo" will not appear at all
        # ----
        # Since it is inefficient to search for all possible tokenizations
        # of the search text in the prompt, we instead perform string-based
        # updates on the decoded token IDs, then encode them back.
        if not all(
            all(update_idx is not None for update_idx in update_idxs)
            for update_idxs in match_result.values()
        ):
            new_text, match_result = self._apply_text_matches(
                _seq2text(tokenizer, token_ids, use_cache=False),
                mm_prompt_updates,
            )

            new_token_ids = _seq2tokens(tokenizer, new_text, use_cache=False)

        matched_updates = defaultdict[str, list[Sequence[ResolvedPromptUpdate]]](list)
        for modality, update_idxs in match_result.items():
            for item_idx, update_idx in enumerate(update_idxs):
                assert update_idx is not None, (
                    "Failed to apply prompt replacement for "
                    f"mm_items[{modality!r}][{item_idx}]"
                )

                matched_updates[modality].append(
                    [mm_prompt_updates[modality][item_idx][update_idx]]
                )

        placeholders = self._find_mm_placeholders(
            new_token_ids,
            dict(matched_updates),
        )

        return new_token_ids, placeholders

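    # Illustrative apply-result for the two matching passes above: a value of
    #
    #     {"image": [0, None]}
    #
    # means the first update candidate matched for image 0, while none
    # matched for image 1; any `None` triggers the string-level fallback in
    # `_apply_prompt_updates` before placeholders are located.
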
    def _validate_mm_kwargs(
        self,
        mm_kwargs: MultiModalKwargsOptionalItems,
        mm_item_counts: Mapping[str, int],
    ) -> None:
        for modality, item_count in mm_item_counts.items():
            items = mm_kwargs.get(modality, [])

            if len(items) != item_count:
                raise RuntimeError(
                    f"Expected there to be {item_count} {modality} items in "
                    f"keyword arguments corresponding to {item_count} "
                    f"{modality} data items, but only found {len(items)}! "
                    "There is likely a problem with your "
                    "implementation of the merged multi-modal processor for "
                    "this model (usually arising from an inconsistency "
                    "between `_call_hf_processor` and `_get_mm_fields_config`)."
                )

    def _validate_mm_updates(
        self,
        mm_updates: MultiModalPromptUpdates,
        mm_item_counts: Mapping[str, int],
    ) -> None:
        for modality, item_count in mm_item_counts.items():
            placeholders = mm_updates.get(modality, [])

            if len(placeholders) != item_count:
                raise RuntimeError(
                    f"Expected there to be {item_count} prompt updates "
                    f"corresponding to {item_count} {modality} items, but "
                    f"instead found {len(placeholders)} prompt updates! "
                    "This is likely because you forgot to include input "
                    "placeholder tokens (e.g., `<image>`, `<|image_pad|>`) "
                    "in the prompt. If the model has a chat template, make "
                    "sure you have applied it before calling `LLM.generate`."
                )

    def _validate_mm_placeholders(
        self,
        mm_placeholders: Mapping[str, list[PlaceholderFeaturesInfo]],
        mm_item_counts: Mapping[str, int],
    ) -> None:
        for modality, item_count in mm_item_counts.items():
            placeholders = mm_placeholders.get(modality, [])

            if len(placeholders) != item_count:
                raise RuntimeError(
                    f"Expected there to be {item_count} prompt placeholders "
                    f"corresponding to {item_count} {modality} items, but "
                    f"instead found {len(placeholders)} prompt placeholders! "
                    "Make sure the implementations of `_call_hf_processor` "
                    "and `_get_mm_fields_config` are consistent with each "
                    "other."
                )

    def _maybe_apply_prompt_updates(
        self,
        mm_items: MultiModalDataItems,
        prompt_ids: list[int],
        mm_kwargs: MultiModalKwargsOptionalItems,
        mm_prompt_updates: MultiModalPromptUpdates,
        is_update_applied: bool,
    ) -> tuple[list[int], Mapping[str, list[PlaceholderFeaturesInfo]]]:
        mm_item_counts = mm_items.get_all_counts()
        self._validate_mm_kwargs(mm_kwargs, mm_item_counts)
        self._validate_mm_updates(mm_prompt_updates, mm_item_counts)

        if is_update_applied:
            mm_placeholders = self._find_mm_placeholders(
                prompt_ids,
                mm_prompt_updates,
            )
            self._validate_mm_placeholders(mm_placeholders, mm_item_counts)
        else:
            prompt_ids, mm_placeholders = self._apply_prompt_updates(
                prompt_ids,
                mm_prompt_updates,
            )
            self._validate_mm_placeholders(mm_placeholders, mm_item_counts)

        return prompt_ids, mm_placeholders

    def apply(
        self,
        prompt: str | list[int],
        mm_data: MultiModalDataDict,
        hf_processor_mm_kwargs: Mapping[str, object],
        tokenization_kwargs: Mapping[str, object] | None = None,
        *,
        mm_uuids: MultiModalUUIDDict | None = None,
    ) -> MultiModalInputs:
        """
        Process multi-modal inputs to be used in vLLM.

        The main steps are:

        1. Apply HF Processor on prompt text and multi-modal data together,
           outputting token IDs and processed tensors.
        2. Find and update sequences in the token IDs with placeholder tokens.
           The number of placeholder tokens equals the feature size of the
           multi-modal data outputted by the multi-modal encoder.
        3. Extract information about the placeholder tokens from the
           processed token IDs.
        """
        request_id = get_current_request_id()
        if request_id is not None:
            self.info.ctx.create_timing_stats(request_id)

        mm_items = self._to_mm_items(mm_data)

        if tokenization_kwargs is None:
            tokenization_kwargs = {}

        (
            prompt_ids,
            mm_info,
            is_update_applied,
        ) = self._cached_apply_hf_processor(
            prompt,
            mm_items,
            hf_processor_mm_kwargs,
            tokenization_kwargs=tokenization_kwargs,
            mm_uuids=mm_uuids,
        )

        # NOTE: tokenization_kwargs are not required to init processor
        with _timed_operation(self.info.ctx, "prompt_update"):
            prompt_ids, mm_placeholders = self._maybe_apply_prompt_updates(
                mm_items=mm_items,
                prompt_ids=prompt_ids,
                mm_kwargs=mm_info.kwargs,
                mm_prompt_updates=mm_info.prompt_updates,
                is_update_applied=is_update_applied,
            )

        mm_placeholder_ranges = {
            modality: [item.to_range() for item in placeholders]
            for modality, placeholders in mm_placeholders.items()
        }

        return MultiModalInputs(
            type="multimodal",
            prompt_token_ids=prompt_ids,
            mm_kwargs=mm_info.kwargs,
            mm_hashes=mm_info.hashes,
            mm_placeholders=mm_placeholder_ranges,
        )

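# End-to-end usage sketch for `BaseMultiModalProcessor.apply` (illustrative;
# `processor` is a concrete subclass instance and `image` a loaded PIL image):
#
#     inputs = processor.apply(
#         "USER: <image>\nWhat is shown? ASSISTANT:",
#         {"image": [image]},
#         hf_processor_mm_kwargs={},
#     )
#     inputs["prompt_token_ids"]  # token IDs with expanded placeholders
#     inputs["mm_placeholders"]   # per-modality placeholder ranges
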
class EncDecMultiModalProcessor(BaseMultiModalProcessor[_I]):
    @abstractmethod
    def create_encoder_prompt(
        self,
        prompt: str | list[int],
        mm_data: MultiModalDataDict,
    ) -> str | list[int]:
        """
        Create input prompt for the encoder. HF processor will be applied on
        this prompt during profiling and generation.
        """
        raise NotImplementedError

    @property
    def pad_dummy_encoder_prompt(self) -> bool:
        return False

    def create_decoder_prompt(
        self,
        prompt: str | list[int],
        mm_data: MultiModalDataDict,
    ) -> str | list[int]:
        """Create input prompt for the decoder."""
        return prompt

    def _get_enc_dec_inputs(
        self,
        prompt: str | list[int],
        mm_data: MultiModalDataDict,
        encoder_inputs: MultiModalInputs,
    ):
        tokenizer = self.info.get_tokenizer()
        decoder_prompt_raw = self.create_decoder_prompt(prompt, mm_data)
        if isinstance(decoder_prompt_raw, str):
            decoder_prompt_ids = tokenizer.encode(
                decoder_prompt_raw, add_special_tokens=False
            )
        else:
            decoder_prompt_ids = decoder_prompt_raw

        mm_inputs = MultiModalEncDecInputs(
            encoder_prompt_token_ids=encoder_inputs["prompt_token_ids"],
            **encoder_inputs,
        )
        mm_inputs["prompt_token_ids"] = decoder_prompt_ids
        return mm_inputs

    def apply(
        self,
        prompt: str | list[int],
        mm_data: MultiModalDataDict,
        hf_processor_mm_kwargs: Mapping[str, object],
        tokenization_kwargs: Mapping[str, object] | None = None,
        *,
        mm_uuids: MultiModalUUIDDict | None = None,
    ) -> MultiModalEncDecInputs:
        """
        Process multi-modal inputs to be used in vLLM.
        The main processing steps are modified to fit encoder-decoder models:
        1. Create encoder prompt from input prompt text.
        2. Apply the HF processor on the encoder prompt.
        3. Copy the input prompt text as decoder prompt inputs.
        """
        encoder_prompt = self.create_encoder_prompt(prompt, mm_data)
        encoder_inputs = super().apply(
            encoder_prompt,
            mm_data,
            hf_processor_mm_kwargs,
            tokenization_kwargs,
            mm_uuids=mm_uuids,
        )

        return self._get_enc_dec_inputs(
            prompt=prompt,
            mm_data=mm_data,
            encoder_inputs=encoder_inputs,
        )