vllm-cpu-amxbf16 0.11.2.post2__cp310-cp310-manylinux_2_17_x86_64.whl
This diff shows the contents of a publicly available package version as released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
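For reference, a listing of this shape can be reproduced locally. The sketch below is a minimal illustration, not part of the diff: it assumes the wheel has been downloaded under its canonical filename (the `WHEEL` path is an assumption). A wheel is a plain zip archive, so the standard library suffices; new text files appear as `+N -0` where N is the line count, and binary members (e.g. `vllm/_C.abi3.so`) appear as `+0 -0`.

```python
# Minimal sketch: reproduce a "+N -0" file listing from a local wheel.
# Assumption: the wheel exists at this path; it is not shipped with the diff.
import zipfile

WHEEL = "vllm_cpu_amxbf16-0.11.2.post2-cp310-cp310-manylinux_2_17_x86_64.whl"

with zipfile.ZipFile(WHEEL) as wf:
    for info in sorted(wf.infolist(), key=lambda i: i.filename):
        if not info.filename.startswith("vllm/"):
            continue  # skip *.dist-info metadata members
        data = wf.read(info)
        try:
            # New text files show as "+N -0", N = number of lines added.
            lines = len(data.decode("utf-8").splitlines())
            print(f"- {info.filename} +{lines} -0")
        except UnicodeDecodeError:
            # Binary members (e.g. vllm/_C.abi3.so) show as "+0 -0".
            print(f"- {info.filename} +0 -0")
```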
- vllm/_C.abi3.so +0 -0
- vllm/__init__.py +225 -0
- vllm/_aiter_ops.py +983 -0
- vllm/_bc_linter.py +54 -0
- vllm/_custom_ops.py +2863 -0
- vllm/_ipex_ops.py +457 -0
- vllm/_version.py +34 -0
- vllm/assets/__init__.py +0 -0
- vllm/assets/audio.py +43 -0
- vllm/assets/base.py +40 -0
- vllm/assets/image.py +59 -0
- vllm/assets/video.py +149 -0
- vllm/attention/__init__.py +18 -0
- vllm/attention/backends/__init__.py +0 -0
- vllm/attention/backends/abstract.py +391 -0
- vllm/attention/backends/registry.py +195 -0
- vllm/attention/backends/utils.py +33 -0
- vllm/attention/layer.py +1052 -0
- vllm/attention/layers/__init__.py +0 -0
- vllm/attention/layers/chunked_local_attention.py +121 -0
- vllm/attention/layers/cross_attention.py +178 -0
- vllm/attention/layers/encoder_only_attention.py +103 -0
- vllm/attention/ops/__init__.py +0 -0
- vllm/attention/ops/chunked_prefill_paged_decode.py +401 -0
- vllm/attention/ops/common.py +414 -0
- vllm/attention/ops/flashmla.py +251 -0
- vllm/attention/ops/merge_attn_states.py +47 -0
- vllm/attention/ops/paged_attn.py +262 -0
- vllm/attention/ops/pallas_kv_cache_update.py +130 -0
- vllm/attention/ops/prefix_prefill.py +814 -0
- vllm/attention/ops/rocm_aiter_paged_attn.py +123 -0
- vllm/attention/ops/triton_decode_attention.py +712 -0
- vllm/attention/ops/triton_merge_attn_states.py +105 -0
- vllm/attention/ops/triton_reshape_and_cache_flash.py +184 -0
- vllm/attention/ops/triton_unified_attention.py +941 -0
- vllm/attention/ops/vit_attn_wrappers.py +178 -0
- vllm/attention/selector.py +231 -0
- vllm/attention/utils/__init__.py +0 -0
- vllm/attention/utils/fa_utils.py +109 -0
- vllm/attention/utils/kv_sharing_utils.py +33 -0
- vllm/attention/utils/kv_transfer_utils.py +60 -0
- vllm/beam_search.py +88 -0
- vllm/benchmarks/__init__.py +0 -0
- vllm/benchmarks/datasets.py +3222 -0
- vllm/benchmarks/latency.py +172 -0
- vllm/benchmarks/lib/__init__.py +3 -0
- vllm/benchmarks/lib/endpoint_request_func.py +777 -0
- vllm/benchmarks/lib/ready_checker.py +72 -0
- vllm/benchmarks/lib/utils.py +79 -0
- vllm/benchmarks/serve.py +1531 -0
- vllm/benchmarks/sweep/__init__.py +0 -0
- vllm/benchmarks/sweep/cli.py +38 -0
- vllm/benchmarks/sweep/param_sweep.py +91 -0
- vllm/benchmarks/sweep/plot.py +580 -0
- vllm/benchmarks/sweep/serve.py +416 -0
- vllm/benchmarks/sweep/serve_sla.py +492 -0
- vllm/benchmarks/sweep/server.py +114 -0
- vllm/benchmarks/sweep/sla_sweep.py +132 -0
- vllm/benchmarks/sweep/utils.py +4 -0
- vllm/benchmarks/throughput.py +799 -0
- vllm/collect_env.py +857 -0
- vllm/compilation/__init__.py +0 -0
- vllm/compilation/activation_quant_fusion.py +209 -0
- vllm/compilation/backends.py +759 -0
- vllm/compilation/base_static_graph.py +57 -0
- vllm/compilation/caching.py +178 -0
- vllm/compilation/collective_fusion.py +1234 -0
- vllm/compilation/compiler_interface.py +639 -0
- vllm/compilation/counter.py +48 -0
- vllm/compilation/cuda_graph.py +208 -0
- vllm/compilation/decorators.py +571 -0
- vllm/compilation/fix_functionalization.py +253 -0
- vllm/compilation/fusion.py +374 -0
- vllm/compilation/fusion_attn.py +359 -0
- vllm/compilation/fx_utils.py +91 -0
- vllm/compilation/inductor_pass.py +133 -0
- vllm/compilation/matcher_utils.py +317 -0
- vllm/compilation/monitor.py +62 -0
- vllm/compilation/noop_elimination.py +134 -0
- vllm/compilation/partition_rules.py +72 -0
- vllm/compilation/pass_manager.py +135 -0
- vllm/compilation/piecewise_backend.py +121 -0
- vllm/compilation/post_cleanup.py +21 -0
- vllm/compilation/qk_norm_rope_fusion.py +238 -0
- vllm/compilation/sequence_parallelism.py +363 -0
- vllm/compilation/torch25_custom_graph_pass.py +44 -0
- vllm/compilation/vllm_inductor_pass.py +173 -0
- vllm/compilation/wrapper.py +238 -0
- vllm/config/__init__.py +102 -0
- vllm/config/cache.py +207 -0
- vllm/config/compilation.py +975 -0
- vllm/config/device.py +75 -0
- vllm/config/ec_transfer.py +110 -0
- vllm/config/kv_events.py +56 -0
- vllm/config/kv_transfer.py +114 -0
- vllm/config/load.py +124 -0
- vllm/config/lora.py +112 -0
- vllm/config/model.py +2162 -0
- vllm/config/multimodal.py +248 -0
- vllm/config/observability.py +123 -0
- vllm/config/parallel.py +655 -0
- vllm/config/pooler.py +122 -0
- vllm/config/scheduler.py +298 -0
- vllm/config/speculative.py +654 -0
- vllm/config/speech_to_text.py +38 -0
- vllm/config/structured_outputs.py +92 -0
- vllm/config/utils.py +178 -0
- vllm/config/vllm.py +1166 -0
- vllm/connections.py +189 -0
- vllm/device_allocator/__init__.py +0 -0
- vllm/device_allocator/cumem.py +327 -0
- vllm/distributed/__init__.py +6 -0
- vllm/distributed/communication_op.py +43 -0
- vllm/distributed/device_communicators/__init__.py +0 -0
- vllm/distributed/device_communicators/all2all.py +490 -0
- vllm/distributed/device_communicators/all_reduce_utils.py +344 -0
- vllm/distributed/device_communicators/base_device_communicator.py +297 -0
- vllm/distributed/device_communicators/cpu_communicator.py +209 -0
- vllm/distributed/device_communicators/cuda_communicator.py +340 -0
- vllm/distributed/device_communicators/cuda_wrapper.py +216 -0
- vllm/distributed/device_communicators/custom_all_reduce.py +326 -0
- vllm/distributed/device_communicators/mnnvl_compat.py +27 -0
- vllm/distributed/device_communicators/pynccl.py +386 -0
- vllm/distributed/device_communicators/pynccl_allocator.py +191 -0
- vllm/distributed/device_communicators/pynccl_wrapper.py +564 -0
- vllm/distributed/device_communicators/quick_all_reduce.py +290 -0
- vllm/distributed/device_communicators/ray_communicator.py +259 -0
- vllm/distributed/device_communicators/shm_broadcast.py +733 -0
- vllm/distributed/device_communicators/shm_object_storage.py +660 -0
- vllm/distributed/device_communicators/symm_mem.py +156 -0
- vllm/distributed/device_communicators/tpu_communicator.py +107 -0
- vllm/distributed/device_communicators/xpu_communicator.py +95 -0
- vllm/distributed/ec_transfer/__init__.py +14 -0
- vllm/distributed/ec_transfer/ec_connector/__init__.py +0 -0
- vllm/distributed/ec_transfer/ec_connector/base.py +247 -0
- vllm/distributed/ec_transfer/ec_connector/factory.py +88 -0
- vllm/distributed/ec_transfer/ec_connector/shared_storage_connector.py +201 -0
- vllm/distributed/ec_transfer/ec_transfer_state.py +42 -0
- vllm/distributed/eplb/__init__.py +8 -0
- vllm/distributed/eplb/eplb_state.py +837 -0
- vllm/distributed/eplb/rebalance_algo.py +260 -0
- vllm/distributed/eplb/rebalance_execute.py +431 -0
- vllm/distributed/kv_events.py +371 -0
- vllm/distributed/kv_transfer/README.md +29 -0
- vllm/distributed/kv_transfer/__init__.py +20 -0
- vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg +0 -0
- vllm/distributed/kv_transfer/kv_connector/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/base.py +10 -0
- vllm/distributed/kv_transfer/kv_connector/factory.py +192 -0
- vllm/distributed/kv_transfer/kv_connector/utils.py +268 -0
- vllm/distributed/kv_transfer/kv_connector/v1/__init__.py +19 -0
- vllm/distributed/kv_transfer/kv_connector/v1/base.py +546 -0
- vllm/distributed/kv_transfer/kv_connector/v1/decode_bench_connector.py +419 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +216 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/__init__.py +18 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/multi_process_adapter.py +379 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/utils.py +221 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/vllm_v1_adapter.py +1411 -0
- vllm/distributed/kv_transfer/kv_connector/v1/lmcache_mp_connector.py +867 -0
- vllm/distributed/kv_transfer/kv_connector/v1/metrics.py +189 -0
- vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py +454 -0
- vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +2440 -0
- vllm/distributed/kv_transfer/kv_connector/v1/offloading_connector.py +504 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +531 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +632 -0
- vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py +273 -0
- vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +450 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/base.py +179 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/mooncake_store.py +164 -0
- vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py +242 -0
- vllm/distributed/kv_transfer/kv_pipe/__init__.py +0 -0
- vllm/distributed/kv_transfer/kv_pipe/base.py +66 -0
- vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py +295 -0
- vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py +285 -0
- vllm/distributed/kv_transfer/kv_transfer_state.py +78 -0
- vllm/distributed/parallel_state.py +1759 -0
- vllm/distributed/tpu_distributed_utils.py +188 -0
- vllm/distributed/utils.py +543 -0
- vllm/engine/__init__.py +0 -0
- vllm/engine/arg_utils.py +2144 -0
- vllm/engine/async_llm_engine.py +6 -0
- vllm/engine/llm_engine.py +6 -0
- vllm/engine/protocol.py +170 -0
- vllm/entrypoints/__init__.py +0 -0
- vllm/entrypoints/anthropic/__init__.py +0 -0
- vllm/entrypoints/anthropic/protocol.py +162 -0
- vllm/entrypoints/anthropic/serving_messages.py +460 -0
- vllm/entrypoints/api_server.py +184 -0
- vllm/entrypoints/chat_utils.py +1690 -0
- vllm/entrypoints/cli/__init__.py +13 -0
- vllm/entrypoints/cli/benchmark/__init__.py +0 -0
- vllm/entrypoints/cli/benchmark/base.py +25 -0
- vllm/entrypoints/cli/benchmark/latency.py +21 -0
- vllm/entrypoints/cli/benchmark/main.py +56 -0
- vllm/entrypoints/cli/benchmark/serve.py +21 -0
- vllm/entrypoints/cli/benchmark/sweep.py +21 -0
- vllm/entrypoints/cli/benchmark/throughput.py +21 -0
- vllm/entrypoints/cli/collect_env.py +38 -0
- vllm/entrypoints/cli/main.py +79 -0
- vllm/entrypoints/cli/openai.py +256 -0
- vllm/entrypoints/cli/run_batch.py +68 -0
- vllm/entrypoints/cli/serve.py +249 -0
- vllm/entrypoints/cli/types.py +29 -0
- vllm/entrypoints/constants.py +10 -0
- vllm/entrypoints/context.py +572 -0
- vllm/entrypoints/dynamic_lora.py +57 -0
- vllm/entrypoints/harmony_utils.py +535 -0
- vllm/entrypoints/launcher.py +175 -0
- vllm/entrypoints/llm.py +1768 -0
- vllm/entrypoints/logger.py +84 -0
- vllm/entrypoints/openai/__init__.py +0 -0
- vllm/entrypoints/openai/api_server.py +2096 -0
- vllm/entrypoints/openai/cli_args.py +302 -0
- vllm/entrypoints/openai/orca_metrics.py +120 -0
- vllm/entrypoints/openai/protocol.py +3299 -0
- vllm/entrypoints/openai/run_batch.py +547 -0
- vllm/entrypoints/openai/serving_chat.py +1772 -0
- vllm/entrypoints/openai/serving_classification.py +235 -0
- vllm/entrypoints/openai/serving_completion.py +715 -0
- vllm/entrypoints/openai/serving_embedding.py +695 -0
- vllm/entrypoints/openai/serving_engine.py +1433 -0
- vllm/entrypoints/openai/serving_models.py +304 -0
- vllm/entrypoints/openai/serving_pooling.py +346 -0
- vllm/entrypoints/openai/serving_responses.py +2021 -0
- vllm/entrypoints/openai/serving_score.py +503 -0
- vllm/entrypoints/openai/serving_tokenization.py +203 -0
- vllm/entrypoints/openai/serving_tokens.py +269 -0
- vllm/entrypoints/openai/serving_transcription.py +148 -0
- vllm/entrypoints/openai/speech_to_text.py +405 -0
- vllm/entrypoints/openai/tool_parsers/__init__.py +142 -0
- vllm/entrypoints/openai/tool_parsers/abstract_tool_parser.py +273 -0
- vllm/entrypoints/openai/tool_parsers/deepseekv31_tool_parser.py +390 -0
- vllm/entrypoints/openai/tool_parsers/deepseekv3_tool_parser.py +390 -0
- vllm/entrypoints/openai/tool_parsers/ernie45_tool_parser.py +210 -0
- vllm/entrypoints/openai/tool_parsers/glm4_moe_tool_parser.py +200 -0
- vllm/entrypoints/openai/tool_parsers/granite_20b_fc_tool_parser.py +273 -0
- vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +253 -0
- vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +494 -0
- vllm/entrypoints/openai/tool_parsers/hunyuan_a13b_tool_parser.py +420 -0
- vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py +227 -0
- vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py +323 -0
- vllm/entrypoints/openai/tool_parsers/kimi_k2_tool_parser.py +590 -0
- vllm/entrypoints/openai/tool_parsers/llama4_pythonic_tool_parser.py +341 -0
- vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +290 -0
- vllm/entrypoints/openai/tool_parsers/longcat_tool_parser.py +37 -0
- vllm/entrypoints/openai/tool_parsers/minimax_m2_tool_parser.py +643 -0
- vllm/entrypoints/openai/tool_parsers/minimax_tool_parser.py +849 -0
- vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +390 -0
- vllm/entrypoints/openai/tool_parsers/olmo3_tool_parser.py +366 -0
- vllm/entrypoints/openai/tool_parsers/openai_tool_parser.py +97 -0
- vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py +120 -0
- vllm/entrypoints/openai/tool_parsers/pythonic_tool_parser.py +332 -0
- vllm/entrypoints/openai/tool_parsers/qwen3coder_tool_parser.py +781 -0
- vllm/entrypoints/openai/tool_parsers/qwen3xml_tool_parser.py +1316 -0
- vllm/entrypoints/openai/tool_parsers/seed_oss_tool_parser.py +744 -0
- vllm/entrypoints/openai/tool_parsers/step3_tool_parser.py +303 -0
- vllm/entrypoints/openai/tool_parsers/utils.py +229 -0
- vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +556 -0
- vllm/entrypoints/renderer.py +409 -0
- vllm/entrypoints/responses_utils.py +77 -0
- vllm/entrypoints/sagemaker/__init__.py +4 -0
- vllm/entrypoints/sagemaker/routes.py +72 -0
- vllm/entrypoints/score_utils.py +242 -0
- vllm/entrypoints/ssl.py +78 -0
- vllm/entrypoints/tool.py +143 -0
- vllm/entrypoints/tool_server.py +209 -0
- vllm/entrypoints/utils.py +319 -0
- vllm/env_override.py +378 -0
- vllm/envs.py +1659 -0
- vllm/forward_context.py +356 -0
- vllm/inputs/__init__.py +44 -0
- vllm/inputs/data.py +359 -0
- vllm/inputs/parse.py +137 -0
- vllm/inputs/preprocess.py +727 -0
- vllm/logger.py +267 -0
- vllm/logging_utils/__init__.py +10 -0
- vllm/logging_utils/dump_input.py +83 -0
- vllm/logging_utils/formatter.py +77 -0
- vllm/logging_utils/log_time.py +34 -0
- vllm/logits_process.py +121 -0
- vllm/logprobs.py +208 -0
- vllm/lora/__init__.py +0 -0
- vllm/lora/layers/__init__.py +41 -0
- vllm/lora/layers/base.py +67 -0
- vllm/lora/layers/base_linear.py +164 -0
- vllm/lora/layers/column_parallel_linear.py +578 -0
- vllm/lora/layers/fused_moe.py +472 -0
- vllm/lora/layers/logits_processor.py +252 -0
- vllm/lora/layers/replicated_linear.py +70 -0
- vllm/lora/layers/row_parallel_linear.py +181 -0
- vllm/lora/layers/utils.py +65 -0
- vllm/lora/layers/vocal_parallel_embedding.py +166 -0
- vllm/lora/lora_weights.py +198 -0
- vllm/lora/models.py +890 -0
- vllm/lora/ops/__init__.py +0 -0
- vllm/lora/ops/ipex_ops/__init__.py +6 -0
- vllm/lora/ops/ipex_ops/lora_ops.py +57 -0
- vllm/lora/ops/torch_ops/__init__.py +20 -0
- vllm/lora/ops/torch_ops/lora_ops.py +128 -0
- vllm/lora/ops/triton_ops/README_TUNING.md +60 -0
- vllm/lora/ops/triton_ops/__init__.py +21 -0
- vllm/lora/ops/triton_ops/fused_moe_lora_op.py +641 -0
- vllm/lora/ops/triton_ops/kernel_utils.py +340 -0
- vllm/lora/ops/triton_ops/lora_expand_op.py +310 -0
- vllm/lora/ops/triton_ops/lora_kernel_metadata.py +154 -0
- vllm/lora/ops/triton_ops/lora_shrink_op.py +287 -0
- vllm/lora/ops/triton_ops/utils.py +295 -0
- vllm/lora/ops/xla_ops/__init__.py +6 -0
- vllm/lora/ops/xla_ops/lora_ops.py +141 -0
- vllm/lora/peft_helper.py +128 -0
- vllm/lora/punica_wrapper/__init__.py +10 -0
- vllm/lora/punica_wrapper/punica_base.py +492 -0
- vllm/lora/punica_wrapper/punica_cpu.py +351 -0
- vllm/lora/punica_wrapper/punica_gpu.py +411 -0
- vllm/lora/punica_wrapper/punica_selector.py +21 -0
- vllm/lora/punica_wrapper/punica_tpu.py +359 -0
- vllm/lora/punica_wrapper/punica_xpu.py +279 -0
- vllm/lora/punica_wrapper/utils.py +150 -0
- vllm/lora/request.py +100 -0
- vllm/lora/resolver.py +88 -0
- vllm/lora/utils.py +293 -0
- vllm/lora/worker_manager.py +279 -0
- vllm/model_executor/__init__.py +11 -0
- vllm/model_executor/custom_op.py +194 -0
- vllm/model_executor/layers/__init__.py +0 -0
- vllm/model_executor/layers/activation.py +569 -0
- vllm/model_executor/layers/attention_layer_base.py +35 -0
- vllm/model_executor/layers/batch_invariant.py +854 -0
- vllm/model_executor/layers/conv.py +236 -0
- vllm/model_executor/layers/fla/__init__.py +8 -0
- vllm/model_executor/layers/fla/ops/__init__.py +17 -0
- vllm/model_executor/layers/fla/ops/chunk.py +240 -0
- vllm/model_executor/layers/fla/ops/chunk_delta_h.py +344 -0
- vllm/model_executor/layers/fla/ops/chunk_o.py +183 -0
- vllm/model_executor/layers/fla/ops/chunk_scaled_dot_kkt.py +154 -0
- vllm/model_executor/layers/fla/ops/cumsum.py +280 -0
- vllm/model_executor/layers/fla/ops/fused_recurrent.py +390 -0
- vllm/model_executor/layers/fla/ops/index.py +41 -0
- vllm/model_executor/layers/fla/ops/kda.py +1351 -0
- vllm/model_executor/layers/fla/ops/l2norm.py +146 -0
- vllm/model_executor/layers/fla/ops/layernorm_guard.py +396 -0
- vllm/model_executor/layers/fla/ops/op.py +60 -0
- vllm/model_executor/layers/fla/ops/solve_tril.py +556 -0
- vllm/model_executor/layers/fla/ops/utils.py +194 -0
- vllm/model_executor/layers/fla/ops/wy_fast.py +158 -0
- vllm/model_executor/layers/fused_moe/__init__.py +106 -0
- vllm/model_executor/layers/fused_moe/all2all_utils.py +160 -0
- vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +406 -0
- vllm/model_executor/layers/fused_moe/batched_triton_or_deep_gemm_moe.py +180 -0
- vllm/model_executor/layers/fused_moe/config.py +916 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=1,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H100,dtype=fp8_w8a8.json +123 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1024,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=1856,device_name=NVIDIA_L40S.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=192,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=352,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +122 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=384,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=704,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +114 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=AMD_Instinct_MI308X.json +213 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=768,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_H100_80GB_HBM3.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=928,device_name=NVIDIA_L40S.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=128,N=96,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H100.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1024,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2048,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3072,device_name=NVIDIA_H200,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +218 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=float8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +130 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI300X.json +201 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=192,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=320,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI350_OAM,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=384,device_name=AMD_Instinct_MI355_OAM,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=160,N=640,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=20,N=2560,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325X,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=32,N=1408,device_name=NVIDIA_B200.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=32,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=384,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_GB200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=40,N=2560,device_name=NVIDIA_H100,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=128,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=256,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_GB200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=512,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_A100-SXM4-80GB.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_B200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H20-3e.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=512,N=64,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=1408,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=176,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=352,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=60,N=704,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=128,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=256,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=62,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1280,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1408,device_name=NVIDIA_B200.json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=2560,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=3072,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=320,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=384,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=640,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H100_PCIe,dtype=fp8_w8a8,block_shape=[128,128].json +147 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=768,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=896,device_name=NVIDIA_H20.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=bf16.json +82 -0
- vllm/model_executor/layers/fused_moe/configs/E=64,N=8960,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +82 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=192,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=72,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +138 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=14336,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=16384,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=1792,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +154 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=2048,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +173 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=4096,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=7168,device_name=NVIDIA_H200.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +164 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X.json +200 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- vllm/model_executor/layers/fused_moe/configs/README +12 -0
- vllm/model_executor/layers/fused_moe/cpu_fused_moe.py +354 -0
- vllm/model_executor/layers/fused_moe/cutlass_moe.py +1052 -0
- vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +387 -0
- vllm/model_executor/layers/fused_moe/deep_gemm_utils.py +416 -0
- vllm/model_executor/layers/fused_moe/deepep_ht_prepare_finalize.py +420 -0
- vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +367 -0
- vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py +307 -0
- vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py +362 -0
- vllm/model_executor/layers/fused_moe/flashinfer_trtllm_moe.py +192 -0
- vllm/model_executor/layers/fused_moe/fused_batched_moe.py +1012 -0
- vllm/model_executor/layers/fused_moe/fused_marlin_moe.py +792 -0
- vllm/model_executor/layers/fused_moe/fused_moe.py +2175 -0
- vllm/model_executor/layers/fused_moe/fused_moe_method_base.py +112 -0
- vllm/model_executor/layers/fused_moe/fused_moe_modular_method.py +164 -0
- vllm/model_executor/layers/fused_moe/gpt_oss_triton_kernels_moe.py +316 -0
- vllm/model_executor/layers/fused_moe/layer.py +1944 -0
- vllm/model_executor/layers/fused_moe/modular_kernel.py +1222 -0
- vllm/model_executor/layers/fused_moe/moe_align_block_size.py +174 -0
- vllm/model_executor/layers/fused_moe/moe_pallas.py +83 -0
- vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py +229 -0
- vllm/model_executor/layers/fused_moe/moe_torch_iterative.py +60 -0
- vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +362 -0
- vllm/model_executor/layers/fused_moe/prepare_finalize.py +77 -0
- vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +265 -0
- vllm/model_executor/layers/fused_moe/routing_simulator.py +310 -0
- vllm/model_executor/layers/fused_moe/shared_fused_moe.py +97 -0
- vllm/model_executor/layers/fused_moe/topk_weight_and_reduce.py +171 -0
- vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py +163 -0
- vllm/model_executor/layers/fused_moe/trtllm_moe.py +143 -0
- vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py +578 -0
- vllm/model_executor/layers/fused_moe/utils.py +332 -0
- vllm/model_executor/layers/kda.py +448 -0
- vllm/model_executor/layers/layernorm.py +442 -0
- vllm/model_executor/layers/lightning_attn.py +729 -0
- vllm/model_executor/layers/linear.py +1424 -0
- vllm/model_executor/layers/logits_processor.py +106 -0
- vllm/model_executor/layers/mamba/__init__.py +0 -0
- vllm/model_executor/layers/mamba/abstract.py +71 -0
- vllm/model_executor/layers/mamba/linear_attn.py +402 -0
- vllm/model_executor/layers/mamba/mamba_mixer.py +535 -0
- vllm/model_executor/layers/mamba/mamba_mixer2.py +928 -0
- vllm/model_executor/layers/mamba/mamba_utils.py +225 -0
- vllm/model_executor/layers/mamba/ops/__init__.py +0 -0
- vllm/model_executor/layers/mamba/ops/causal_conv1d.py +1240 -0
- vllm/model_executor/layers/mamba/ops/layernorm_gated.py +172 -0
- vllm/model_executor/layers/mamba/ops/mamba_ssm.py +478 -0
- vllm/model_executor/layers/mamba/ops/ssd_bmm.py +211 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_scan.py +456 -0
- vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py +700 -0
- vllm/model_executor/layers/mamba/ops/ssd_combined.py +230 -0
- vllm/model_executor/layers/mamba/ops/ssd_state_passing.py +157 -0
- vllm/model_executor/layers/mamba/short_conv.py +264 -0
- vllm/model_executor/layers/mla.py +168 -0
- vllm/model_executor/layers/pooler.py +817 -0
- vllm/model_executor/layers/quantization/__init__.py +174 -0
- vllm/model_executor/layers/quantization/auto_round.py +454 -0
- vllm/model_executor/layers/quantization/awq.py +277 -0
- vllm/model_executor/layers/quantization/awq_marlin.py +659 -0
- vllm/model_executor/layers/quantization/awq_triton.py +337 -0
- vllm/model_executor/layers/quantization/base_config.py +170 -0
- vllm/model_executor/layers/quantization/bitblas.py +502 -0
- vllm/model_executor/layers/quantization/bitsandbytes.py +658 -0
- vllm/model_executor/layers/quantization/compressed_tensors/__init__.py +3 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +914 -0
- vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +2284 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/__init__.py +35 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_24.py +392 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +55 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_24.py +176 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py +124 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +218 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_fp8.py +183 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a8_int.py +153 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +138 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +200 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +125 -0
- vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +219 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/__init__.py +0 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/linear.py +260 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/module.py +173 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/__init__.py +0 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/schemes/linear_qutlass_nvfp4.py +64 -0
- vllm/model_executor/layers/quantization/compressed_tensors/transform/utils.py +13 -0
- vllm/model_executor/layers/quantization/compressed_tensors/triton_scaled_mm.py +224 -0
- vllm/model_executor/layers/quantization/compressed_tensors/utils.py +216 -0
- vllm/model_executor/layers/quantization/deepspeedfp.py +218 -0
- vllm/model_executor/layers/quantization/experts_int8.py +240 -0
- vllm/model_executor/layers/quantization/fbgemm_fp8.py +195 -0
- vllm/model_executor/layers/quantization/fp8.py +1333 -0
- vllm/model_executor/layers/quantization/fp_quant.py +420 -0
- vllm/model_executor/layers/quantization/gguf.py +643 -0
- vllm/model_executor/layers/quantization/gptq.py +393 -0
- vllm/model_executor/layers/quantization/gptq_bitblas.py +482 -0
- vllm/model_executor/layers/quantization/gptq_marlin.py +789 -0
- vllm/model_executor/layers/quantization/gptq_marlin_24.py +320 -0
- vllm/model_executor/layers/quantization/hqq_marlin.py +371 -0
- vllm/model_executor/layers/quantization/inc.py +65 -0
- vllm/model_executor/layers/quantization/input_quant_fp8.py +171 -0
- vllm/model_executor/layers/quantization/ipex_quant.py +467 -0
- vllm/model_executor/layers/quantization/kernels/__init__.py +0 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/MPLinearKernel.py +94 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/__init__.py +105 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/allspark.py +115 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/bitblas.py +323 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/conch.py +98 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/cutlass.py +119 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/dynamic_4bit.py +111 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/exllama.py +161 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/machete.py +159 -0
- vllm/model_executor/layers/quantization/kernels/mixed_precision/marlin.py +166 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/ScaledMMLinearKernel.py +73 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/__init__.py +97 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/aiter.py +120 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py +219 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/cutlass.py +140 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/triton.py +42 -0
- vllm/model_executor/layers/quantization/kernels/scaled_mm/xla.py +105 -0
- vllm/model_executor/layers/quantization/kv_cache.py +146 -0
- vllm/model_executor/layers/quantization/modelopt.py +1788 -0
- vllm/model_executor/layers/quantization/moe_wna16.py +541 -0
- vllm/model_executor/layers/quantization/mxfp4.py +1162 -0
- vllm/model_executor/layers/quantization/petit.py +320 -0
- vllm/model_executor/layers/quantization/ptpc_fp8.py +137 -0
- vllm/model_executor/layers/quantization/quark/__init__.py +0 -0
- vllm/model_executor/layers/quantization/quark/quark.py +528 -0
- vllm/model_executor/layers/quantization/quark/quark_moe.py +683 -0
- vllm/model_executor/layers/quantization/quark/schemes/__init__.py +9 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_ocp_mx.py +306 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_scheme.py +55 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +179 -0
- vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_int8.py +139 -0
- vllm/model_executor/layers/quantization/quark/utils.py +105 -0
- vllm/model_executor/layers/quantization/qutlass_utils.py +185 -0
- vllm/model_executor/layers/quantization/rtn.py +652 -0
- vllm/model_executor/layers/quantization/schema.py +90 -0
- vllm/model_executor/layers/quantization/torchao.py +380 -0
- vllm/model_executor/layers/quantization/tpu_int8.py +139 -0
- vllm/model_executor/layers/quantization/utils/__init__.py +6 -0
- vllm/model_executor/layers/quantization/utils/allspark_utils.py +67 -0
- vllm/model_executor/layers/quantization/utils/bitblas_utils.py +229 -0
- vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=12288,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2112,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=1536,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=3072,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=512,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4096,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=4608,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=512,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +18 -0
- vllm/model_executor/layers/quantization/utils/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2048,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=2304,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128,128].json +146 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=256,device_name=NVIDIA_L20,dtype=fp8_w8a8,block_shape=[128,128].json +26 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=7168,K=8192,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/N=8192,K=1536,device_name=AMD_Instinct_MI325_OAM,dtype=fp8_w8a8,block_shape=[128,128].json +164 -0
- vllm/model_executor/layers/quantization/utils/configs/README.md +3 -0
- vllm/model_executor/layers/quantization/utils/flashinfer_fp4_moe.py +89 -0
- vllm/model_executor/layers/quantization/utils/flashinfer_utils.py +298 -0
- vllm/model_executor/layers/quantization/utils/fp8_utils.py +1203 -0
- vllm/model_executor/layers/quantization/utils/gptq_utils.py +158 -0
- vllm/model_executor/layers/quantization/utils/int8_utils.py +489 -0
- vllm/model_executor/layers/quantization/utils/layer_utils.py +41 -0
- vllm/model_executor/layers/quantization/utils/machete_utils.py +56 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils.py +575 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py +397 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_fp8.py +351 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test.py +161 -0
- vllm/model_executor/layers/quantization/utils/marlin_utils_test_24.py +467 -0
- vllm/model_executor/layers/quantization/utils/mxfp4_utils.py +181 -0
- vllm/model_executor/layers/quantization/utils/mxfp6_utils.py +142 -0
- vllm/model_executor/layers/quantization/utils/mxfp8_utils.py +24 -0
- vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +142 -0
- vllm/model_executor/layers/quantization/utils/nvfp4_moe_support.py +63 -0
- vllm/model_executor/layers/quantization/utils/ocp_mx_utils.py +51 -0
- vllm/model_executor/layers/quantization/utils/petit_utils.py +124 -0
- vllm/model_executor/layers/quantization/utils/quant_utils.py +687 -0
- vllm/model_executor/layers/quantization/utils/w8a8_utils.py +516 -0
- vllm/model_executor/layers/resampler.py +283 -0
- vllm/model_executor/layers/rotary_embedding/__init__.py +278 -0
- vllm/model_executor/layers/rotary_embedding/base.py +235 -0
- vllm/model_executor/layers/rotary_embedding/common.py +188 -0
- vllm/model_executor/layers/rotary_embedding/deepseek_scaling_rope.py +165 -0
- vllm/model_executor/layers/rotary_embedding/dual_chunk_rope.py +215 -0
- vllm/model_executor/layers/rotary_embedding/dynamic_ntk_alpha_rope.py +43 -0
- vllm/model_executor/layers/rotary_embedding/dynamic_ntk_scaling_rope.py +68 -0
- vllm/model_executor/layers/rotary_embedding/ernie45_vl_rope.py +75 -0
- vllm/model_executor/layers/rotary_embedding/linear_scaling_rope.py +115 -0
- vllm/model_executor/layers/rotary_embedding/llama3_rope.py +54 -0
- vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py +80 -0
- vllm/model_executor/layers/rotary_embedding/mrope.py +397 -0
- vllm/model_executor/layers/rotary_embedding/ntk_scaling_rope.py +47 -0
- vllm/model_executor/layers/rotary_embedding/phi3_long_rope_scaled_rope.py +159 -0
- vllm/model_executor/layers/rotary_embedding/yarn_scaling_rope.py +81 -0
- vllm/model_executor/layers/utils.py +251 -0
- vllm/model_executor/layers/vocab_parallel_embedding.py +558 -0
- vllm/model_executor/model_loader/__init__.py +148 -0
- vllm/model_executor/model_loader/base_loader.py +57 -0
- vllm/model_executor/model_loader/bitsandbytes_loader.py +822 -0
- vllm/model_executor/model_loader/default_loader.py +327 -0
- vllm/model_executor/model_loader/dummy_loader.py +28 -0
- vllm/model_executor/model_loader/gguf_loader.py +176 -0
- vllm/model_executor/model_loader/online_quantization.py +224 -0
- vllm/model_executor/model_loader/runai_streamer_loader.py +116 -0
- vllm/model_executor/model_loader/sharded_state_loader.py +206 -0
- vllm/model_executor/model_loader/tensorizer.py +790 -0
- vllm/model_executor/model_loader/tensorizer_loader.py +151 -0
- vllm/model_executor/model_loader/tpu.py +118 -0
- vllm/model_executor/model_loader/utils.py +288 -0
- vllm/model_executor/model_loader/weight_utils.py +1084 -0
- vllm/model_executor/models/__init__.py +44 -0
- vllm/model_executor/models/adapters.py +543 -0
- vllm/model_executor/models/afmoe.py +711 -0
- vllm/model_executor/models/aimv2.py +247 -0
- vllm/model_executor/models/apertus.py +587 -0
- vllm/model_executor/models/arcee.py +439 -0
- vllm/model_executor/models/arctic.py +635 -0
- vllm/model_executor/models/aria.py +655 -0
- vllm/model_executor/models/aya_vision.py +450 -0
- vllm/model_executor/models/baichuan.py +496 -0
- vllm/model_executor/models/bailing_moe.py +646 -0
- vllm/model_executor/models/bamba.py +522 -0
- vllm/model_executor/models/bee.py +157 -0
- vllm/model_executor/models/bert.py +925 -0
- vllm/model_executor/models/bert_with_rope.py +732 -0
- vllm/model_executor/models/blip.py +349 -0
- vllm/model_executor/models/blip2.py +695 -0
- vllm/model_executor/models/bloom.py +390 -0
- vllm/model_executor/models/chameleon.py +1120 -0
- vllm/model_executor/models/chatglm.py +498 -0
- vllm/model_executor/models/clip.py +965 -0
- vllm/model_executor/models/cohere2_vision.py +472 -0
- vllm/model_executor/models/commandr.py +473 -0
- vllm/model_executor/models/config.py +503 -0
- vllm/model_executor/models/dbrx.py +482 -0
- vllm/model_executor/models/deepencoder.py +673 -0
- vllm/model_executor/models/deepseek_eagle.py +260 -0
- vllm/model_executor/models/deepseek_mtp.py +360 -0
- vllm/model_executor/models/deepseek_ocr.py +593 -0
- vllm/model_executor/models/deepseek_v2.py +1649 -0
- vllm/model_executor/models/deepseek_vl2.py +655 -0
- vllm/model_executor/models/dots1.py +574 -0
- vllm/model_executor/models/dots_ocr.py +900 -0
- vllm/model_executor/models/ernie45.py +53 -0
- vllm/model_executor/models/ernie45_moe.py +759 -0
- vllm/model_executor/models/ernie45_vl.py +1742 -0
- vllm/model_executor/models/ernie45_vl_moe.py +803 -0
- vllm/model_executor/models/ernie_mtp.py +279 -0
- vllm/model_executor/models/exaone.py +545 -0
- vllm/model_executor/models/exaone4.py +531 -0
- vllm/model_executor/models/fairseq2_llama.py +154 -0
- vllm/model_executor/models/falcon.py +545 -0
- vllm/model_executor/models/falcon_h1.py +685 -0
- vllm/model_executor/models/flex_olmo.py +155 -0
- vllm/model_executor/models/fuyu.py +373 -0
- vllm/model_executor/models/gemma.py +426 -0
- vllm/model_executor/models/gemma2.py +439 -0
- vllm/model_executor/models/gemma3.py +571 -0
- vllm/model_executor/models/gemma3_mm.py +741 -0
- vllm/model_executor/models/gemma3n.py +1165 -0
- vllm/model_executor/models/gemma3n_mm.py +811 -0
- vllm/model_executor/models/glm.py +23 -0
- vllm/model_executor/models/glm4.py +305 -0
- vllm/model_executor/models/glm4_1v.py +1821 -0
- vllm/model_executor/models/glm4_moe.py +747 -0
- vllm/model_executor/models/glm4_moe_mtp.py +359 -0
- vllm/model_executor/models/glm4v.py +784 -0
- vllm/model_executor/models/gpt2.py +397 -0
- vllm/model_executor/models/gpt_bigcode.py +339 -0
- vllm/model_executor/models/gpt_j.py +346 -0
- vllm/model_executor/models/gpt_neox.py +344 -0
- vllm/model_executor/models/gpt_oss.py +738 -0
- vllm/model_executor/models/granite.py +516 -0
- vllm/model_executor/models/granite_speech.py +913 -0
- vllm/model_executor/models/granitemoe.py +569 -0
- vllm/model_executor/models/granitemoehybrid.py +709 -0
- vllm/model_executor/models/granitemoeshared.py +333 -0
- vllm/model_executor/models/gritlm.py +245 -0
- vllm/model_executor/models/grok1.py +558 -0
- vllm/model_executor/models/h2ovl.py +554 -0
- vllm/model_executor/models/hunyuan_v1.py +1053 -0
- vllm/model_executor/models/hyperclovax_vision.py +1166 -0
- vllm/model_executor/models/idefics2_vision_model.py +426 -0
- vllm/model_executor/models/idefics3.py +717 -0
- vllm/model_executor/models/interfaces.py +1092 -0
- vllm/model_executor/models/interfaces_base.py +214 -0
- vllm/model_executor/models/intern_vit.py +453 -0
- vllm/model_executor/models/internlm2.py +460 -0
- vllm/model_executor/models/internlm2_ve.py +142 -0
- vllm/model_executor/models/interns1.py +830 -0
- vllm/model_executor/models/interns1_vit.py +432 -0
- vllm/model_executor/models/internvl.py +1452 -0
- vllm/model_executor/models/jais.py +397 -0
- vllm/model_executor/models/jamba.py +610 -0
- vllm/model_executor/models/jina_vl.py +147 -0
- vllm/model_executor/models/keye.py +1761 -0
- vllm/model_executor/models/keye_vl1_5.py +726 -0
- vllm/model_executor/models/kimi_linear.py +663 -0
- vllm/model_executor/models/kimi_vl.py +578 -0
- vllm/model_executor/models/lfm2.py +532 -0
- vllm/model_executor/models/lfm2_moe.py +762 -0
- vllm/model_executor/models/lightonocr.py +195 -0
- vllm/model_executor/models/llama.py +732 -0
- vllm/model_executor/models/llama4.py +859 -0
- vllm/model_executor/models/llama4_eagle.py +223 -0
- vllm/model_executor/models/llama_eagle.py +218 -0
- vllm/model_executor/models/llama_eagle3.py +367 -0
- vllm/model_executor/models/llava.py +842 -0
- vllm/model_executor/models/llava_next.py +583 -0
- vllm/model_executor/models/llava_next_video.py +467 -0
- vllm/model_executor/models/llava_onevision.py +923 -0
- vllm/model_executor/models/longcat_flash.py +749 -0
- vllm/model_executor/models/longcat_flash_mtp.py +349 -0
- vllm/model_executor/models/mamba.py +276 -0
- vllm/model_executor/models/mamba2.py +289 -0
- vllm/model_executor/models/medusa.py +179 -0
- vllm/model_executor/models/midashenglm.py +827 -0
- vllm/model_executor/models/mimo.py +188 -0
- vllm/model_executor/models/mimo_mtp.py +294 -0
- vllm/model_executor/models/minicpm.py +664 -0
- vllm/model_executor/models/minicpm3.py +242 -0
- vllm/model_executor/models/minicpm_eagle.py +389 -0
- vllm/model_executor/models/minicpmo.py +768 -0
- vllm/model_executor/models/minicpmv.py +1745 -0
- vllm/model_executor/models/minimax_m2.py +552 -0
- vllm/model_executor/models/minimax_text_01.py +1012 -0
- vllm/model_executor/models/minimax_vl_01.py +396 -0
- vllm/model_executor/models/mistral3.py +637 -0
- vllm/model_executor/models/mixtral.py +621 -0
- vllm/model_executor/models/mllama4.py +1147 -0
- vllm/model_executor/models/mlp_speculator.py +235 -0
- vllm/model_executor/models/modernbert.py +450 -0
- vllm/model_executor/models/module_mapping.py +74 -0
- vllm/model_executor/models/molmo.py +1555 -0
- vllm/model_executor/models/moonvit.py +677 -0
- vllm/model_executor/models/mpt.py +335 -0
- vllm/model_executor/models/nano_nemotron_vl.py +1740 -0
- vllm/model_executor/models/nemotron.py +518 -0
- vllm/model_executor/models/nemotron_h.py +852 -0
- vllm/model_executor/models/nemotron_nas.py +491 -0
- vllm/model_executor/models/nemotron_vl.py +653 -0
- vllm/model_executor/models/nvlm_d.py +216 -0
- vllm/model_executor/models/olmo.py +414 -0
- vllm/model_executor/models/olmo2.py +454 -0
- vllm/model_executor/models/olmoe.py +498 -0
- vllm/model_executor/models/openpangu.py +1062 -0
- vllm/model_executor/models/openpangu_mtp.py +265 -0
- vllm/model_executor/models/opt.py +426 -0
- vllm/model_executor/models/orion.py +372 -0
- vllm/model_executor/models/ouro.py +516 -0
- vllm/model_executor/models/ovis.py +559 -0
- vllm/model_executor/models/ovis2_5.py +673 -0
- vllm/model_executor/models/paddleocr_vl.py +1407 -0
- vllm/model_executor/models/paligemma.py +412 -0
- vllm/model_executor/models/persimmon.py +377 -0
- vllm/model_executor/models/phi.py +374 -0
- vllm/model_executor/models/phi3.py +18 -0
- vllm/model_executor/models/phi3v.py +737 -0
- vllm/model_executor/models/phi4_multimodal.py +1447 -0
- vllm/model_executor/models/phi4mm.py +1253 -0
- vllm/model_executor/models/phi4mm_audio.py +1296 -0
- vllm/model_executor/models/phi4mm_utils.py +1907 -0
- vllm/model_executor/models/phimoe.py +675 -0
- vllm/model_executor/models/pixtral.py +1352 -0
- vllm/model_executor/models/plamo2.py +981 -0
- vllm/model_executor/models/qwen.py +368 -0
- vllm/model_executor/models/qwen2.py +541 -0
- vllm/model_executor/models/qwen2_5_omni_thinker.py +1246 -0
- vllm/model_executor/models/qwen2_5_vl.py +1613 -0
- vllm/model_executor/models/qwen2_audio.py +473 -0
- vllm/model_executor/models/qwen2_moe.py +596 -0
- vllm/model_executor/models/qwen2_rm.py +123 -0
- vllm/model_executor/models/qwen2_vl.py +1670 -0
- vllm/model_executor/models/qwen3.py +336 -0
- vllm/model_executor/models/qwen3_moe.py +744 -0
- vllm/model_executor/models/qwen3_next.py +1395 -0
- vllm/model_executor/models/qwen3_next_mtp.py +296 -0
- vllm/model_executor/models/qwen3_omni_moe_thinker.py +1721 -0
- vllm/model_executor/models/qwen3_vl.py +1673 -0
- vllm/model_executor/models/qwen3_vl_moe.py +415 -0
- vllm/model_executor/models/qwen_vl.py +802 -0
- vllm/model_executor/models/radio.py +555 -0
- vllm/model_executor/models/registry.py +1155 -0
- vllm/model_executor/models/roberta.py +259 -0
- vllm/model_executor/models/rvl.py +107 -0
- vllm/model_executor/models/seed_oss.py +497 -0
- vllm/model_executor/models/siglip.py +1174 -0
- vllm/model_executor/models/siglip2navit.py +724 -0
- vllm/model_executor/models/skyworkr1v.py +953 -0
- vllm/model_executor/models/smolvlm.py +38 -0
- vllm/model_executor/models/solar.py +502 -0
- vllm/model_executor/models/stablelm.py +359 -0
- vllm/model_executor/models/starcoder2.py +367 -0
- vllm/model_executor/models/step3_text.py +559 -0
- vllm/model_executor/models/step3_vl.py +1148 -0
- vllm/model_executor/models/swin.py +514 -0
- vllm/model_executor/models/tarsier.py +619 -0
- vllm/model_executor/models/telechat2.py +153 -0
- vllm/model_executor/models/teleflm.py +78 -0
- vllm/model_executor/models/terratorch.py +319 -0
- vllm/model_executor/models/transformers/__init__.py +127 -0
- vllm/model_executor/models/transformers/base.py +464 -0
- vllm/model_executor/models/transformers/causal.py +65 -0
- vllm/model_executor/models/transformers/legacy.py +90 -0
- vllm/model_executor/models/transformers/moe.py +318 -0
- vllm/model_executor/models/transformers/multimodal.py +411 -0
- vllm/model_executor/models/transformers/pooling.py +119 -0
- vllm/model_executor/models/transformers/utils.py +207 -0
- vllm/model_executor/models/ultravox.py +681 -0
- vllm/model_executor/models/utils.py +877 -0
- vllm/model_executor/models/vision.py +552 -0
- vllm/model_executor/models/voxtral.py +845 -0
- vllm/model_executor/models/whisper.py +959 -0
- vllm/model_executor/models/zamba2.py +986 -0
- vllm/model_executor/parameter.py +642 -0
- vllm/model_executor/utils.py +94 -0
- vllm/model_executor/warmup/__init__.py +0 -0
- vllm/model_executor/warmup/deep_gemm_warmup.py +314 -0
- vllm/model_executor/warmup/kernel_warmup.py +98 -0
- vllm/multimodal/__init__.py +40 -0
- vllm/multimodal/audio.py +118 -0
- vllm/multimodal/base.py +26 -0
- vllm/multimodal/cache.py +755 -0
- vllm/multimodal/evs.py +294 -0
- vllm/multimodal/hasher.py +106 -0
- vllm/multimodal/image.py +130 -0
- vllm/multimodal/inputs.py +1036 -0
- vllm/multimodal/parse.py +544 -0
- vllm/multimodal/processing.py +2186 -0
- vllm/multimodal/profiling.py +369 -0
- vllm/multimodal/registry.py +360 -0
- vllm/multimodal/utils.py +512 -0
- vllm/multimodal/video.py +306 -0
- vllm/outputs.py +345 -0
- vllm/platforms/__init__.py +277 -0
- vllm/platforms/cpu.py +414 -0
- vllm/platforms/cuda.py +657 -0
- vllm/platforms/interface.py +639 -0
- vllm/platforms/rocm.py +466 -0
- vllm/platforms/tpu.py +276 -0
- vllm/platforms/xpu.py +274 -0
- vllm/plugins/__init__.py +78 -0
- vllm/plugins/io_processors/__init__.py +68 -0
- vllm/plugins/io_processors/interface.py +77 -0
- vllm/plugins/lora_resolvers/__init__.py +0 -0
- vllm/plugins/lora_resolvers/filesystem_resolver.py +52 -0
- vllm/pooling_params.py +228 -0
- vllm/profiler/__init__.py +0 -0
- vllm/profiler/gpu_profiler.py +37 -0
- vllm/profiler/layerwise_profile.py +392 -0
- vllm/profiler/utils.py +151 -0
- vllm/py.typed +2 -0
- vllm/ray/__init__.py +0 -0
- vllm/ray/lazy_utils.py +26 -0
- vllm/ray/ray_env.py +79 -0
- vllm/reasoning/__init__.py +92 -0
- vllm/reasoning/abs_reasoning_parsers.py +290 -0
- vllm/reasoning/basic_parsers.py +162 -0
- vllm/reasoning/deepseek_r1_reasoning_parser.py +67 -0
- vllm/reasoning/deepseek_v3_reasoning_parser.py +62 -0
- vllm/reasoning/ernie45_reasoning_parser.py +165 -0
- vllm/reasoning/glm4_moe_reasoning_parser.py +171 -0
- vllm/reasoning/gptoss_reasoning_parser.py +173 -0
- vllm/reasoning/granite_reasoning_parser.py +363 -0
- vllm/reasoning/hunyuan_a13b_reasoning_parser.py +237 -0
- vllm/reasoning/identity_reasoning_parser.py +58 -0
- vllm/reasoning/minimax_m2_reasoning_parser.py +67 -0
- vllm/reasoning/mistral_reasoning_parser.py +55 -0
- vllm/reasoning/olmo3_reasoning_parser.py +302 -0
- vllm/reasoning/qwen3_reasoning_parser.py +67 -0
- vllm/reasoning/seedoss_reasoning_parser.py +27 -0
- vllm/reasoning/step3_reasoning_parser.py +107 -0
- vllm/sampling_params.py +669 -0
- vllm/scalar_type.py +355 -0
- vllm/scripts.py +17 -0
- vllm/sequence.py +98 -0
- vllm/tasks.py +13 -0
- vllm/third_party/__init__.py +0 -0
- vllm/third_party/pynvml.py +6140 -0
- vllm/tracing.py +135 -0
- vllm/transformers_utils/__init__.py +26 -0
- vllm/transformers_utils/chat_templates/__init__.py +5 -0
- vllm/transformers_utils/chat_templates/registry.py +73 -0
- vllm/transformers_utils/chat_templates/template_basic.jinja +3 -0
- vllm/transformers_utils/chat_templates/template_blip2.jinja +11 -0
- vllm/transformers_utils/chat_templates/template_chatml.jinja +10 -0
- vllm/transformers_utils/chat_templates/template_deepseek_ocr.jinja +14 -0
- vllm/transformers_utils/chat_templates/template_deepseek_vl2.jinja +23 -0
- vllm/transformers_utils/chat_templates/template_fuyu.jinja +3 -0
- vllm/transformers_utils/chat_templates/template_minicpmv45.jinja +93 -0
- vllm/transformers_utils/config.py +1203 -0
- vllm/transformers_utils/config_parser_base.py +20 -0
- vllm/transformers_utils/configs/__init__.py +70 -0
- vllm/transformers_utils/configs/afmoe.py +84 -0
- vllm/transformers_utils/configs/arctic.py +206 -0
- vllm/transformers_utils/configs/chatglm.py +75 -0
- vllm/transformers_utils/configs/deepseek_vl2.py +126 -0
- vllm/transformers_utils/configs/dotsocr.py +71 -0
- vllm/transformers_utils/configs/eagle.py +84 -0
- vllm/transformers_utils/configs/falcon.py +89 -0
- vllm/transformers_utils/configs/flex_olmo.py +77 -0
- vllm/transformers_utils/configs/jais.py +243 -0
- vllm/transformers_utils/configs/kimi_linear.py +144 -0
- vllm/transformers_utils/configs/kimi_vl.py +38 -0
- vllm/transformers_utils/configs/lfm2_moe.py +159 -0
- vllm/transformers_utils/configs/medusa.py +65 -0
- vllm/transformers_utils/configs/midashenglm.py +103 -0
- vllm/transformers_utils/configs/mistral.py +174 -0
- vllm/transformers_utils/configs/mlp_speculator.py +69 -0
- vllm/transformers_utils/configs/moonvit.py +33 -0
- vllm/transformers_utils/configs/nemotron.py +212 -0
- vllm/transformers_utils/configs/nemotron_h.py +282 -0
- vllm/transformers_utils/configs/olmo3.py +79 -0
- vllm/transformers_utils/configs/ovis.py +182 -0
- vllm/transformers_utils/configs/qwen3_next.py +274 -0
- vllm/transformers_utils/configs/radio.py +89 -0
- vllm/transformers_utils/configs/speculators/__init__.py +2 -0
- vllm/transformers_utils/configs/speculators/algos.py +38 -0
- vllm/transformers_utils/configs/speculators/base.py +114 -0
- vllm/transformers_utils/configs/step3_vl.py +174 -0
- vllm/transformers_utils/configs/ultravox.py +118 -0
- vllm/transformers_utils/detokenizer_utils.py +198 -0
- vllm/transformers_utils/dynamic_module.py +59 -0
- vllm/transformers_utils/processor.py +402 -0
- vllm/transformers_utils/processors/__init__.py +15 -0
- vllm/transformers_utils/processors/deepseek_ocr.py +438 -0
- vllm/transformers_utils/processors/deepseek_vl2.py +406 -0
- vllm/transformers_utils/processors/ovis.py +453 -0
- vllm/transformers_utils/processors/ovis2_5.py +468 -0
- vllm/transformers_utils/runai_utils.py +104 -0
- vllm/transformers_utils/s3_utils.py +95 -0
- vllm/transformers_utils/tokenizer.py +293 -0
- vllm/transformers_utils/tokenizer_base.py +155 -0
- vllm/transformers_utils/tokenizers/__init__.py +16 -0
- vllm/transformers_utils/tokenizers/mistral.py +502 -0
- vllm/transformers_utils/utils.py +130 -0
- vllm/triton_utils/__init__.py +19 -0
- vllm/triton_utils/importing.py +103 -0
- vllm/usage/__init__.py +0 -0
- vllm/usage/usage_lib.py +294 -0
- vllm/utils/__init__.py +82 -0
- vllm/utils/argparse_utils.py +487 -0
- vllm/utils/async_utils.py +303 -0
- vllm/utils/cache.py +214 -0
- vllm/utils/collection_utils.py +139 -0
- vllm/utils/counter.py +45 -0
- vllm/utils/deep_gemm.py +391 -0
- vllm/utils/flashinfer.py +490 -0
- vllm/utils/func_utils.py +236 -0
- vllm/utils/gc_utils.py +147 -0
- vllm/utils/hashing.py +63 -0
- vllm/utils/import_utils.py +411 -0
- vllm/utils/jsontree.py +165 -0
- vllm/utils/math_utils.py +32 -0
- vllm/utils/mem_constants.py +13 -0
- vllm/utils/mem_utils.py +232 -0
- vllm/utils/nccl.py +64 -0
- vllm/utils/network_utils.py +331 -0
- vllm/utils/platform_utils.py +59 -0
- vllm/utils/profiling.py +56 -0
- vllm/utils/registry.py +49 -0
- vllm/utils/serial_utils.py +169 -0
- vllm/utils/system_utils.py +229 -0
- vllm/utils/tensor_schema.py +255 -0
- vllm/utils/torch_utils.py +657 -0
- vllm/v1/__init__.py +0 -0
- vllm/v1/attention/__init__.py +0 -0
- vllm/v1/attention/backends/__init__.py +0 -0
- vllm/v1/attention/backends/cpu_attn.py +496 -0
- vllm/v1/attention/backends/flash_attn.py +1028 -0
- vllm/v1/attention/backends/flashinfer.py +1572 -0
- vllm/v1/attention/backends/flex_attention.py +926 -0
- vllm/v1/attention/backends/gdn_attn.py +387 -0
- vllm/v1/attention/backends/linear_attn.py +74 -0
- vllm/v1/attention/backends/mamba1_attn.py +165 -0
- vllm/v1/attention/backends/mamba2_attn.py +354 -0
- vllm/v1/attention/backends/mamba_attn.py +115 -0
- vllm/v1/attention/backends/mla/__init__.py +0 -0
- vllm/v1/attention/backends/mla/common.py +2031 -0
- vllm/v1/attention/backends/mla/cutlass_mla.py +275 -0
- vllm/v1/attention/backends/mla/flashattn_mla.py +337 -0
- vllm/v1/attention/backends/mla/flashinfer_mla.py +171 -0
- vllm/v1/attention/backends/mla/flashmla.py +314 -0
- vllm/v1/attention/backends/mla/flashmla_sparse.py +548 -0
- vllm/v1/attention/backends/mla/indexer.py +362 -0
- vllm/v1/attention/backends/mla/rocm_aiter_mla.py +294 -0
- vllm/v1/attention/backends/mla/triton_mla.py +171 -0
- vllm/v1/attention/backends/pallas.py +436 -0
- vllm/v1/attention/backends/rocm_aiter_fa.py +816 -0
- vllm/v1/attention/backends/rocm_aiter_unified_attn.py +196 -0
- vllm/v1/attention/backends/rocm_attn.py +362 -0
- vllm/v1/attention/backends/short_conv_attn.py +105 -0
- vllm/v1/attention/backends/tree_attn.py +425 -0
- vllm/v1/attention/backends/triton_attn.py +373 -0
- vllm/v1/attention/backends/utils.py +1116 -0
- vllm/v1/attention/backends/xformers.py +417 -0
- vllm/v1/core/__init__.py +0 -0
- vllm/v1/core/block_pool.py +428 -0
- vllm/v1/core/encoder_cache_manager.py +343 -0
- vllm/v1/core/kv_cache_coordinator.py +480 -0
- vllm/v1/core/kv_cache_manager.py +420 -0
- vllm/v1/core/kv_cache_utils.py +1340 -0
- vllm/v1/core/sched/__init__.py +0 -0
- vllm/v1/core/sched/async_scheduler.py +62 -0
- vllm/v1/core/sched/interface.py +181 -0
- vllm/v1/core/sched/output.py +202 -0
- vllm/v1/core/sched/request_queue.py +221 -0
- vllm/v1/core/sched/scheduler.py +1617 -0
- vllm/v1/core/sched/utils.py +72 -0
- vllm/v1/core/single_type_kv_cache_manager.py +736 -0
- vllm/v1/cudagraph_dispatcher.py +148 -0
- vllm/v1/engine/__init__.py +206 -0
- vllm/v1/engine/async_llm.py +797 -0
- vllm/v1/engine/coordinator.py +377 -0
- vllm/v1/engine/core.py +1420 -0
- vllm/v1/engine/core_client.py +1400 -0
- vllm/v1/engine/detokenizer.py +351 -0
- vllm/v1/engine/exceptions.py +18 -0
- vllm/v1/engine/llm_engine.py +408 -0
- vllm/v1/engine/logprobs.py +182 -0
- vllm/v1/engine/output_processor.py +642 -0
- vllm/v1/engine/parallel_sampling.py +145 -0
- vllm/v1/engine/processor.py +621 -0
- vllm/v1/engine/utils.py +1072 -0
- vllm/v1/executor/__init__.py +6 -0
- vllm/v1/executor/abstract.py +352 -0
- vllm/v1/executor/multiproc_executor.py +877 -0
- vllm/v1/executor/ray_distributed_executor.py +8 -0
- vllm/v1/executor/ray_executor.py +626 -0
- vllm/v1/executor/ray_utils.py +465 -0
- vllm/v1/executor/uniproc_executor.py +183 -0
- vllm/v1/kv_cache_interface.py +403 -0
- vllm/v1/kv_offload/__init__.py +0 -0
- vllm/v1/kv_offload/abstract.py +161 -0
- vllm/v1/kv_offload/arc_manager.py +237 -0
- vllm/v1/kv_offload/backend.py +97 -0
- vllm/v1/kv_offload/backends/__init__.py +0 -0
- vllm/v1/kv_offload/backends/cpu.py +62 -0
- vllm/v1/kv_offload/cpu.py +93 -0
- vllm/v1/kv_offload/factory.py +56 -0
- vllm/v1/kv_offload/lru_manager.py +139 -0
- vllm/v1/kv_offload/mediums.py +39 -0
- vllm/v1/kv_offload/spec.py +62 -0
- vllm/v1/kv_offload/worker/__init__.py +0 -0
- vllm/v1/kv_offload/worker/cpu_gpu.py +185 -0
- vllm/v1/kv_offload/worker/worker.py +144 -0
- vllm/v1/metrics/__init__.py +0 -0
- vllm/v1/metrics/loggers.py +1238 -0
- vllm/v1/metrics/prometheus.py +82 -0
- vllm/v1/metrics/ray_wrappers.py +169 -0
- vllm/v1/metrics/reader.py +257 -0
- vllm/v1/metrics/stats.py +420 -0
- vllm/v1/outputs.py +249 -0
- vllm/v1/pool/__init__.py +0 -0
- vllm/v1/pool/metadata.py +82 -0
- vllm/v1/request.py +259 -0
- vllm/v1/sample/__init__.py +0 -0
- vllm/v1/sample/logits_processor/__init__.py +352 -0
- vllm/v1/sample/logits_processor/builtin.py +274 -0
- vllm/v1/sample/logits_processor/interface.py +106 -0
- vllm/v1/sample/logits_processor/state.py +165 -0
- vllm/v1/sample/metadata.py +44 -0
- vllm/v1/sample/ops/__init__.py +0 -0
- vllm/v1/sample/ops/bad_words.py +52 -0
- vllm/v1/sample/ops/logprobs.py +25 -0
- vllm/v1/sample/ops/penalties.py +57 -0
- vllm/v1/sample/ops/topk_topp_sampler.py +290 -0
- vllm/v1/sample/rejection_sampler.py +793 -0
- vllm/v1/sample/sampler.py +316 -0
- vllm/v1/sample/tpu/__init__.py +0 -0
- vllm/v1/sample/tpu/metadata.py +120 -0
- vllm/v1/sample/tpu/sampler.py +215 -0
- vllm/v1/serial_utils.py +532 -0
- vllm/v1/spec_decode/__init__.py +0 -0
- vllm/v1/spec_decode/eagle.py +1225 -0
- vllm/v1/spec_decode/medusa.py +73 -0
- vllm/v1/spec_decode/metadata.py +66 -0
- vllm/v1/spec_decode/metrics.py +224 -0
- vllm/v1/spec_decode/ngram_proposer.py +291 -0
- vllm/v1/spec_decode/suffix_decoding.py +103 -0
- vllm/v1/spec_decode/utils.py +16 -0
- vllm/v1/structured_output/__init__.py +338 -0
- vllm/v1/structured_output/backend_guidance.py +265 -0
- vllm/v1/structured_output/backend_lm_format_enforcer.py +177 -0
- vllm/v1/structured_output/backend_outlines.py +324 -0
- vllm/v1/structured_output/backend_types.py +136 -0
- vllm/v1/structured_output/backend_xgrammar.py +362 -0
- vllm/v1/structured_output/request.py +94 -0
- vllm/v1/structured_output/utils.py +469 -0
- vllm/v1/utils.py +414 -0
- vllm/v1/worker/__init__.py +0 -0
- vllm/v1/worker/block_table.py +327 -0
- vllm/v1/worker/cpu_model_runner.py +122 -0
- vllm/v1/worker/cpu_worker.py +206 -0
- vllm/v1/worker/dp_utils.py +230 -0
- vllm/v1/worker/ec_connector_model_runner_mixin.py +87 -0
- vllm/v1/worker/gpu_input_batch.py +975 -0
- vllm/v1/worker/gpu_model_runner.py +5102 -0
- vllm/v1/worker/gpu_ubatch_wrapper.py +466 -0
- vllm/v1/worker/gpu_worker.py +894 -0
- vllm/v1/worker/kv_connector_model_runner_mixin.py +144 -0
- vllm/v1/worker/lora_model_runner_mixin.py +213 -0
- vllm/v1/worker/tpu_input_batch.py +593 -0
- vllm/v1/worker/tpu_model_runner.py +2173 -0
- vllm/v1/worker/tpu_worker.py +355 -0
- vllm/v1/worker/ubatch_utils.py +73 -0
- vllm/v1/worker/ubatching.py +231 -0
- vllm/v1/worker/utils.py +366 -0
- vllm/v1/worker/worker_base.py +375 -0
- vllm/v1/worker/xpu_model_runner.py +55 -0
- vllm/v1/worker/xpu_worker.py +189 -0
- vllm/version.py +39 -0
- vllm/vllm_flash_attn/.gitkeep +0 -0
- vllm_cpu_amxbf16-0.11.2.post2.dist-info/METADATA +345 -0
- vllm_cpu_amxbf16-0.11.2.post2.dist-info/RECORD +1536 -0
- vllm_cpu_amxbf16-0.11.2.post2.dist-info/WHEEL +5 -0
- vllm_cpu_amxbf16-0.11.2.post2.dist-info/entry_points.txt +5 -0
- vllm_cpu_amxbf16-0.11.2.post2.dist-info/top_level.txt +1 -0
vllm/v1/core/sched/scheduler.py
@@ -0,0 +1,1617 @@
|
|
|
1
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
2
|
+
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
|
3
|
+
import itertools
|
|
4
|
+
import time
|
|
5
|
+
from collections import defaultdict
|
|
6
|
+
from collections.abc import Iterable
|
|
7
|
+
from typing import Any
|
|
8
|
+
|
|
9
|
+
from vllm.config import VllmConfig
|
|
10
|
+
from vllm.distributed.ec_transfer.ec_connector.base import (
|
|
11
|
+
ECConnectorMetadata,
|
|
12
|
+
ECConnectorRole,
|
|
13
|
+
)
|
|
14
|
+
from vllm.distributed.ec_transfer.ec_connector.factory import ECConnectorFactory
|
|
15
|
+
from vllm.distributed.kv_events import EventPublisherFactory, KVEventBatch
|
|
16
|
+
from vllm.distributed.kv_transfer.kv_connector.factory import KVConnectorFactory
|
|
17
|
+
from vllm.distributed.kv_transfer.kv_connector.v1 import (
|
|
18
|
+
KVConnectorBase_V1,
|
|
19
|
+
KVConnectorRole,
|
|
20
|
+
SupportsHMA,
|
|
21
|
+
)
|
|
22
|
+
from vllm.distributed.kv_transfer.kv_connector.v1.base import KVConnectorMetadata
|
|
23
|
+
from vllm.distributed.kv_transfer.kv_connector.v1.metrics import KVConnectorStats
|
|
24
|
+
from vllm.logger import init_logger
|
|
25
|
+
from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalRegistry
|
|
26
|
+
from vllm.v1.core.encoder_cache_manager import (
|
|
27
|
+
EncoderCacheManager,
|
|
28
|
+
compute_encoder_budget,
|
|
29
|
+
)
|
|
30
|
+
from vllm.v1.core.kv_cache_manager import KVCacheBlocks, KVCacheManager
|
|
31
|
+
from vllm.v1.core.sched.interface import SchedulerInterface
|
|
32
|
+
from vllm.v1.core.sched.output import (
|
|
33
|
+
CachedRequestData,
|
|
34
|
+
GrammarOutput,
|
|
35
|
+
NewRequestData,
|
|
36
|
+
SchedulerOutput,
|
|
37
|
+
)
|
|
38
|
+
from vllm.v1.core.sched.request_queue import SchedulingPolicy, create_request_queue
|
|
39
|
+
from vllm.v1.core.sched.utils import check_stop, remove_all
|
|
40
|
+
from vllm.v1.engine import EngineCoreEventType, EngineCoreOutput, EngineCoreOutputs
|
|
41
|
+
from vllm.v1.kv_cache_interface import KVCacheConfig
|
|
42
|
+
from vllm.v1.metrics.stats import PrefixCacheStats, SchedulerStats
|
|
43
|
+
from vllm.v1.outputs import DraftTokenIds, KVConnectorOutput, ModelRunnerOutput
|
|
44
|
+
from vllm.v1.request import Request, RequestStatus
|
|
45
|
+
from vllm.v1.spec_decode.metrics import SpecDecodingStats
|
|
46
|
+
from vllm.v1.structured_output import StructuredOutputManager
|
|
47
|
+
from vllm.v1.utils import record_function_or_nullcontext
|
|
48
|
+
|
|
49
|
+
logger = init_logger(__name__)
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
class Scheduler(SchedulerInterface):
|
|
53
|
+
def __init__(
|
|
54
|
+
self,
|
|
55
|
+
vllm_config: VllmConfig,
|
|
56
|
+
kv_cache_config: KVCacheConfig,
|
|
57
|
+
structured_output_manager: StructuredOutputManager,
|
|
58
|
+
block_size: int,
|
|
59
|
+
mm_registry: MultiModalRegistry = MULTIMODAL_REGISTRY,
|
|
60
|
+
include_finished_set: bool = False,
|
|
61
|
+
log_stats: bool = False,
|
|
62
|
+
) -> None:
|
|
63
|
+
self.vllm_config = vllm_config
|
|
64
|
+
self.scheduler_config = vllm_config.scheduler_config
|
|
65
|
+
self.cache_config = vllm_config.cache_config
|
|
66
|
+
self.lora_config = vllm_config.lora_config
|
|
67
|
+
self.kv_cache_config = kv_cache_config
|
|
68
|
+
self.kv_events_config = vllm_config.kv_events_config
|
|
69
|
+
self.parallel_config = vllm_config.parallel_config
|
|
70
|
+
self.log_stats = log_stats
|
|
71
|
+
self.structured_output_manager = structured_output_manager
|
|
72
|
+
self.is_encoder_decoder = vllm_config.model_config.is_encoder_decoder
|
|
73
|
+
|
|
74
|
+
# include_finished_set controls whether a separate set of finished
|
|
75
|
+
# request ids should be included in the EngineCoreOutputs returned
|
|
76
|
+
# by update_from_outputs(). This is currently used in the multi-engine
|
|
77
|
+
# case to track request lifetimes efficiently.
|
|
78
|
+
self.finished_req_ids_dict: dict[int, set[str]] | None = (
|
|
79
|
+
defaultdict(set) if include_finished_set else None
|
|
80
|
+
)
|
|
81
|
+
self.prev_step_scheduled_req_ids: set[str] = set()
|
|
82
|
+
|
|
83
|
+
# Scheduling constraints.
|
|
84
|
+
self.max_num_running_reqs = self.scheduler_config.max_num_seqs
|
|
85
|
+
self.max_num_scheduled_tokens = self.scheduler_config.max_num_batched_tokens
|
|
86
|
+
self.max_model_len = vllm_config.model_config.max_model_len
|
|
87
|
+
self.enable_kv_cache_events = (
|
|
88
|
+
self.kv_events_config is not None
|
|
89
|
+
and self.kv_events_config.enable_kv_cache_events
|
|
90
|
+
)
|
|
91
|
+
|
|
92
|
+
# Create KVConnector for the Scheduler. Note that each Worker
|
|
93
|
+
# will have a corresponding KVConnector with Role=WORKER.
|
|
94
|
+
# KV Connector pushes/pull of remote KVs for P/D and offloading.
|
|
95
|
+
self.connector = None
|
|
96
|
+
self.connector_prefix_cache_stats: PrefixCacheStats | None = None
|
|
97
|
+
if self.vllm_config.kv_transfer_config is not None:
|
|
98
|
+
assert not self.is_encoder_decoder, (
|
|
99
|
+
"Encoder-decoder models are not currently supported with KV connectors"
|
|
100
|
+
)
|
|
101
|
+
self.connector = KVConnectorFactory.create_connector(
|
|
102
|
+
config=self.vllm_config,
|
|
103
|
+
role=KVConnectorRole.SCHEDULER,
|
|
104
|
+
kv_cache_config=self.kv_cache_config,
|
|
105
|
+
)
|
|
106
|
+
if self.log_stats:
|
|
107
|
+
self.connector_prefix_cache_stats = PrefixCacheStats()
|
|
108
|
+
|
|
109
|
+
self.kv_event_publisher = EventPublisherFactory.create(
|
|
110
|
+
self.kv_events_config,
|
|
111
|
+
self.parallel_config.data_parallel_rank,
|
|
112
|
+
)
|
|
113
|
+
self.ec_connector = None
|
|
114
|
+
if self.vllm_config.ec_transfer_config is not None:
|
|
115
|
+
self.ec_connector = ECConnectorFactory.create_connector(
|
|
116
|
+
config=self.vllm_config, role=ECConnectorRole.SCHEDULER
|
|
117
|
+
)
|
|
118
|
+
|
|
119
|
+
num_gpu_blocks = self.cache_config.num_gpu_blocks
|
|
120
|
+
assert num_gpu_blocks is not None and num_gpu_blocks > 0
|
|
121
|
+
|
|
122
|
+
self.block_size = block_size
|
|
123
|
+
self.dcp_world_size = vllm_config.parallel_config.decode_context_parallel_size
|
|
124
|
+
|
|
125
|
+
# req_id -> Request
|
|
126
|
+
self.requests: dict[str, Request] = {}
|
|
127
|
+
# Scheduling policy
|
|
128
|
+
try:
|
|
129
|
+
self.policy = SchedulingPolicy(self.scheduler_config.policy)
|
|
130
|
+
except ValueError as e:
|
|
131
|
+
raise ValueError(
|
|
132
|
+
f"Unknown scheduling policy: {self.scheduler_config.policy}"
|
|
133
|
+
) from e
|
|
134
|
+
# Priority queues for requests.
|
|
135
|
+
self.waiting = create_request_queue(self.policy)
|
|
136
|
+
self.running: list[Request] = []
|
|
137
|
+
|
|
138
|
+
# The request IDs that are finished in between the previous and the
|
|
139
|
+
# current steps. This is used to notify the workers about the finished
|
|
140
|
+
# requests so that they can free the cached states for those requests.
|
|
141
|
+
# This is flushed at the end of each scheduling step.
|
|
142
|
+
self.finished_req_ids: set[str] = set()
|
|
143
|
+
|
|
144
|
+
# KV Connector: requests in process of async KV loading or recving
|
|
145
|
+
self.finished_recving_kv_req_ids: set[str] = set()
|
|
146
|
+
self.failed_recving_kv_req_ids: set[str] = set()
|
|
147
|
+
|
|
148
|
+
# Encoder-related.
|
|
149
|
+
# Calculate encoder cache size if applicable
|
|
150
|
+
# NOTE: For now we use the same budget for both compute and space.
|
|
151
|
+
# This can be changed when we make encoder cache for embedding caching
|
|
152
|
+
# across requests.
|
|
153
|
+
encoder_compute_budget, encoder_cache_size = compute_encoder_budget(
|
|
154
|
+
model_config=vllm_config.model_config,
|
|
155
|
+
scheduler_config=vllm_config.scheduler_config,
|
|
156
|
+
mm_registry=mm_registry,
|
|
157
|
+
)
|
|
158
|
+
|
|
159
|
+
# NOTE(woosuk): Here, "encoder" includes the vision encoder (and
|
|
160
|
+
# projector if needed) for MM models as well as encoder-decoder
|
|
161
|
+
# transformers.
|
|
162
|
+
self.max_num_encoder_input_tokens = encoder_compute_budget
|
|
163
|
+
# NOTE: For the models without encoder (e.g., text-only models),
|
|
164
|
+
# the encoder cache will not be initialized because cache size is 0
|
|
165
|
+
# for these models.
|
|
166
|
+
self.encoder_cache_manager = EncoderCacheManager(cache_size=encoder_cache_size)
|
|
167
|
+
|
|
168
|
+
speculative_config = vllm_config.speculative_config
|
|
169
|
+
self.use_eagle = False
|
|
170
|
+
self.num_spec_tokens = self.num_lookahead_tokens = 0
|
|
171
|
+
if speculative_config:
|
|
172
|
+
self.num_spec_tokens = speculative_config.num_speculative_tokens
|
|
173
|
+
if speculative_config.use_eagle():
|
|
174
|
+
self.use_eagle = True
|
|
175
|
+
self.num_lookahead_tokens = self.num_spec_tokens
|
|
176
|
+
|
|
177
|
+
# Create the KV cache manager.
|
|
178
|
+
self.kv_cache_manager = KVCacheManager(
|
|
179
|
+
kv_cache_config=kv_cache_config,
|
|
180
|
+
max_model_len=self.max_model_len,
|
|
181
|
+
enable_caching=bool(self.cache_config.enable_prefix_caching),
|
|
182
|
+
use_eagle=self.use_eagle,
|
|
183
|
+
log_stats=self.log_stats,
|
|
184
|
+
enable_kv_cache_events=self.enable_kv_cache_events,
|
|
185
|
+
dcp_world_size=self.dcp_world_size,
|
|
186
|
+
)
|
|
187
|
+
self.use_pp = self.parallel_config.pipeline_parallel_size > 1
|
|
188
|
+
|
|
189
|
+
def schedule(self) -> SchedulerOutput:
|
|
190
|
+
# NOTE(woosuk) on the scheduling algorithm:
|
|
191
|
+
# There's no "decoding phase" nor "prefill phase" in the scheduler.
|
|
192
|
+
# Each request just has the num_computed_tokens and
|
|
193
|
+
# num_tokens_with_spec. num_tokens_with_spec =
|
|
194
|
+
# len(prompt_token_ids) + len(output_token_ids) + len(spec_token_ids).
|
|
195
|
+
# At each step, the scheduler tries to assign tokens to the requests
|
|
196
|
+
# so that each request's num_computed_tokens can catch up its
|
|
197
|
+
# num_tokens_with_spec. This is general enough to cover
|
|
198
|
+
# chunked prefills, prefix caching, speculative decoding,
|
|
199
|
+
# and the "jump decoding" optimization in the future.
|
|
200
|
+
|
|
201
|
+
scheduled_new_reqs: list[Request] = []
|
|
202
|
+
scheduled_resumed_reqs: list[Request] = []
|
|
203
|
+
scheduled_running_reqs: list[Request] = []
|
|
204
|
+
preempted_reqs: list[Request] = []
|
|
205
|
+
|
|
206
|
+
req_to_new_blocks: dict[str, KVCacheBlocks] = {}
|
|
207
|
+
num_scheduled_tokens: dict[str, int] = {}
|
|
208
|
+
token_budget = self.max_num_scheduled_tokens
|
|
209
|
+
# Encoder-related.
|
|
210
|
+
scheduled_encoder_inputs: dict[str, list[int]] = {}
|
|
211
|
+
encoder_compute_budget = self.max_num_encoder_input_tokens
|
|
212
|
+
# Spec decode-related.
|
|
213
|
+
scheduled_spec_decode_tokens: dict[str, list[int]] = {}
|
|
214
|
+
|
|
215
|
+
# For logging.
|
|
216
|
+
scheduled_timestamp = time.monotonic()
|
|
217
|
+
|
|
218
|
+
# First, schedule the RUNNING requests.
|
|
219
|
+
req_index = 0
|
|
220
|
+
while req_index < len(self.running) and token_budget > 0:
|
|
221
|
+
request = self.running[req_index]
|
|
222
|
+
|
|
223
|
+
num_new_tokens = (
|
|
224
|
+
request.num_tokens_with_spec
|
|
225
|
+
+ request.num_output_placeholders
|
|
226
|
+
- request.num_computed_tokens
|
|
227
|
+
)
|
|
228
|
+
if 0 < self.scheduler_config.long_prefill_token_threshold < num_new_tokens:
|
|
229
|
+
num_new_tokens = self.scheduler_config.long_prefill_token_threshold
|
|
230
|
+
num_new_tokens = min(num_new_tokens, token_budget)
|
|
231
|
+
|
|
232
|
+
# Make sure the input position does not exceed the max model len or
|
|
233
|
+
# request's max_tokens.
|
|
234
|
+
# This is necessary when using spec decoding and/or async scheduling.
|
|
235
|
+
max_total_tokens = min(
|
|
236
|
+
request.num_prompt_tokens + request.max_tokens, self.max_model_len
|
|
237
|
+
)
|
|
238
|
+
num_new_tokens = min(
|
|
239
|
+
num_new_tokens, max_total_tokens - 1 - request.num_computed_tokens
|
|
240
|
+
)
|
|
241
|
+
|
|
242
|
+
# Schedule encoder inputs.
|
|
243
|
+
encoder_inputs_to_schedule = None
|
|
244
|
+
external_load_encoder_input: list[int] = []
|
|
245
|
+
new_encoder_compute_budget = encoder_compute_budget
|
|
246
|
+
if request.has_encoder_inputs:
|
|
247
|
+
(
|
|
248
|
+
encoder_inputs_to_schedule,
|
|
249
|
+
num_new_tokens,
|
|
250
|
+
new_encoder_compute_budget,
|
|
251
|
+
external_load_encoder_input,
|
|
252
|
+
) = self._try_schedule_encoder_inputs(
|
|
253
|
+
request,
|
|
254
|
+
request.num_computed_tokens,
|
|
255
|
+
num_new_tokens,
|
|
256
|
+
encoder_compute_budget,
|
|
257
|
+
)
|
|
258
|
+
|
|
259
|
+
if num_new_tokens == 0:
|
|
260
|
+
# The request cannot be scheduled because one of the following
|
|
261
|
+
# reasons:
|
|
262
|
+
# 1. No new tokens to schedule. This may happen when
|
|
263
|
+
# (1) PP>1 and we have already scheduled all prompt tokens
|
|
264
|
+
# but they are not finished yet.
|
|
265
|
+
# (2) Async scheduling and the request has reached to either
|
|
266
|
+
# its max_total_tokens or max_model_len.
|
|
267
|
+
# 2. The encoder budget is exhausted.
|
|
268
|
+
# 3. The encoder cache is exhausted.
|
|
269
|
+
# NOTE(woosuk): Here, by doing `continue` instead of `break`,
|
|
270
|
+
# we do not strictly follow the FCFS scheduling policy and
|
|
271
|
+
# allow the lower-priority requests to be scheduled.
|
|
272
|
+
req_index += 1
|
|
273
|
+
continue
|
|
274
|
+
|
|
275
|
+
# Schedule newly needed KV blocks for the request.
|
|
276
|
+
with record_function_or_nullcontext("schedule: allocate_slots"):
|
|
277
|
+
while True:
|
|
278
|
+
new_blocks = self.kv_cache_manager.allocate_slots(
|
|
279
|
+
request,
|
|
280
|
+
num_new_tokens,
|
|
281
|
+
num_lookahead_tokens=self.num_lookahead_tokens,
|
|
282
|
+
)
|
|
283
|
+
|
|
284
|
+
if new_blocks is not None:
|
|
285
|
+
# The request can be scheduled.
|
|
286
|
+
break
|
|
287
|
+
|
|
288
|
+
# The request cannot be scheduled.
|
|
289
|
+
# Preempt the lowest-priority request.
|
|
290
|
+
if self.policy == SchedulingPolicy.PRIORITY:
|
|
291
|
+
preempted_req = max(
|
|
292
|
+
self.running,
|
|
293
|
+
key=lambda r: (r.priority, r.arrival_time),
|
|
294
|
+
)
|
|
295
|
+
self.running.remove(preempted_req)
|
|
296
|
+
if preempted_req in scheduled_running_reqs:
|
|
297
|
+
scheduled_running_reqs.remove(preempted_req)
|
|
298
|
+
token_budget += num_scheduled_tokens[
|
|
299
|
+
preempted_req.request_id
|
|
300
|
+
]
|
|
301
|
+
req_to_new_blocks.pop(preempted_req.request_id)
|
|
302
|
+
num_scheduled_tokens.pop(preempted_req.request_id)
|
|
303
|
+
scheduled_spec_decode_tokens.pop(
|
|
304
|
+
preempted_req.request_id, None
|
|
305
|
+
)
|
|
306
|
+
preempted_encoder_inputs = scheduled_encoder_inputs.pop(
|
|
307
|
+
preempted_req.request_id, None
|
|
308
|
+
)
|
|
309
|
+
if preempted_encoder_inputs:
|
|
310
|
+
# Restore encoder compute budget if the preempted
|
|
311
|
+
# request had encoder inputs scheduled in this step.
|
|
312
|
+
num_tokens_to_restore = sum(
|
|
313
|
+
preempted_req.get_num_encoder_tokens(i)
|
|
314
|
+
for i in preempted_encoder_inputs
|
|
315
|
+
)
|
|
316
|
+
encoder_compute_budget += num_tokens_to_restore
|
|
317
|
+
req_index -= 1
|
|
318
|
+
else:
|
|
319
|
+
preempted_req = self.running.pop()
|
|
320
|
+
|
|
321
|
+
self.kv_cache_manager.free(preempted_req)
|
|
322
|
+
self.encoder_cache_manager.free(preempted_req)
|
|
323
|
+
preempted_req.status = RequestStatus.PREEMPTED
|
|
324
|
+
preempted_req.num_computed_tokens = 0
|
|
325
|
+
preempted_req.num_preemptions += 1
|
|
326
|
+
if self.log_stats:
|
|
327
|
+
preempted_req.record_event(
|
|
328
|
+
EngineCoreEventType.PREEMPTED, scheduled_timestamp
|
|
329
|
+
)
|
|
330
|
+
|
|
331
|
+
self.waiting.prepend_request(preempted_req)
|
|
332
|
+
preempted_reqs.append(preempted_req)
|
|
333
|
+
if preempted_req == request:
|
|
334
|
+
# No more request to preempt. Cannot schedule this request.
|
|
335
|
+
break
|
|
336
|
+
|
|
337
|
+
if new_blocks is None:
|
|
338
|
+
# Cannot schedule this request.
|
|
339
|
+
break
|
|
340
|
+
|
|
341
|
+
# Schedule the request.
|
|
342
|
+
scheduled_running_reqs.append(request)
|
|
343
|
+
req_to_new_blocks[request.request_id] = new_blocks
|
|
344
|
+
num_scheduled_tokens[request.request_id] = num_new_tokens
|
|
345
|
+
token_budget -= num_new_tokens
|
|
346
|
+
req_index += 1
|
|
347
|
+
|
|
348
|
+
# Speculative decode related.
|
|
349
|
+
if request.spec_token_ids:
|
|
350
|
+
num_scheduled_spec_tokens = (
|
|
351
|
+
num_new_tokens
|
|
352
|
+
+ request.num_computed_tokens
|
|
353
|
+
- request.num_tokens
|
|
354
|
+
- request.num_output_placeholders
|
|
355
|
+
)
|
|
356
|
+
if num_scheduled_spec_tokens > 0:
|
|
357
|
+
# Trim spec_token_ids list to num_scheduled_spec_tokens.
|
|
358
|
+
del request.spec_token_ids[num_scheduled_spec_tokens:]
|
|
359
|
+
scheduled_spec_decode_tokens[request.request_id] = (
|
|
360
|
+
request.spec_token_ids
|
|
361
|
+
)
|
|
362
|
+
# New spec tokens will be set in `update_draft_token_ids` before the
|
|
363
|
+
# next step when applicable.
|
|
364
|
+
request.spec_token_ids = []
|
|
365
|
+
|
|
366
|
+
# Encoder-related.
|
|
367
|
+
if encoder_inputs_to_schedule:
|
|
368
|
+
scheduled_encoder_inputs[request.request_id] = (
|
|
369
|
+
encoder_inputs_to_schedule
|
|
370
|
+
)
|
|
371
|
+
# Allocate the encoder cache.
|
|
372
|
+
for i in encoder_inputs_to_schedule:
|
|
373
|
+
self.encoder_cache_manager.allocate(request, i)
|
|
374
|
+
encoder_compute_budget = new_encoder_compute_budget
|
|
375
|
+
if external_load_encoder_input:
|
|
376
|
+
for i in external_load_encoder_input:
|
|
377
|
+
self.encoder_cache_manager.allocate(request, i)
|
|
378
|
+
if self.ec_connector is not None:
|
|
379
|
+
self.ec_connector.update_state_after_alloc(request, i)
|
|
380
|
+
|
|
381
|
+
# Record the LoRAs in scheduled_running_reqs
|
|
382
|
+
scheduled_loras: set[int] = set()
|
|
383
|
+
if self.lora_config:
|
|
384
|
+
scheduled_loras = set(
|
|
385
|
+
req.lora_request.lora_int_id
|
|
386
|
+
for req in scheduled_running_reqs
|
|
387
|
+
if req.lora_request and req.lora_request.lora_int_id > 0
|
|
388
|
+
)
|
|
389
|
+
assert len(scheduled_loras) <= self.lora_config.max_loras
|
|
390
|
+
|
|
391
|
+
# Use a temporary RequestQueue to collect requests that need to be
|
|
392
|
+
# skipped and put back at the head of the waiting queue later
|
|
393
|
+
skipped_waiting_requests = create_request_queue(self.policy)
|
|
394
|
+
|
|
395
|
+
# Next, schedule the WAITING requests.
|
|
396
|
+
if not preempted_reqs:
|
|
397
|
+
while self.waiting and token_budget > 0:
|
|
398
|
+
if len(self.running) == self.max_num_running_reqs:
|
|
399
|
+
break
|
|
400
|
+
|
|
401
|
+
request = self.waiting.peek_request()
|
|
402
|
+
|
|
403
|
+
# KVTransfer: skip request if still waiting for remote kvs.
|
|
404
|
+
if request.status == RequestStatus.WAITING_FOR_REMOTE_KVS:
|
|
405
|
+
is_ready = self._update_waiting_for_remote_kv(request)
|
|
406
|
+
if is_ready:
|
|
407
|
+
request.status = RequestStatus.WAITING
|
|
408
|
+
else:
|
|
409
|
+
logger.debug(
|
|
410
|
+
"%s is still in WAITING_FOR_REMOTE_KVS state.",
|
|
411
|
+
request.request_id,
|
|
412
|
+
)
|
|
413
|
+
self.waiting.pop_request()
|
|
414
|
+
skipped_waiting_requests.prepend_request(request)
|
|
415
|
+
continue
|
|
416
|
+
|
|
417
|
+
# Skip request if the structured output request is still waiting
|
|
418
|
+
# for FSM compilation.
|
|
419
|
+
if request.status == RequestStatus.WAITING_FOR_FSM:
|
|
420
|
+
structured_output_req = request.structured_output_request
|
|
421
|
+
if structured_output_req and structured_output_req.grammar:
|
|
422
|
+
request.status = RequestStatus.WAITING
|
|
423
|
+
else:
|
|
424
|
+
self.waiting.pop_request()
|
|
425
|
+
skipped_waiting_requests.prepend_request(request)
|
|
426
|
+
continue
|
|
427
|
+
|
|
428
|
+
# Check that adding the request still respects the max_loras
|
|
429
|
+
# constraint.
|
|
430
|
+
if (
|
|
431
|
+
self.lora_config
|
|
432
|
+
and request.lora_request
|
|
433
|
+
and (
|
|
434
|
+
len(scheduled_loras) == self.lora_config.max_loras
|
|
435
|
+
and request.lora_request.lora_int_id not in scheduled_loras
|
|
436
|
+
)
|
|
437
|
+
):
|
|
438
|
+
# Scheduling would exceed max_loras, skip.
|
|
439
|
+
self.waiting.pop_request()
|
|
440
|
+
skipped_waiting_requests.prepend_request(request)
|
|
441
|
+
continue
|
|
442
|
+
|
|
443
|
+
num_external_computed_tokens = 0
|
|
444
|
+
load_kv_async = False
|
|
445
|
+
|
|
446
|
+
# Get already-cached tokens.
|
|
447
|
+
if request.num_computed_tokens == 0:
|
|
448
|
+
# Get locally-cached tokens.
|
|
449
|
+
new_computed_blocks, num_new_local_computed_tokens = (
|
|
450
|
+
self.kv_cache_manager.get_computed_blocks(request)
|
|
451
|
+
)
|
|
452
|
+
|
|
453
|
+
# Get externally-cached tokens if using a KVConnector.
|
|
454
|
+
if self.connector is not None:
|
|
455
|
+
ext_tokens, load_kv_async = (
|
|
456
|
+
self.connector.get_num_new_matched_tokens(
|
|
457
|
+
request, num_new_local_computed_tokens
|
|
458
|
+
)
|
|
459
|
+
)
|
|
460
|
+
|
|
461
|
+
if ext_tokens is None:
|
|
462
|
+
# The request cannot be scheduled because
|
|
463
|
+
# the KVConnector couldn't determine
|
|
464
|
+
# the number of matched tokens.
|
|
465
|
+
self.waiting.pop_request()
|
|
466
|
+
skipped_waiting_requests.prepend_request(request)
|
|
467
|
+
continue
|
|
468
|
+
|
|
469
|
+
num_external_computed_tokens = ext_tokens
|
|
470
|
+
|
|
471
|
+
# Total computed tokens (local + external).
|
|
472
|
+
num_computed_tokens = (
|
|
473
|
+
num_new_local_computed_tokens + num_external_computed_tokens
|
|
474
|
+
)
|
|
475
|
+
else:
|
|
476
|
+
# KVTransfer: WAITING reqs have num_computed_tokens > 0
|
|
477
|
+
# after async KV recvs are completed.
|
|
478
|
+
new_computed_blocks = self.kv_cache_manager.empty_kv_cache_blocks
|
|
479
|
+
num_new_local_computed_tokens = 0
|
|
480
|
+
num_computed_tokens = request.num_computed_tokens
|
|
481
|
+
|
|
482
|
+
encoder_inputs_to_schedule = None
|
|
483
|
+
external_load_encoder_input = []
|
|
484
|
+
new_encoder_compute_budget = encoder_compute_budget
|
|
485
|
+
|
|
486
|
+
if load_kv_async:
|
|
487
|
+
# KVTransfer: loading remote KV, do not allocate for new work.
|
|
488
|
+
assert num_external_computed_tokens > 0
|
|
489
|
+
num_new_tokens = 0
|
|
490
|
+
else:
|
|
491
|
+
# Number of tokens to be scheduled.
|
|
492
|
+
# We use `request.num_tokens` instead of
|
|
493
|
+
# `request.num_prompt_tokens` to consider the resumed
|
|
494
|
+
# requests, which have output tokens.
|
|
495
|
+
num_new_tokens = request.num_tokens - num_computed_tokens
|
|
496
|
+
threshold = self.scheduler_config.long_prefill_token_threshold
|
|
497
|
+
if 0 < threshold < num_new_tokens:
|
|
498
|
+
num_new_tokens = threshold
|
|
499
|
+
|
|
500
|
+
# chunked prefill has to be enabled explicitly to allow
|
|
501
|
+
# pooling requests to be chunked
|
|
502
|
+
if (
|
|
503
|
+
not self.scheduler_config.enable_chunked_prefill
|
|
504
|
+
and num_new_tokens > token_budget
|
|
505
|
+
):
|
|
506
|
+
self.waiting.pop_request()
|
|
507
|
+
skipped_waiting_requests.prepend_request(request)
|
|
508
|
+
continue
|
|
509
|
+
|
|
510
|
+
num_new_tokens = min(num_new_tokens, token_budget)
|
|
511
|
+
assert num_new_tokens > 0
|
|
512
|
+
|
|
513
|
+
# Schedule encoder inputs.
|
|
514
|
+
if request.has_encoder_inputs:
|
|
515
|
+
(
|
|
516
|
+
encoder_inputs_to_schedule,
|
|
517
|
+
num_new_tokens,
|
|
518
|
+
new_encoder_compute_budget,
|
|
519
|
+
external_load_encoder_input,
|
|
520
|
+
) = self._try_schedule_encoder_inputs(
|
|
521
|
+
request,
|
|
522
|
+
num_computed_tokens,
|
|
523
|
+
num_new_tokens,
|
|
524
|
+
encoder_compute_budget,
|
|
525
|
+
)
|
|
526
|
+
if num_new_tokens == 0:
|
|
527
|
+
# The request cannot be scheduled.
|
|
528
|
+
break
|
|
529
|
+
|
|
530
|
+
# Handles an edge case when P/D Disaggregation
|
|
531
|
+
# is used with Spec Decoding where an
|
|
532
|
+
# extra block gets allocated which
|
|
533
|
+
# creates a mismatch between the number
|
|
534
|
+
# of local and remote blocks.
|
|
535
|
+
effective_lookahead_tokens = (
|
|
536
|
+
0 if request.num_computed_tokens == 0 else self.num_lookahead_tokens
|
|
537
|
+
)
|
|
538
|
+
|
|
539
|
+
# Determine if we need to allocate cross-attention blocks.
|
|
540
|
+
if self.is_encoder_decoder and request.has_encoder_inputs:
|
|
541
|
+
# TODO(russellb): For Whisper, we know that the input is
|
|
542
|
+
# always padded to the maximum length. If we support other
|
|
543
|
+
# encoder-decoder models, this will need to be updated if we
|
|
544
|
+
# want to only allocate what is needed.
|
|
545
|
+
num_encoder_tokens = (
|
|
546
|
+
self.scheduler_config.max_num_encoder_input_tokens
|
|
547
|
+
)
|
|
548
|
+
else:
|
|
549
|
+
num_encoder_tokens = 0
|
|
550
|
+
|
|
551
|
+
new_blocks = self.kv_cache_manager.allocate_slots(
|
|
552
|
+
request,
|
|
553
|
+
num_new_tokens + num_external_computed_tokens,
|
|
554
|
+
num_new_local_computed_tokens,
|
|
555
|
+
new_computed_blocks,
|
|
556
|
+
num_lookahead_tokens=effective_lookahead_tokens,
|
|
557
|
+
delay_cache_blocks=load_kv_async,
|
|
558
|
+
num_encoder_tokens=num_encoder_tokens,
|
|
559
|
+
)
|
|
560
|
+
|
|
561
|
+
if new_blocks is None:
|
|
562
|
+
# The request cannot be scheduled.
|
|
563
|
+
break
|
|
564
|
+
|
|
565
|
+
# KVTransfer: the connector uses this info to determine
|
|
566
|
+
# if a load is needed. Note that
|
|
567
|
+
# This information is used to determine if a load is
|
|
568
|
+
# needed for this request.
|
|
569
|
+
if self.connector is not None:
|
|
570
|
+
self.connector.update_state_after_alloc(
|
|
571
|
+
request,
|
|
572
|
+
new_computed_blocks + new_blocks,
|
|
573
|
+
num_external_computed_tokens,
|
|
574
|
+
)
|
|
575
|
+
self._update_connector_prefix_cache_stats(
|
|
576
|
+
request, num_external_computed_tokens
|
|
577
|
+
)
|
|
578
|
+
|
|
579
|
+
# Request was already popped from self.waiting
|
|
580
|
+
# unless it was re-added above due to new_blocks being None.
|
|
581
|
+
request = self.waiting.pop_request()
|
|
582
|
+
if load_kv_async:
|
|
583
|
+
# If loading async, allocate memory and put request
|
|
584
|
+
# into the WAITING_FOR_REMOTE_KV state.
|
|
585
|
+
skipped_waiting_requests.prepend_request(request)
|
|
586
|
+
request.status = RequestStatus.WAITING_FOR_REMOTE_KVS
|
|
587
|
+
continue
|
|
588
|
+
|
|
589
|
+
req_index += 1
|
|
590
|
+
self.running.append(request)
|
|
591
|
+
if self.log_stats:
|
|
592
|
+
request.record_event(
|
|
593
|
+
EngineCoreEventType.SCHEDULED, scheduled_timestamp
|
|
594
|
+
)
|
|
595
|
+
if request.status == RequestStatus.WAITING:
|
|
596
|
+
scheduled_new_reqs.append(request)
|
|
597
|
+
elif request.status == RequestStatus.PREEMPTED:
|
|
598
|
+
scheduled_resumed_reqs.append(request)
|
|
599
|
+
else:
|
|
600
|
+
raise RuntimeError(f"Invalid request status: {request.status}")
|
|
601
|
+
|
|
602
|
+
if self.lora_config and request.lora_request:
|
|
603
|
+
scheduled_loras.add(request.lora_request.lora_int_id)
|
|
604
|
+
req_to_new_blocks[request.request_id] = (
|
|
605
|
+
self.kv_cache_manager.get_blocks(request.request_id)
|
|
606
|
+
)
|
|
607
|
+
num_scheduled_tokens[request.request_id] = num_new_tokens
|
|
608
|
+
token_budget -= num_new_tokens
|
|
609
|
+
request.status = RequestStatus.RUNNING
|
|
610
|
+
request.num_computed_tokens = num_computed_tokens
|
|
611
|
+
# Count the number of prefix cached tokens.
|
|
612
|
+
if request.num_cached_tokens < 0:
|
|
613
|
+
request.num_cached_tokens = num_computed_tokens
|
|
614
|
+
# Encoder-related.
|
|
615
|
+
if encoder_inputs_to_schedule:
|
|
616
|
+
scheduled_encoder_inputs[request.request_id] = (
|
|
617
|
+
encoder_inputs_to_schedule
|
|
618
|
+
)
|
|
619
|
+
# Allocate the encoder cache.
|
|
620
|
+
for i in encoder_inputs_to_schedule:
|
|
621
|
+
self.encoder_cache_manager.allocate(request, i)
|
|
622
|
+
encoder_compute_budget = new_encoder_compute_budget
|
|
623
|
+
# Allocate for external load encoder cache
|
|
624
|
+
if external_load_encoder_input:
|
|
625
|
+
for i in external_load_encoder_input:
|
|
626
|
+
self.encoder_cache_manager.allocate(request, i)
|
|
627
|
+
if self.ec_connector is not None:
|
|
628
|
+
self.ec_connector.update_state_after_alloc(request, i)
|
|
629
|
+
# Put back any skipped requests at the head of the waiting queue
|
|
630
|
+
if skipped_waiting_requests:
|
|
631
|
+
self.waiting.prepend_requests(skipped_waiting_requests)
|
|
632
|
+
|
|
633
|
+
# Check if the scheduling constraints are satisfied.
|
|
634
|
+
total_num_scheduled_tokens = sum(num_scheduled_tokens.values())
|
|
635
|
+
assert total_num_scheduled_tokens <= self.max_num_scheduled_tokens
|
|
636
|
+
|
|
637
|
+
assert token_budget >= 0
|
|
638
|
+
assert len(self.running) <= self.max_num_running_reqs
|
|
639
|
+
# Since some requests in the RUNNING queue may not be scheduled in
|
|
640
|
+
# this step, the total number of scheduled requests can be smaller than
|
|
641
|
+
# len(self.running).
|
|
642
|
+
assert len(scheduled_new_reqs) + len(scheduled_resumed_reqs) + len(
|
|
643
|
+
scheduled_running_reqs
|
|
644
|
+
) <= len(self.running)
|
|
645
|
+
|
|
646
|
+
# Get the longest common prefix among all requests in the running queue.
|
|
647
|
+
# This can be potentially used for cascade attention.
|
|
648
|
+
num_common_prefix_blocks = [0] * len(self.kv_cache_config.kv_cache_groups)
|
|
649
|
+
with record_function_or_nullcontext("schedule: get_num_common_prefix_blocks"):
|
|
650
|
+
if self.running:
|
|
651
|
+
any_request = self.running[0]
|
|
652
|
+
num_common_prefix_blocks = (
|
|
653
|
+
self.kv_cache_manager.get_num_common_prefix_blocks(
|
|
654
|
+
any_request.request_id
|
|
655
|
+
)
|
|
656
|
+
)
|
|
657
|
+
|
|
658
|
+
# Construct the scheduler output.
|
|
659
|
+
new_reqs_data = [
|
|
660
|
+
NewRequestData.from_request(
|
|
661
|
+
req, req_to_new_blocks[req.request_id].get_block_ids()
|
|
662
|
+
)
|
|
663
|
+
for req in scheduled_new_reqs
|
|
664
|
+
]
|
|
665
|
+
with record_function_or_nullcontext("schedule: make_cached_request_data"):
|
|
666
|
+
cached_reqs_data = self._make_cached_request_data(
|
|
667
|
+
scheduled_running_reqs,
|
|
668
|
+
scheduled_resumed_reqs,
|
|
669
|
+
num_scheduled_tokens,
|
|
670
|
+
scheduled_spec_decode_tokens,
|
|
671
|
+
req_to_new_blocks,
|
|
672
|
+
)
|
|
673
|
+
|
|
674
|
+
# Record the request ids that were scheduled in this step.
|
|
675
|
+
self.prev_step_scheduled_req_ids.clear()
|
|
676
|
+
self.prev_step_scheduled_req_ids.update(num_scheduled_tokens.keys())
|
|
677
|
+
|
|
678
|
+
scheduler_output = SchedulerOutput(
|
|
679
|
+
scheduled_new_reqs=new_reqs_data,
|
|
680
|
+
scheduled_cached_reqs=cached_reqs_data,
|
|
681
|
+
num_scheduled_tokens=num_scheduled_tokens,
|
|
682
|
+
total_num_scheduled_tokens=total_num_scheduled_tokens,
|
|
683
|
+
scheduled_spec_decode_tokens=scheduled_spec_decode_tokens,
|
|
684
|
+
scheduled_encoder_inputs=scheduled_encoder_inputs,
|
|
685
|
+
num_common_prefix_blocks=num_common_prefix_blocks,
|
|
686
|
+
# finished_req_ids is an existing state in the scheduler,
|
|
687
|
+
# instead of being newly scheduled in this step.
|
|
688
|
+
# It contains the request IDs that are finished in between
|
|
689
|
+
# the previous and the current steps.
|
|
690
|
+
finished_req_ids=self.finished_req_ids,
|
|
691
|
+
free_encoder_mm_hashes=self.encoder_cache_manager.get_freed_mm_hashes(),
|
|
692
|
+
)
|
|
693
|
+
|
|
694
|
+
# NOTE(Kuntai): this function is designed for multiple purposes:
|
|
695
|
+
# 1. Plan the KV cache store
|
|
696
|
+
# 2. Wrap up all the KV cache load / save ops into an opaque object
|
|
697
|
+
# 3. Clear the internal states of the connector
|
|
698
|
+
if self.connector is not None:
|
|
699
|
+
meta: KVConnectorMetadata = self.connector.build_connector_meta(
|
|
700
|
+
scheduler_output
|
|
701
|
+
)
|
|
702
|
+
scheduler_output.kv_connector_metadata = meta
|
|
703
|
+
|
|
704
|
+
# Build the connector meta for ECConnector
|
|
705
|
+
if self.ec_connector is not None:
|
|
706
|
+
ec_meta: ECConnectorMetadata = self.ec_connector.build_connector_meta(
|
|
707
|
+
scheduler_output
|
|
708
|
+
)
|
|
709
|
+
scheduler_output.ec_connector_metadata = ec_meta
|
|
710
|
+
|
|
711
|
+
with record_function_or_nullcontext("schedule: update_after_schedule"):
|
|
712
|
+
self._update_after_schedule(scheduler_output)
|
|
713
|
+
return scheduler_output
|
|
714
|
+
|
|
715
|
+
def _update_after_schedule(
|
|
716
|
+
self,
|
|
717
|
+
scheduler_output: SchedulerOutput,
|
|
718
|
+
) -> None:
|
|
719
|
+
# Advance the number of computed tokens for the request AFTER
|
|
720
|
+
# the request is scheduled.
|
|
721
|
+
# 1. The scheduler_output of the current step has to include the
|
|
722
|
+
# original number of scheduled tokens to determine input IDs.
|
|
723
|
+
# 2. Advance the number of computed tokens here allowing us to
|
|
724
|
+
# schedule the prefill request again immediately in the next
|
|
725
|
+
# scheduling step.
|
|
726
|
+
# 3. If some tokens (e.g. spec tokens) are rejected later, the number of
|
|
727
|
+
# computed tokens will be adjusted in update_from_output.
|
|
728
|
+
num_scheduled_tokens = scheduler_output.num_scheduled_tokens
|
|
729
|
+
for req_id, num_scheduled_token in num_scheduled_tokens.items():
|
|
730
|
+
request = self.requests[req_id]
|
|
731
|
+
request.num_computed_tokens += num_scheduled_token
|
|
732
|
+
|
|
733
|
+
# NOTE: _free_encoder_inputs relies on num_computed_tokens, which
|
|
734
|
+
# may be updated again in _update_from_output for speculative
|
|
735
|
+
# decoding. However, it is safe to call the method here because
|
|
736
|
+
# encoder inputs are always part of the prompt, not the output,
|
|
737
|
+
# and thus are unaffected by speculative decoding.
|
|
738
|
+
if request.has_encoder_inputs:
|
|
739
|
+
self._free_encoder_inputs(request)
|
|
740
|
+
|
|
741
|
+
# Clear the finished request IDs.
|
|
742
|
+
# NOTE: We shouldn't do self.finished_req_ids.clear() here because
|
|
743
|
+
# it will also affect the scheduler output.
|
|
744
|
+
self.finished_req_ids = set()
|
|
745
|
+
|
|
746
|
+
def _make_cached_request_data(
|
|
747
|
+
self,
|
|
748
|
+
running_reqs: list[Request],
|
|
749
|
+
resumed_reqs: list[Request],
|
|
750
|
+
num_scheduled_tokens: dict[str, int],
|
|
751
|
+
spec_decode_tokens: dict[str, list[int]],
|
|
752
|
+
req_to_new_blocks: dict[str, KVCacheBlocks],
|
|
753
|
+
) -> CachedRequestData:
|
|
754
|
+
req_ids: list[str] = []
|
|
755
|
+
new_token_ids: list[list[int]] = []
|
|
756
|
+
new_block_ids: list[tuple[list[int], ...] | None] = []
|
|
757
|
+
all_token_ids: dict[str, list[int]] = {}
|
|
758
|
+
num_computed_tokens: list[int] = []
|
|
759
|
+
num_output_tokens: list[int] = []
|
|
760
|
+
resumed_req_ids = set()
|
|
761
|
+
|
|
762
|
+
num_running_reqs = len(running_reqs)
|
|
763
|
+
for idx, req in enumerate(itertools.chain(running_reqs, resumed_reqs)):
|
|
764
|
+
req_id = req.request_id
|
|
765
|
+
req_ids.append(req_id)
|
|
766
|
+
num_tokens = num_scheduled_tokens[req_id] - len(
|
|
767
|
+
spec_decode_tokens.get(req_id, ())
|
|
768
|
+
)
|
|
769
|
+
if self.use_pp:
|
|
770
|
+
# When using PP, the scheduler sends the sampled tokens back,
|
|
771
|
+
# because there's no direct communication between the first-
|
|
772
|
+
# stage worker and the last-stage worker. Otherwise, we don't
|
|
773
|
+
# need to send the sampled tokens back because the model runner
|
|
774
|
+
# will cache them.
|
|
775
|
+
token_ids = req.all_token_ids[
|
|
776
|
+
req.num_computed_tokens : req.num_computed_tokens + num_tokens
|
|
777
|
+
]
|
|
778
|
+
new_token_ids.append(token_ids)
|
|
779
|
+
scheduled_in_prev_step = req_id in self.prev_step_scheduled_req_ids
|
|
780
|
+
if idx >= num_running_reqs:
|
|
781
|
+
assert not scheduled_in_prev_step
|
|
782
|
+
resumed_req_ids.add(req_id)
|
|
783
|
+
if not scheduled_in_prev_step:
|
|
784
|
+
all_token_ids[req_id] = req.all_token_ids.copy()
|
|
785
|
+
new_block_ids.append(
|
|
786
|
+
req_to_new_blocks[req_id].get_block_ids(allow_none=True)
|
|
787
|
+
)
|
|
788
|
+
num_computed_tokens.append(req.num_computed_tokens)
|
|
789
|
+
num_output_tokens.append(
|
|
790
|
+
req.num_output_tokens + req.num_output_placeholders
|
|
791
|
+
)
|
|
792
|
+
|
|
793
|
+
return CachedRequestData(
|
|
794
|
+
req_ids=req_ids,
|
|
795
|
+
resumed_req_ids=resumed_req_ids,
|
|
796
|
+
new_token_ids=new_token_ids,
|
|
797
|
+
all_token_ids=all_token_ids,
|
|
798
|
+
new_block_ids=new_block_ids,
|
|
799
|
+
num_computed_tokens=num_computed_tokens,
|
|
800
|
+
num_output_tokens=num_output_tokens,
|
|
801
|
+
)
|
|
802
|
+
|
|
803
|
+
def _try_schedule_encoder_inputs(
|
|
804
|
+
self,
|
|
805
|
+
request: Request,
|
|
806
|
+
num_computed_tokens: int,
|
|
807
|
+
num_new_tokens: int,
|
|
808
|
+
encoder_compute_budget: int,
|
|
809
|
+
) -> tuple[list[int], int, int, list[int]]:
|
|
810
|
+
"""
|
|
811
|
+
Determine which encoder inputs need to be scheduled in the current step,
|
|
812
|
+
and update `num_new_tokens` and encoder token budget accordingly.
|
|
813
|
+
|
|
814
|
+
An encoder input will be scheduled if:
|
|
815
|
+
- Its output tokens overlap with the range of tokens being computed
|
|
816
|
+
in this step, i.e.,
|
|
817
|
+
[num_computed_tokens, num_computed_tokens + num_new_tokens).
|
|
818
|
+
- It is not already computed and stored in the encoder cache.
|
|
819
|
+
- It is not exist on remote encoder cache (via ECConnector)
|
|
820
|
+
- There is sufficient encoder token budget to process it.
|
|
821
|
+
- The encoder cache has space to store it.
|
|
822
|
+
|
|
823
|
+
If an encoder input cannot be scheduled due to cache or budget
|
|
824
|
+
limitations, the method adjusts `num_new_tokens` to schedule only the
|
|
825
|
+
decoder tokens up to just before the unschedulable encoder input.
|
|
826
|
+
|
|
827
|
+
Note that num_computed_tokens includes both locally cached
|
|
828
|
+
blocks and externally cached blocks (via KVConnector).
|
|
829
|
+
"""
|
|
830
|
+
if num_new_tokens == 0 or not request.has_encoder_inputs:
|
|
831
|
+
return [], num_new_tokens, encoder_compute_budget, []
|
|
832
|
+
encoder_inputs_to_schedule: list[int] = []
|
|
833
|
+
mm_features = request.mm_features
|
|
834
|
+
assert mm_features is not None
|
|
835
|
+
assert len(mm_features) > 0
|
|
836
|
+
external_load_encoder_input = []
|
|
837
|
+
|
|
838
|
+
# Check remote cache first
|
|
839
|
+
if self.ec_connector is not None:
|
|
840
|
+
remote_cache_has_item = self.ec_connector.has_caches(request)
|
|
841
|
+
# NOTE: since scheduler operates on the request level (possibly with
|
|
842
|
+
# multiple encoder inputs per request), we need to create temporary
|
|
843
|
+
# trackers for accounting at the encoder input level.
|
|
844
|
+
mm_hashes_to_schedule = set()
|
|
845
|
+
num_tokens_to_schedule = 0
|
|
846
|
+
for i, mm_feature in enumerate(mm_features):
|
|
847
|
+
start_pos = mm_feature.mm_position.offset
|
|
848
|
+
num_encoder_tokens = mm_feature.mm_position.length
|
|
849
|
+
|
|
850
|
+
# The encoder output is needed if the two ranges overlap:
|
|
851
|
+
# [num_computed_tokens, num_computed_tokens + num_new_tokens) and
|
|
852
|
+
# [start_pos, start_pos + num_encoder_tokens)
|
|
853
|
+
if start_pos >= num_computed_tokens + num_new_tokens:
|
|
854
|
+
# The encoder input is not needed in this step.
|
|
855
|
+
break
|
|
856
|
+
|
|
857
|
+
if self.is_encoder_decoder and num_computed_tokens > 0:
|
|
858
|
+
assert start_pos == 0, (
|
|
859
|
+
"Encoder input should be processed at the beginning of "
|
|
860
|
+
"the sequence when encoder-decoder models are used."
|
|
861
|
+
)
|
|
862
|
+
# Encoder input has already been computed
|
|
863
|
+
# The calculation here is a bit different. We don't turn encoder
|
|
864
|
+
# output into tokens that get processed by the decoder and
|
|
865
|
+
# reflected in num_computed_tokens. Instead, start_pos reflects
|
|
866
|
+
# the position where we need to ensure we calculate encoder
|
|
867
|
+
# inputs. This should always be 0 to ensure we calculate encoder
|
|
868
|
+
# inputs before running the decoder. Once we've calculated some
|
|
869
|
+
# decoder tokens (num_computed_tokens > 0), then we know we
|
|
870
|
+
# already calculated encoder inputs and can skip here.
|
|
871
|
+
continue
|
|
872
|
+
elif start_pos + num_encoder_tokens <= num_computed_tokens:
|
|
873
|
+
# The encoder input is already computed and stored
|
|
874
|
+
# in the decoder's KV cache.
|
|
875
|
+
continue
|
|
876
|
+
|
|
877
|
+
if not self.is_encoder_decoder:
|
|
878
|
+
# We are not using the encoder cache for encoder-decoder models,
|
|
879
|
+
# yet.
|
|
880
|
+
if request.mm_features[i].identifier in mm_hashes_to_schedule:
|
|
881
|
+
# The same encoder input has already been scheduled in the
|
|
882
|
+
# current step.
|
|
883
|
+
continue
|
|
884
|
+
|
|
885
|
+
if self.encoder_cache_manager.check_and_update_cache(request, i):
|
|
886
|
+
# The encoder input is already computed and cached from a
|
|
887
|
+
# previous step.
|
|
888
|
+
continue
|
|
889
|
+
|
|
890
|
+
# If no encoder input chunking is allowed, we do not want to
|
|
891
|
+
# partially schedule a multimodal item. If the scheduled range would
|
|
892
|
+
# only cover part of the mm input, roll back to before the mm item.
|
|
893
|
+
if (
|
|
894
|
+
self.scheduler_config.disable_chunked_mm_input
|
|
895
|
+
and num_computed_tokens < start_pos
|
|
896
|
+
and (num_computed_tokens + num_new_tokens)
|
|
897
|
+
< (start_pos + num_encoder_tokens)
|
|
898
|
+
):
|
|
899
|
+
num_new_tokens = start_pos - num_computed_tokens
|
|
900
|
+
break
|
|
901
|
+
|
|
902
|
+
if not self.encoder_cache_manager.can_allocate(
|
|
903
|
+
request, i, encoder_compute_budget, num_tokens_to_schedule
|
|
904
|
+
):
|
|
905
|
+
# The encoder cache is full or the encoder budget is exhausted.
|
|
906
|
+
# NOTE(woosuk): We assume that the encoder input tokens should
|
|
907
|
+
# be processed altogether, as the encoder usually uses
|
|
908
|
+
# bidirectional attention.
|
|
909
|
+
if num_computed_tokens < start_pos:
|
|
910
|
+
# We only schedule the decoder tokens just before the
|
|
911
|
+
# encoder input.
|
|
912
|
+
num_new_tokens = start_pos - num_computed_tokens
|
|
913
|
+
else:
|
|
914
|
+
# Because of prefix caching, num_computed_tokens is greater
|
|
915
|
+
# than start_pos even though its encoder input is not
|
|
916
|
+
# available. In this case, we can't schedule any token for
|
|
917
|
+
# the request in this step.
|
|
918
|
+
num_new_tokens = 0
|
|
919
|
+
break
|
|
920
|
+
|
|
921
|
+
if self.ec_connector is not None and remote_cache_has_item[i]:
|
|
922
|
+
mm_hashes_to_schedule.add(request.mm_features[i].identifier)
|
|
923
|
+
external_load_encoder_input.append(i)
|
|
924
|
+
num_tokens_to_schedule += num_encoder_tokens
|
|
925
|
+
continue
|
|
926
|
+
|
|
927
|
+
num_tokens_to_schedule += num_encoder_tokens
|
|
928
|
+
encoder_compute_budget -= num_encoder_tokens
|
|
929
|
+
mm_hashes_to_schedule.add(request.mm_features[i].identifier)
|
|
930
|
+
encoder_inputs_to_schedule.append(i)
|
|
931
|
+
|
|
932
|
+
return (
|
|
933
|
+
encoder_inputs_to_schedule,
|
|
934
|
+
num_new_tokens,
|
|
935
|
+
encoder_compute_budget,
|
|
936
|
+
external_load_encoder_input,
|
|
937
|
+
)
|
|
938
|
+
|
|
939
|
+
def get_grammar_bitmask(
|
|
940
|
+
self,
|
|
941
|
+
scheduler_output: SchedulerOutput,
|
|
942
|
+
) -> GrammarOutput | None:
|
|
943
|
+
# Collect list of scheduled request ids that use structured output.
|
|
944
|
+
# The corresponding rows of the bitmask will be in this order.
|
|
945
|
+
# PERF: in case of chunked prefill,
|
|
946
|
+
# request might not include any new tokens.
|
|
947
|
+
# Therefore, we might introduce some additional
|
|
948
|
+
# cycle to fill in the bitmask, which could be a big no-op.
|
|
949
|
+
structured_output_request_ids = [
|
|
950
|
+
req_id
|
|
951
|
+
for req_id in scheduler_output.num_scheduled_tokens
|
|
952
|
+
if (req := self.requests.get(req_id)) and req.use_structured_output
|
|
953
|
+
]
|
|
954
|
+
if not structured_output_request_ids:
|
|
955
|
+
return None
|
|
956
|
+
|
|
957
|
+
bitmask = self.structured_output_manager.grammar_bitmask(
|
|
958
|
+
self.requests,
|
|
959
|
+
structured_output_request_ids,
|
|
960
|
+
scheduler_output.scheduled_spec_decode_tokens,
|
|
961
|
+
)
|
|
962
|
+
return GrammarOutput(structured_output_request_ids, bitmask)
|
|
963
|
+
|
|
964
|
+
def update_from_output(
|
|
965
|
+
self,
|
|
966
|
+
scheduler_output: SchedulerOutput,
|
|
967
|
+
model_runner_output: ModelRunnerOutput,
|
|
968
|
+
) -> dict[int, EngineCoreOutputs]:
|
|
969
|
+
sampled_token_ids = model_runner_output.sampled_token_ids
|
|
970
|
+
logprobs = model_runner_output.logprobs
|
|
971
|
+
prompt_logprobs_dict = model_runner_output.prompt_logprobs_dict
|
|
972
|
+
num_scheduled_tokens = scheduler_output.num_scheduled_tokens
|
|
973
|
+
pooler_outputs = model_runner_output.pooler_output
|
|
974
|
+
num_nans_in_logits = model_runner_output.num_nans_in_logits
|
|
975
|
+
kv_connector_output = model_runner_output.kv_connector_output
|
|
976
|
+
|
|
977
|
+
outputs: dict[int, list[EngineCoreOutput]] = defaultdict(list)
|
|
978
|
+
spec_decoding_stats: SpecDecodingStats | None = None
|
|
979
|
+
kv_connector_stats: KVConnectorStats | None = (
|
|
980
|
+
kv_connector_output.kv_connector_stats if kv_connector_output else None
|
|
981
|
+
)
|
|
982
|
+
if kv_connector_stats and self.connector:
|
|
983
|
+
kv_stats = self.connector.get_kv_connector_stats()
|
|
984
|
+
if kv_stats:
|
|
985
|
+
kv_connector_stats = kv_connector_stats.aggregate(kv_stats)
|
|
986
|
+
|
|
987
|
+
failed_kv_load_req_ids = None
|
|
988
|
+
if kv_connector_output and kv_connector_output.invalid_block_ids:
|
|
989
|
+
# These blocks contain externally computed tokens that failed to
|
|
990
|
+
# load. Identify affected requests and adjust their computed token
|
|
991
|
+
# count to trigger recomputation of the invalid blocks.
|
|
992
|
+
failed_kv_load_req_ids = self._handle_invalid_blocks(
|
|
993
|
+
kv_connector_output.invalid_block_ids
|
|
994
|
+
)
|
|
995
|
+
|
|
996
|
+
# NOTE(woosuk): As len(num_scheduled_tokens) can be up to 1K or more,
|
|
997
|
+
# the below loop can be a performance bottleneck. We should do our best
|
|
998
|
+
# to avoid expensive operations inside the loop.
|
|
999
|
+
stopped_running_reqs: set[Request] = set()
|
|
1000
|
+
stopped_preempted_reqs: set[Request] = set()
|
|
1001
|
+
for req_id, num_tokens_scheduled in num_scheduled_tokens.items():
|
|
1002
|
+
assert num_tokens_scheduled > 0
|
|
1003
|
+
if failed_kv_load_req_ids and req_id in failed_kv_load_req_ids:
|
|
1004
|
+
# Skip requests that were recovered from KV load failure
|
|
1005
|
+
continue
|
|
1006
|
+
request = self.requests.get(req_id)
|
|
1007
|
+
if request is None:
|
|
1008
|
+
# The request is already finished. This can happen if the
|
|
1009
|
+
# request is aborted while the model is executing it (e.g.,
|
|
1010
|
+
# in pipeline parallelism).
|
|
1011
|
+
continue
|
|
1012
|
+
|
|
1013
|
+
req_index = model_runner_output.req_id_to_index[req_id]
|
|
1014
|
+
generated_token_ids: list[int] = (
|
|
1015
|
+
sampled_token_ids[req_index].tolist() if sampled_token_ids else []
|
|
1016
|
+
)
|
|
1017
|
+
|
|
1018
|
+
scheduled_spec_token_ids = (
|
|
1019
|
+
scheduler_output.scheduled_spec_decode_tokens.get(req_id)
|
|
1020
|
+
)
|
|
1021
|
+
if scheduled_spec_token_ids:
|
|
1022
|
+
num_draft_tokens = len(scheduled_spec_token_ids)
|
|
1023
|
+
num_accepted = len(generated_token_ids) - 1
|
|
1024
|
+
num_rejected = num_draft_tokens - num_accepted
|
|
1025
|
+
# num_computed_tokens represents the number of tokens
|
|
1026
|
+
# processed in the current step, considering scheduled
|
|
1027
|
+
# tokens and rejections. If some tokens are rejected,
|
|
1028
|
+
# num_computed_tokens is decreased by the number of rejected
|
|
1029
|
+
# tokens.
|
|
1030
|
+
if request.num_computed_tokens > 0:
|
|
1031
|
+
request.num_computed_tokens -= num_rejected
|
|
1032
|
+
# If async scheduling, num_output_placeholders also includes
|
|
1033
|
+
# the scheduled spec tokens count and so is similarly adjusted.
|
|
1034
|
+
if request.num_output_placeholders > 0:
|
|
1035
|
+
request.num_output_placeholders -= num_rejected
|
|
1036
|
+
spec_decoding_stats = self.make_spec_decoding_stats(
|
|
1037
|
+
spec_decoding_stats,
|
|
1038
|
+
num_draft_tokens=num_draft_tokens,
|
|
1039
|
+
num_accepted_tokens=num_accepted,
|
|
1040
|
+
)
|
|
1041
|
+
|
|
1042
|
+
stopped = False
|
|
1043
|
+
new_logprobs = None
|
|
1044
|
+
new_token_ids = generated_token_ids
|
|
1045
|
+
kv_transfer_params = None
|
|
1046
|
+
status_before_stop = request.status
|
|
1047
|
+
|
|
1048
|
+
# Check for stop and update request status.
|
|
1049
|
+
if new_token_ids:
|
|
1050
|
+
new_token_ids, stopped = self._update_request_with_output(
|
|
1051
|
+
request, new_token_ids
|
|
1052
|
+
)
|
|
1053
|
+
|
|
1054
|
+
# Stop checking for pooler models.
|
|
1055
|
+
pooler_output = None
|
|
1056
|
+
if pooler_outputs:
|
|
1057
|
+
pooler_output = pooler_outputs[req_index]
|
|
1058
|
+
stopped = check_stop(request, self.max_model_len, pooler_output)
|
|
1059
|
+
|
|
1060
|
+
if stopped:
|
|
1061
|
+
kv_transfer_params = self._free_request(request)
|
|
1062
|
+
if status_before_stop == RequestStatus.RUNNING:
|
|
1063
|
+
stopped_running_reqs.add(request)
|
|
1064
|
+
else:
|
|
1065
|
+
                    stopped_preempted_reqs.add(request)

            # Extract sample logprobs if needed.
            if (
                request.sampling_params is not None
                and request.sampling_params.logprobs is not None
                and logprobs
            ):
                # NOTE: once we support N tokens per step (spec decode),
                # the outer lists can be of length > 1.
                new_logprobs = logprobs.slice(req_index, req_index + 1)

            if new_token_ids and self.structured_output_manager.should_advance(request):
                struct_output_request = request.structured_output_request
                assert struct_output_request is not None
                assert struct_output_request.grammar is not None
                struct_output_request.grammar.accept_tokens(req_id, new_token_ids)

            if num_nans_in_logits is not None and req_id in num_nans_in_logits:
                request.num_nans_in_logits = num_nans_in_logits[req_id]

            # Get prompt logprobs for this request.
            prompt_logprobs_tensors = prompt_logprobs_dict.get(req_id)
            if new_token_ids or pooler_output is not None or kv_transfer_params:
                # Add EngineCoreOutput for this Request.
                outputs[request.client_index].append(
                    EngineCoreOutput(
                        request_id=req_id,
                        new_token_ids=new_token_ids,
                        finish_reason=request.get_finished_reason(),
                        new_logprobs=new_logprobs,
                        new_prompt_logprobs_tensors=prompt_logprobs_tensors,
                        pooling_output=pooler_output,
                        stop_reason=request.stop_reason,
                        events=request.take_events(),
                        kv_transfer_params=kv_transfer_params,
                        trace_headers=request.trace_headers,
                        num_cached_tokens=request.num_cached_tokens,
                        num_nans_in_logits=request.num_nans_in_logits,
                    )
                )
            else:
                # Invariant: EngineCore returns no partial prefill outputs.
                assert not prompt_logprobs_tensors

        # Remove the stopped requests from the running and waiting queues.
        if stopped_running_reqs:
            self.running = remove_all(self.running, stopped_running_reqs)
        if stopped_preempted_reqs:
            # This is a rare case and unlikely to impact performance.
            self.waiting.remove_requests(stopped_preempted_reqs)

        # KV Connector: update state for finished KV Transfers.
        if kv_connector_output:
            self._update_from_kv_xfer_finished(kv_connector_output)

        # collect KV cache events from KV cache manager
        events = self.kv_cache_manager.take_events()

        # collect KV cache events from connector
        if self.connector is not None:
            connector_events = self.connector.take_events()
            if connector_events:
                if events is None:
                    events = list(connector_events)
                else:
                    events.extend(connector_events)

        # publish collected KV cache events
        if events:
            batch = KVEventBatch(ts=time.time(), events=events)
            self.kv_event_publisher.publish(batch)

        # Create EngineCoreOutputs for all clients that have requests with
        # outputs in this step.
        engine_core_outputs = {
            client_index: EngineCoreOutputs(outputs=outs)
            for client_index, outs in outputs.items()
        }

        finished_req_ids = self.finished_req_ids_dict
        if finished_req_ids:
            # Include ids of requests that finished since last outputs
            # were sent.
            for client_index, finished_set in finished_req_ids.items():
                # Set finished request set in EngineCoreOutputs for this client.
                if (eco := engine_core_outputs.get(client_index)) is not None:
                    eco.finished_requests = finished_set
                else:
                    engine_core_outputs[client_index] = EngineCoreOutputs(
                        finished_requests=finished_set
                    )
            finished_req_ids.clear()

        if (
            stats := self.make_stats(spec_decoding_stats, kv_connector_stats)
        ) is not None:
            # Return stats to only one of the front-ends.
            if (eco := next(iter(engine_core_outputs.values()), None)) is None:
                # We must return the stats even if there are no request
                # outputs this step.
                engine_core_outputs[0] = eco = EngineCoreOutputs()
            eco.scheduler_stats = stats

        return engine_core_outputs

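The per-client fan-out above follows a take-and-attach pattern: step outputs are grouped by client index, then late-arriving metadata (finished ids, stats) is attached to an existing bundle or to a freshly created empty one. A minimal, self-contained sketch of that pattern; `Bundle` and `assemble` are hypothetical stand-ins, not vLLM's EngineCoreOutputs API:

# Illustrative sketch only; `Bundle` is a hypothetical stand-in for
# EngineCoreOutputs, not vLLM's actual class.
from dataclasses import dataclass, field


@dataclass
class Bundle:
    outputs: list = field(default_factory=list)
    finished: set = field(default_factory=set)
    stats: dict | None = None


def assemble(per_client: dict[int, list], finished: dict[int, set], stats: dict | None):
    # Group step outputs by client, mirroring the dict comprehension above.
    bundles = {idx: Bundle(outputs=outs) for idx, outs in per_client.items()}
    # Attach finished-request ids, creating an empty bundle if needed.
    for idx, fin in finished.items():
        if (b := bundles.get(idx)) is not None:
            b.finished = fin
        else:
            bundles[idx] = Bundle(finished=fin)
    # Stats go to exactly one front-end, even when there are no outputs.
    if stats is not None:
        if (b := next(iter(bundles.values()), None)) is None:
            bundles[0] = b = Bundle()
        b.stats = stats
    return bundles


print(assemble({1: ["tok"]}, {2: {"req-9"}}, stats={"running": 3})[1].stats)
# -> {'running': 3} (stats attached to the first bundle)
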
    def _update_request_with_output(
        self,
        request: Request,
        new_token_ids: list[int],
    ) -> tuple[list[int], bool]:
        # Append generated tokens and check for stop. Note that if
        # a request is still being prefilled, we expect the model runner
        # to return empty token ids for the request.
        stopped = False
        for num_new, output_token_id in enumerate(new_token_ids, 1):
            request.append_output_token_ids(output_token_id)

            # Check for stop and update request state.
            # This must be called before we make the EngineCoreOutput.
            stopped = check_stop(request, self.max_model_len)
            if stopped:
                del new_token_ids[num_new:]  # Trim new tokens if needed.
                break
        return new_token_ids, stopped

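The trim above relies on enumerate starting at 1: when the stop condition fires on the num_new-th token, every token sampled past it is discarded in place. A minimal sketch of the same loop, with an inline stop check standing in for vLLM's check_stop():

# Illustrative sketch; the `tok == stop_id` test is a toy stand-in for
# vLLM's check_stop() predicate.
def append_until_stop(history: list[int], new_ids: list[int], stop_id: int):
    stopped = False
    for num_new, tok in enumerate(new_ids, 1):
        history.append(tok)
        stopped = tok == stop_id
        if stopped:
            del new_ids[num_new:]  # Drop tokens sampled past the stop.
            break
    return new_ids, stopped


print(append_until_stop([], [5, 9, 7], stop_id=9))  # ([5, 9], True)
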
    def _free_encoder_inputs(self, request: Request) -> None:
        cached_encoder_input_ids = self.encoder_cache_manager.get_cached_input_ids(
            request
        )
        # OPTIMIZATION: Avoid list(set) if the set is empty.
        if not cached_encoder_input_ids:
            return

        # Here, we use list(set) to avoid modifying the set while iterating
        # over it.
        for input_id in list(cached_encoder_input_ids):
            mm_feature = request.mm_features[input_id]
            start_pos = mm_feature.mm_position.offset
            num_tokens = mm_feature.mm_position.length
            if self.is_encoder_decoder and request.num_computed_tokens > 0:
                # With Whisper, as soon as we've generated a single token,
                # we know we're done with the encoder input. Cross Attention
                # KVs have been calculated and cached already.
                self.encoder_cache_manager.free_encoder_input(request, input_id)
            elif start_pos + num_tokens <= request.num_computed_tokens:
                # The encoder output is already processed and stored
                # in the decoder's KV cache.
                self.encoder_cache_manager.free_encoder_input(request, input_id)

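The freeing rule in the elif branch is pure interval arithmetic: an encoder input occupying positions [offset, offset + length) can be released once the decode front has moved past its end. A small sketch, with hypothetical (offset, length) tuples in place of the real mm_position objects:

# Illustrative sketch; features are hypothetical (offset, length) pairs,
# not vLLM's mm_position objects.
def freeable_inputs(features: dict[int, tuple[int, int]], num_computed: int) -> list[int]:
    # An input is safe to free once every position it covers has been
    # absorbed into the decoder's KV cache.
    return [
        input_id
        for input_id, (offset, length) in features.items()
        if offset + length <= num_computed
    ]


feats = {0: (0, 16), 1: (16, 16), 2: (40, 8)}
print(freeable_inputs(feats, num_computed=32))  # [0, 1]
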
    def update_draft_token_ids(
        self,
        draft_token_ids: DraftTokenIds,
    ) -> None:
        for req_id, spec_token_ids in zip(
            draft_token_ids.req_ids,
            draft_token_ids.draft_token_ids,
        ):
            request = self.requests.get(req_id)
            if request is None or request.is_finished():
                # The request may have been finished. Skip.
                continue

            # Add newly generated spec token ids to the request.
            if self.structured_output_manager.should_advance(request):
                metadata = request.structured_output_request
                request.spec_token_ids = metadata.grammar.validate_tokens(  # type: ignore[union-attr]
                    spec_token_ids
                )
            else:
                request.spec_token_ids = spec_token_ids

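Draft tokens only help if the grammar would accept them, so validating them down to the longest acceptable prefix keeps speculation consistent with structured output. A hedged sketch of that prefix-filtering idea, with a toy acceptance predicate in place of the real grammar's validate_tokens:

# Illustrative sketch; `is_acceptable` is a toy stand-in for a grammar's
# per-token acceptance check, not vLLM's actual API.
def longest_valid_prefix(draft: list[int], is_acceptable) -> list[int]:
    valid: list[int] = []
    for tok in draft:
        if not is_acceptable(valid, tok):
            break  # Keep only the prefix the grammar would accept.
        valid.append(tok)
    return valid


# Toy grammar: token values must be non-decreasing.
print(longest_valid_prefix([1, 2, 2, 1, 5], lambda v, t: not v or t >= v[-1]))
# -> [1, 2, 2]
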
    def get_request_counts(self) -> tuple[int, int]:
        """Returns (num_running_reqs, num_waiting_reqs)."""
        return len(self.running), len(self.waiting)

    def add_request(self, request: Request) -> None:
        self.waiting.add_request(request)
        self.requests[request.request_id] = request
        if self.log_stats:
            request.record_event(EngineCoreEventType.QUEUED)

    def finish_requests(
        self,
        request_ids: str | Iterable[str],
        finished_status: RequestStatus,
    ) -> None:
        """Handles the finish signal from outside the scheduler.

        For example, the API server can abort a request when the client
        disconnects.
        """
        assert RequestStatus.is_finished(finished_status)
        if isinstance(request_ids, str):
            request_ids = (request_ids,)
        else:
            request_ids = set(request_ids)

        running_requests_to_remove = set()
        waiting_requests_to_remove = []
        valid_requests = []

        # First pass: collect requests to remove from queues
        for req_id in request_ids:
            request = self.requests.get(req_id)
            if request is None or request.is_finished():
                # Invalid request ID.
                continue

            valid_requests.append(request)
            if request.status == RequestStatus.RUNNING:
                running_requests_to_remove.add(request)
            else:
                waiting_requests_to_remove.append(request)

        # Remove all requests from queues at once for better efficiency
        if running_requests_to_remove:
            self.running = remove_all(self.running, running_requests_to_remove)
        if waiting_requests_to_remove:
            self.waiting.remove_requests(waiting_requests_to_remove)

        # Second pass: set status and free requests
        for request in valid_requests:
            request.status = finished_status
            self._free_request(request)

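The two-pass shape above (classify first, bulk-remove, then mutate) avoids deleting from a queue while iterating over related state, and turns N single removals into one batched removal per queue. A compact sketch of the same pattern over plain Python lists:

# Illustrative sketch of the two-pass finish pattern; the queues here are
# plain lists, not vLLM's request queues.
def finish(ids: set[str], running: list[str], waiting: list[str]) -> tuple[list, list]:
    # Pass 1: classify which entries each queue must drop.
    to_drop_running = {r for r in running if r in ids}
    to_drop_waiting = {w for w in waiting if w in ids}
    # Pass 2: remove everything from each queue in one sweep.
    running = [r for r in running if r not in to_drop_running]
    waiting = [w for w in waiting if w not in to_drop_waiting]
    return running, waiting


print(finish({"b", "c"}, running=["a", "b"], waiting=["c", "d"]))
# -> (['a'], ['d'])
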
    def _free_request(self, request: Request) -> dict[str, Any] | None:
        assert request.is_finished()

        delay_free_blocks, kv_xfer_params = self._connector_finished(request)
        self.encoder_cache_manager.free(request)
        request_id = request.request_id
        self.finished_req_ids.add(request_id)
        if self.finished_req_ids_dict is not None:
            self.finished_req_ids_dict[request.client_index].add(request_id)

        if not delay_free_blocks:
            self._free_blocks(request)

        return kv_xfer_params

    def _free_blocks(self, request: Request):
        assert request.is_finished()
        self.kv_cache_manager.free(request)
        del self.requests[request.request_id]

    def get_num_unfinished_requests(self) -> int:
        return len(self.waiting) + len(self.running)

    def has_finished_requests(self) -> bool:
        return len(self.finished_req_ids) > 0

    def reset_prefix_cache(self) -> bool:
        return self.kv_cache_manager.reset_prefix_cache()

    def make_stats(
        self,
        spec_decoding_stats: SpecDecodingStats | None = None,
        kv_connector_stats: KVConnectorStats | None = None,
    ) -> SchedulerStats | None:
        if not self.log_stats:
            return None
        prefix_cache_stats = self.kv_cache_manager.make_prefix_cache_stats()
        assert prefix_cache_stats is not None
        connector_prefix_cache_stats = self._make_connector_prefix_cache_stats()
        return SchedulerStats(
            num_running_reqs=len(self.running),
            num_waiting_reqs=len(self.waiting),
            kv_cache_usage=self.kv_cache_manager.usage,
            prefix_cache_stats=prefix_cache_stats,
            connector_prefix_cache_stats=connector_prefix_cache_stats,
            spec_decoding_stats=spec_decoding_stats,
            kv_connector_stats=kv_connector_stats.data if kv_connector_stats else None,
        )

    def make_spec_decoding_stats(
        self,
        spec_decoding_stats: SpecDecodingStats | None,
        num_draft_tokens: int,
        num_accepted_tokens: int,
    ) -> SpecDecodingStats | None:
        if not self.log_stats:
            return None
        if spec_decoding_stats is None:
            spec_decoding_stats = SpecDecodingStats.new(self.num_spec_tokens)
        spec_decoding_stats.observe_draft(
            num_draft_tokens=num_draft_tokens, num_accepted_tokens=num_accepted_tokens
        )
        return spec_decoding_stats

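observe_draft-style accounting reduces to a pair of counters whose ratio is the speculative acceptance rate. A tiny sketch of that bookkeeping; `DraftStats` is a hypothetical class, not vLLM's SpecDecodingStats:

# Illustrative sketch; DraftStats is hypothetical, not vLLM's
# SpecDecodingStats.
from dataclasses import dataclass


@dataclass
class DraftStats:
    num_drafts: int = 0
    num_draft_tokens: int = 0
    num_accepted_tokens: int = 0

    def observe_draft(self, num_draft_tokens: int, num_accepted_tokens: int):
        self.num_drafts += 1
        self.num_draft_tokens += num_draft_tokens
        self.num_accepted_tokens += num_accepted_tokens

    @property
    def acceptance_rate(self) -> float:
        return self.num_accepted_tokens / max(self.num_draft_tokens, 1)


s = DraftStats()
s.observe_draft(num_draft_tokens=4, num_accepted_tokens=3)
s.observe_draft(num_draft_tokens=4, num_accepted_tokens=1)
print(s.acceptance_rate)  # 0.5
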
    def shutdown(self) -> None:
        if self.kv_event_publisher:
            self.kv_event_publisher.shutdown()
        if self.connector is not None:
            self.connector.shutdown()

    ########################################################################
    # KV Connector Related Methods
    ########################################################################

    def _update_connector_prefix_cache_stats(
        self, request: Request, num_external_tokens: int
    ) -> None:
        if self.connector_prefix_cache_stats is None:
            return

        self.connector_prefix_cache_stats.record(
            num_tokens=request.num_tokens,
            num_hits=num_external_tokens,
            preempted=request.num_preemptions > 0,
        )

    def _make_connector_prefix_cache_stats(self) -> PrefixCacheStats | None:
        if self.connector_prefix_cache_stats is None:
            return None
        stats = self.connector_prefix_cache_stats
        self.connector_prefix_cache_stats = PrefixCacheStats()
        return stats

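_make_connector_prefix_cache_stats is a take-and-reset accessor: the caller receives the accumulated interval and the scheduler starts a fresh one, so consecutive snapshots cover disjoint windows and never double-count. A sketch with hypothetical counter classes:

# Illustrative sketch of the take-and-reset pattern; Window and Collector
# are hypothetical, not vLLM classes.
class Window:
    def __init__(self):
        self.hits = 0
        self.total = 0

    def record(self, hits: int, total: int):
        self.hits += hits
        self.total += total


class Collector:
    def __init__(self):
        self._window = Window()

    def record(self, hits: int, total: int):
        self._window.record(hits, total)

    def take(self) -> Window:
        # Hand the old window to the caller and start a fresh one.
        taken, self._window = self._window, Window()
        return taken


c = Collector()
c.record(hits=3, total=10)
first = c.take()
second = c.take()  # Fresh window: nothing recorded since the last take.
print(first.hits, first.total, second.total)  # 3 10 0
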
    def get_kv_connector(self) -> KVConnectorBase_V1 | None:
        return self.connector

    def _connector_finished(
        self, request: Request
    ) -> tuple[bool, dict[str, Any] | None]:
        """
        Invoke the KV connector request_finished() method if applicable.

        Returns optional kv transfer parameters to be included with the
        request outputs.
        """
        if self.connector is None:
            return False, None

        block_ids = self.kv_cache_manager.get_block_ids(request.request_id)

        if not isinstance(self.connector, SupportsHMA):
            # NOTE(Kuntai): We should deprecate this code path after we enforce
            # all connectors to support HMA.
            # The hybrid memory allocator should already be turned off for this
            # code path, but let's double-check here.
            assert len(self.kv_cache_config.kv_cache_groups) == 1
            return self.connector.request_finished(request, block_ids[0])

        return self.connector.request_finished_all_groups(request, block_ids)

    def _update_waiting_for_remote_kv(self, request: Request) -> bool:
        """
        KV Connector: check if the request_id is finished_recving.

        The finished_recving_kv_req_ids list is populated on the previous
        step's update_from_output based on the worker side connector.

        When the kv transfer is ready, we cache the blocks
        and the request state will be moved back to WAITING from
        WAITING_FOR_REMOTE_KV.
        """
        assert self.connector is not None
        if request.request_id not in self.finished_recving_kv_req_ids:
            return False

        if request.request_id in self.failed_recving_kv_req_ids:
            # Request had KV load failures; num_computed_tokens was already
            # updated in _update_requests_with_invalid_blocks.
            if request.num_computed_tokens:
                # Cache any valid computed tokens.
                self.kv_cache_manager.cache_blocks(request, request.num_computed_tokens)
            else:
                # No valid computed tokens, release allocated blocks.
                # There may be a local cache hit on retry.
                self.kv_cache_manager.free(request)

            self.failed_recving_kv_req_ids.remove(request.request_id)
        else:
            # Now that the blocks are ready, actually cache them.
            (block_ids,) = self.kv_cache_manager.get_block_ids(request.request_id)
            num_computed_tokens = len(block_ids) * self.block_size
            # Handle the case where the number of request tokens is less
            # than one block.
            num_computed_tokens = min(num_computed_tokens, request.num_tokens)
            if num_computed_tokens == request.num_tokens:
                num_computed_tokens -= 1
            # This will cache the blocks iff caching is enabled.
            self.kv_cache_manager.cache_blocks(request, num_computed_tokens)

            # Update the request state for scheduling.
            request.num_computed_tokens = num_computed_tokens

        # Return that we are ready.
        self.finished_recving_kv_req_ids.remove(request.request_id)
        return True

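The computed-token count above is derived from whole blocks, clamped to the request length, and then backed off by one token when everything is covered, so the scheduler still has at least one token to run through the model. A sketch of that arithmetic:

# Illustrative sketch of the blocks-to-computed-tokens arithmetic above.
def computed_tokens_from_blocks(num_blocks: int, block_size: int, num_request_tokens: int) -> int:
    computed = num_blocks * block_size
    # A request shorter than the block span only has its own tokens.
    computed = min(computed, num_request_tokens)
    if computed == num_request_tokens:
        # Leave one token uncomputed so there is something to schedule.
        computed -= 1
    return computed


print(computed_tokens_from_blocks(2, 16, num_request_tokens=40))  # 32
print(computed_tokens_from_blocks(3, 16, num_request_tokens=40))  # 39 (full hit backs off one)
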
    def _update_from_kv_xfer_finished(self, kv_connector_output: KVConnectorOutput):
        """
        KV Connector: update the scheduler state based on the output.

        The Worker side connectors add finished_recving and
        finished_sending reqs to the output.
        * if finished_sending: free the blocks
        * if finished_recving: add to state so we can
          schedule the request during the next step.
        """

        if self.connector is not None:
            self.connector.update_connector_output(kv_connector_output)

        # KV Connector: update recv and send status from last step.
        for req_id in kv_connector_output.finished_recving or ():
            logger.debug("Finished recving KV transfer for request %s", req_id)
            self.finished_recving_kv_req_ids.add(req_id)
        for req_id in kv_connector_output.finished_sending or ():
            logger.debug("Finished sending KV transfer for request %s", req_id)
            assert req_id in self.requests
            self._free_blocks(self.requests[req_id])

    def _update_requests_with_invalid_blocks(
        self, requests: Iterable[Request], invalid_block_ids: set[int]
    ) -> tuple[set[str], int]:
        """
        Identify and update requests affected by invalid KV cache blocks.

        This method scans the given requests, detects those with invalid blocks
        and adjusts their `num_computed_tokens` to the longest valid prefix.
        For observability, it also accumulates the total number of tokens that
        will need to be recomputed across all affected requests.

        Args:
            requests: The set of requests to scan for invalid blocks.
            invalid_block_ids: IDs of invalid blocks.

        Returns:
            tuple:
                - affected_req_ids (set[str]): IDs of requests impacted by
                  invalid blocks.
                - total_affected_tokens (int): Total number of tokens that must
                  be recomputed across all affected requests (for observability).
        """
        affected_req_ids: set[str] = set()
        total_affected_tokens = 0
        # If a block is invalid and shared by multiple requests in the batch,
        # these requests must be rescheduled, but only the first will recompute
        # it. This set tracks blocks already marked for recomputation.
        marked_invalid_block_ids: set[int] = set()
        for request in requests:
            is_affected = False
            marked_invalid_block = False
            req_id = request.request_id
            # TODO (davidb): add support for hybrid memory allocator
            (req_block_ids,) = self.kv_cache_manager.get_block_ids(req_id)
            # We iterate only over blocks that may contain externally computed
            # tokens.
            if request.status == RequestStatus.WAITING_FOR_REMOTE_KVS:
                # Async loading. If num_computed_tokens is set, it implies we
                # already processed some block failures for it in a prior step.
                req_num_computed_tokens = (
                    request.num_computed_tokens
                    if req_id in self.failed_recving_kv_req_ids
                    else len(req_block_ids) * self.block_size
                )
            else:
                # Sync loading. num_computed_tokens includes new tokens.
                req_num_computed_tokens = request.num_cached_tokens

            req_num_computed_blocks = (
                req_num_computed_tokens + self.block_size - 1
            ) // self.block_size
            for idx, block_id in zip(range(req_num_computed_blocks), req_block_ids):
                if block_id not in invalid_block_ids:
                    continue

                is_affected = True

                if block_id in marked_invalid_block_ids:
                    # This invalid block is shared with a previous request
                    # and was already marked for recomputation.
                    # This means this request can still consider this block
                    # as computed when rescheduled.
                    # Currently this only applies to sync loading; async
                    # loading does not yet support block sharing.
                    continue

                marked_invalid_block_ids.add(block_id)

                if marked_invalid_block:
                    # This request has already marked an invalid block for
                    # recomputation and updated its num_computed_tokens.
                    continue

                marked_invalid_block = True
                # Truncate the computed tokens at the first failed block.
                request.num_computed_tokens = idx * self.block_size
                total_affected_tokens += (
                    req_num_computed_tokens - request.num_computed_tokens
                )

            if is_affected:
                if not marked_invalid_block:
                    # All invalid blocks of this request are shared with
                    # previous requests and will be recomputed by them.
                    # Revert to considering only cached tokens as computed.
                    # Currently this only applies to sync loading; async
                    # loading does not yet support block sharing.
                    total_affected_tokens += (
                        request.num_computed_tokens - request.num_cached_tokens
                    )
                    request.num_computed_tokens = request.num_cached_tokens

                affected_req_ids.add(request.request_id)

        return affected_req_ids, total_affected_tokens

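Truncating at the first invalid block is what keeps the recomputed region contiguous: every token from that block onward is discarded, even if later blocks loaded fine, because a KV cache is only valid as a prefix. A sketch of the ceil-division plus truncation:

# Illustrative sketch of prefix truncation at the first invalid block.
def valid_prefix_tokens(block_ids: list[int], invalid: set[int], block_size: int, computed_tokens: int) -> int:
    num_blocks = (computed_tokens + block_size - 1) // block_size  # ceil-div
    for idx, block_id in zip(range(num_blocks), block_ids):
        if block_id in invalid:
            # Everything from this block on must be recomputed, even if
            # later blocks are intact: KV caches are valid only as prefixes.
            return idx * block_size
    return computed_tokens


print(valid_prefix_tokens([7, 8, 9], invalid={8}, block_size=16, computed_tokens=48))
# -> 16 (tokens in blocks 8 and 9 are rescheduled)
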
    def _handle_invalid_blocks(self, invalid_block_ids: set[int]) -> set[str]:
        total_requests_to_reschedule = 0
        total_tokens_to_reschedule = 0

        # --- Handle async KV loads (WAITING_FOR_REMOTE_KVS) ---
        async_load_reqs = (
            req
            for req in self.waiting
            if req.status == RequestStatus.WAITING_FOR_REMOTE_KVS
        )
        async_affected_req_ids, num_tokens_to_reschedule = (
            self._update_requests_with_invalid_blocks(
                async_load_reqs, invalid_block_ids
            )
        )

        total_requests_to_reschedule += len(async_affected_req_ids)
        total_tokens_to_reschedule += num_tokens_to_reschedule

        # Mark requests with async KV load failures; they will be rescheduled
        # once loading completes.
        self.failed_recving_kv_req_ids |= async_affected_req_ids

        # --- Handle sync KV loads (running requests) ---
        sync_affected_req_ids, num_tokens_to_reschedule = (
            self._update_requests_with_invalid_blocks(self.running, invalid_block_ids)
        )

        total_requests_to_reschedule += len(sync_affected_req_ids)
        total_tokens_to_reschedule += num_tokens_to_reschedule

        if total_requests_to_reschedule:
            logger.warning(
                "Recovered from KV load failure: "
                "%d request(s) rescheduled (%d tokens affected).",
                total_requests_to_reschedule,
                total_tokens_to_reschedule,
            )

        # Return the IDs of affected running requests to skip in
        # update_from_output.
        return sync_affected_req_ids
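
The async branch filters the waiting queue with a generator expression, so requests are visited lazily during the single scan inside _update_requests_with_invalid_blocks rather than materialized into an intermediate list. A minimal sketch of that one-pass filtering style, over hypothetical dict-based requests:

# Illustrative sketch of the lazy one-pass filter used for async loads.
def affected(waiting: list[dict], invalid: set[int]) -> set[str]:
    # Generator: nothing is materialized until the consumer iterates.
    remote_kv_reqs = (r for r in waiting if r["status"] == "WAITING_FOR_REMOTE_KVS")
    return {r["id"] for r in remote_kv_reqs if invalid & set(r["blocks"])}


reqs = [
    {"id": "a", "status": "WAITING_FOR_REMOTE_KVS", "blocks": [1, 2]},
    {"id": "b", "status": "WAITING", "blocks": [2, 3]},
]
print(affected(reqs, invalid={2}))  # {'a'}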