sglang 0.5.3rc0__py3-none-any.whl → 0.5.4__py3-none-any.whl
This diff compares the content of publicly released package versions as published to their respective public registries. It is provided for informational purposes only.
- sglang/bench_one_batch.py +54 -37
- sglang/bench_one_batch_server.py +340 -34
- sglang/bench_serving.py +340 -159
- sglang/check_env.py +1 -1
- sglang/compile_deep_gemm.py +6 -2
- sglang/global_config.py +1 -25
- sglang/lang/api.py +6 -0
- sglang/lang/backend/runtime_endpoint.py +1 -1
- sglang/lang/interpreter.py +1 -0
- sglang/lang/ir.py +13 -0
- sglang/launch_server.py +9 -2
- sglang/profiler.py +20 -3
- sglang/srt/_custom_ops.py +1 -1
- sglang/srt/batch_invariant_ops/__init__.py +27 -0
- sglang/srt/batch_invariant_ops/batch_invariant_ops.py +547 -0
- sglang/srt/checkpoint_engine/checkpoint_engine_worker.py +142 -0
- sglang/srt/compilation/backend.py +437 -0
- sglang/srt/compilation/compilation_config.py +20 -0
- sglang/srt/compilation/compilation_counter.py +47 -0
- sglang/srt/compilation/compile.py +210 -0
- sglang/srt/compilation/compiler_interface.py +503 -0
- sglang/srt/compilation/cuda_piecewise_backend.py +228 -0
- sglang/srt/compilation/fix_functionalization.py +134 -0
- sglang/srt/compilation/fx_utils.py +83 -0
- sglang/srt/compilation/inductor_pass.py +140 -0
- sglang/srt/compilation/pass_manager.py +66 -0
- sglang/srt/compilation/piecewise_context_manager.py +40 -0
- sglang/srt/compilation/weak_ref_tensor_jit.py +16 -0
- sglang/srt/configs/__init__.py +8 -0
- sglang/srt/configs/deepseek_ocr.py +262 -0
- sglang/srt/configs/deepseekvl2.py +194 -96
- sglang/srt/configs/dots_ocr.py +64 -0
- sglang/srt/configs/dots_vlm.py +2 -7
- sglang/srt/configs/falcon_h1.py +309 -0
- sglang/srt/configs/load_config.py +33 -2
- sglang/srt/configs/mamba_utils.py +117 -0
- sglang/srt/configs/model_config.py +284 -118
- sglang/srt/configs/modelopt_config.py +30 -0
- sglang/srt/configs/nemotron_h.py +286 -0
- sglang/srt/configs/olmo3.py +105 -0
- sglang/srt/configs/points_v15_chat.py +29 -0
- sglang/srt/configs/qwen3_next.py +11 -47
- sglang/srt/configs/qwen3_omni.py +613 -0
- sglang/srt/configs/qwen3_vl.py +576 -0
- sglang/srt/connector/remote_instance.py +1 -1
- sglang/srt/constrained/base_grammar_backend.py +6 -1
- sglang/srt/constrained/llguidance_backend.py +5 -0
- sglang/srt/constrained/outlines_backend.py +1 -1
- sglang/srt/constrained/outlines_jump_forward.py +1 -1
- sglang/srt/constrained/reasoner_grammar_backend.py +9 -6
- sglang/srt/constrained/utils.py +12 -0
- sglang/srt/constrained/xgrammar_backend.py +26 -15
- sglang/srt/debug_utils/dumper.py +10 -3
- sglang/srt/disaggregation/ascend/conn.py +2 -2
- sglang/srt/disaggregation/ascend/transfer_engine.py +48 -10
- sglang/srt/disaggregation/base/conn.py +17 -4
- sglang/srt/disaggregation/common/conn.py +268 -98
- sglang/srt/disaggregation/decode.py +172 -39
- sglang/srt/disaggregation/decode_kvcache_offload_manager.py +185 -0
- sglang/srt/disaggregation/decode_schedule_batch_mixin.py +25 -16
- sglang/srt/disaggregation/fake/conn.py +11 -3
- sglang/srt/disaggregation/mooncake/conn.py +203 -555
- sglang/srt/disaggregation/nixl/conn.py +217 -63
- sglang/srt/disaggregation/prefill.py +113 -270
- sglang/srt/disaggregation/utils.py +36 -5
- sglang/srt/distributed/device_communicators/all_reduce_utils.py +16 -0
- sglang/srt/distributed/device_communicators/custom_all_reduce.py +6 -6
- sglang/srt/distributed/device_communicators/pymscclpp.py +2 -2
- sglang/srt/distributed/device_communicators/pynccl.py +24 -12
- sglang/srt/distributed/device_communicators/pynccl_allocator.py +2 -2
- sglang/srt/distributed/device_communicators/shm_broadcast.py +4 -2
- sglang/srt/distributed/device_communicators/symm_mem.py +164 -0
- sglang/srt/distributed/naive_distributed.py +5 -4
- sglang/srt/distributed/parallel_state.py +203 -97
- sglang/srt/elastic_ep/elastic_ep.py +74 -0
- sglang/srt/entrypoints/context.py +3 -2
- sglang/srt/entrypoints/engine.py +85 -65
- sglang/srt/entrypoints/grpc_server.py +632 -305
- sglang/srt/entrypoints/harmony_utils.py +2 -2
- sglang/srt/entrypoints/http_server.py +169 -17
- sglang/srt/entrypoints/http_server_engine.py +1 -7
- sglang/srt/entrypoints/openai/protocol.py +327 -34
- sglang/srt/entrypoints/openai/serving_base.py +74 -8
- sglang/srt/entrypoints/openai/serving_chat.py +202 -118
- sglang/srt/entrypoints/openai/serving_classify.py +204 -0
- sglang/srt/entrypoints/openai/serving_completions.py +20 -4
- sglang/srt/entrypoints/openai/serving_embedding.py +1 -0
- sglang/srt/entrypoints/openai/serving_responses.py +47 -2
- sglang/srt/entrypoints/openai/serving_tokenize.py +144 -0
- sglang/srt/environ.py +323 -0
- sglang/srt/eplb/eplb_algorithms/__init__.py +18 -1
- sglang/srt/eplb/eplb_algorithms/deepseek.py +0 -2
- sglang/srt/eplb/eplb_algorithms/elasticity_aware.py +87 -0
- sglang/srt/eplb/expert_distribution.py +3 -4
- sglang/srt/eplb/expert_location.py +30 -5
- sglang/srt/eplb/expert_location_dispatch.py +2 -2
- sglang/srt/eplb/expert_location_updater.py +2 -2
- sglang/srt/function_call/base_format_detector.py +17 -18
- sglang/srt/function_call/function_call_parser.py +21 -16
- sglang/srt/function_call/glm4_moe_detector.py +4 -8
- sglang/srt/function_call/gpt_oss_detector.py +24 -1
- sglang/srt/function_call/json_array_parser.py +61 -0
- sglang/srt/function_call/kimik2_detector.py +17 -4
- sglang/srt/function_call/utils.py +98 -7
- sglang/srt/grpc/compile_proto.py +245 -0
- sglang/srt/grpc/grpc_request_manager.py +915 -0
- sglang/srt/grpc/health_servicer.py +189 -0
- sglang/srt/grpc/scheduler_launcher.py +181 -0
- sglang/srt/grpc/sglang_scheduler_pb2.py +81 -68
- sglang/srt/grpc/sglang_scheduler_pb2.pyi +124 -61
- sglang/srt/grpc/sglang_scheduler_pb2_grpc.py +92 -1
- sglang/srt/layers/activation.py +11 -7
- sglang/srt/layers/attention/aiter_backend.py +17 -18
- sglang/srt/layers/attention/ascend_backend.py +125 -10
- sglang/srt/layers/attention/attention_registry.py +226 -0
- sglang/srt/layers/attention/base_attn_backend.py +32 -4
- sglang/srt/layers/attention/cutlass_mla_backend.py +3 -3
- sglang/srt/layers/attention/double_sparsity_backend.py +2 -2
- sglang/srt/layers/attention/dual_chunk_flashattention_backend.py +1 -1
- sglang/srt/layers/attention/fla/chunk.py +0 -1
- sglang/srt/layers/attention/fla/chunk_o.py +1 -1
- sglang/srt/layers/attention/fla/chunk_scaled_dot_kkt.py +2 -2
- sglang/srt/layers/attention/fla/fused_recurrent.py +4 -4
- sglang/srt/layers/attention/fla/fused_sigmoid_gating_recurrent.py +2 -2
- sglang/srt/layers/attention/fla/index.py +0 -2
- sglang/srt/layers/attention/fla/layernorm_gated.py +50 -32
- sglang/srt/layers/attention/fla/utils.py +0 -3
- sglang/srt/layers/attention/fla/wy_fast.py +0 -2
- sglang/srt/layers/attention/flashattention_backend.py +52 -15
- sglang/srt/layers/attention/flashinfer_backend.py +357 -212
- sglang/srt/layers/attention/flashinfer_mla_backend.py +31 -33
- sglang/srt/layers/attention/flashmla_backend.py +9 -7
- sglang/srt/layers/attention/hybrid_attn_backend.py +12 -4
- sglang/srt/layers/attention/hybrid_linear_attn_backend.py +236 -133
- sglang/srt/layers/attention/intel_amx_backend.py +1 -1
- sglang/srt/layers/attention/mamba/causal_conv1d.py +2 -1
- sglang/srt/layers/attention/mamba/causal_conv1d_triton.py +24 -103
- sglang/srt/layers/attention/mamba/mamba.py +514 -1
- sglang/srt/layers/attention/mamba/mamba2_metadata.py +211 -0
- sglang/srt/layers/attention/mamba/mixer2_rms_norm_gated.py +120 -0
- sglang/srt/layers/attention/mamba/ops/__init__.py +2 -0
- sglang/srt/layers/attention/mamba/ops/layernorm_gated.py +172 -0
- sglang/srt/layers/attention/mamba/ops/mamba_ssm.py +442 -0
- sglang/srt/layers/attention/mamba/ops/ssd_bmm.py +214 -0
- sglang/srt/layers/attention/mamba/ops/ssd_chunk_scan.py +562 -0
- sglang/srt/layers/attention/mamba/ops/ssd_chunk_state.py +646 -0
- sglang/srt/layers/attention/mamba/ops/ssd_combined.py +261 -0
- sglang/srt/layers/attention/mamba/ops/ssd_state_passing.py +264 -0
- sglang/srt/layers/attention/npu_ops/mla_preprocess.py +393 -0
- sglang/srt/layers/attention/nsa/dequant_k_cache.py +163 -0
- sglang/srt/layers/attention/nsa/index_buf_accessor.py +354 -0
- sglang/srt/layers/attention/nsa/nsa_indexer.py +718 -0
- sglang/srt/layers/attention/nsa/quant_k_cache.py +255 -0
- sglang/srt/layers/attention/nsa/tilelang_kernel.py +785 -0
- sglang/srt/layers/attention/nsa/transform_index.py +144 -0
- sglang/srt/layers/attention/nsa/triton_kernel.py +136 -0
- sglang/srt/layers/attention/nsa/utils.py +23 -0
- sglang/srt/layers/attention/nsa_backend.py +1201 -0
- sglang/srt/layers/attention/tbo_backend.py +6 -6
- sglang/srt/layers/attention/torch_flex_backend.py +325 -0
- sglang/srt/layers/attention/triton_backend.py +249 -42
- sglang/srt/layers/attention/triton_ops/double_sparsity_attention.py +2 -2
- sglang/srt/layers/attention/triton_ops/extend_attention.py +539 -44
- sglang/srt/layers/attention/trtllm_mha_backend.py +7 -9
- sglang/srt/layers/attention/trtllm_mla_backend.py +523 -48
- sglang/srt/layers/attention/utils.py +11 -7
- sglang/srt/layers/attention/vision.py +61 -3
- sglang/srt/layers/attention/wave_backend.py +4 -4
- sglang/srt/layers/attention/xpu_backend.py +1028 -0
- sglang/srt/layers/communicator.py +19 -7
- sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/compile_utils.py +4 -8
- sglang/srt/layers/deep_gemm_wrapper/configurer.py +25 -0
- sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/entrypoint.py +3 -3
- sglang/srt/layers/dp_attention.py +28 -1
- sglang/srt/layers/elementwise.py +3 -1
- sglang/srt/layers/layernorm.py +47 -15
- sglang/srt/layers/linear.py +30 -5
- sglang/srt/layers/logits_processor.py +161 -18
- sglang/srt/layers/modelopt_utils.py +11 -0
- sglang/srt/layers/moe/cutlass_moe.py +0 -2
- sglang/srt/layers/moe/cutlass_w4a8_moe.py +213 -21
- sglang/srt/layers/moe/ep_moe/kernels.py +36 -458
- sglang/srt/layers/moe/ep_moe/layer.py +243 -448
- sglang/srt/layers/moe/flashinfer_cutedsl_moe.py +52 -25
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=128,N=192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=256,N=256,device_name=NVIDIA_B200.json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=256,N=256,device_name=NVIDIA_H800,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=512,N=128,device_name=NVIDIA_H800,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/fused_moe_triton_config.py +17 -5
- sglang/srt/layers/moe/fused_moe_triton/layer.py +86 -81
- sglang/srt/layers/moe/fused_moe_triton/triton_kernels_moe.py +18 -42
- sglang/srt/layers/moe/moe_runner/deep_gemm.py +304 -0
- sglang/srt/layers/moe/moe_runner/runner.py +3 -0
- sglang/srt/layers/moe/moe_runner/triton.py +3 -1
- sglang/srt/layers/moe/rocm_moe_utils.py +0 -1
- sglang/srt/layers/moe/router.py +51 -15
- sglang/srt/layers/moe/token_dispatcher/__init__.py +10 -0
- sglang/srt/layers/moe/token_dispatcher/base.py +1 -1
- sglang/srt/layers/moe/token_dispatcher/deepep.py +177 -106
- sglang/srt/layers/moe/token_dispatcher/mooncake.py +386 -0
- sglang/srt/layers/moe/token_dispatcher/standard.py +46 -0
- sglang/srt/layers/moe/topk.py +3 -2
- sglang/srt/layers/moe/utils.py +27 -1
- sglang/srt/layers/parameter.py +23 -6
- sglang/srt/layers/quantization/__init__.py +2 -53
- sglang/srt/layers/quantization/awq.py +183 -6
- sglang/srt/layers/quantization/awq_triton.py +29 -0
- sglang/srt/layers/quantization/base_config.py +20 -1
- sglang/srt/layers/quantization/compressed_tensors/__init__.py +7 -0
- sglang/srt/layers/quantization/compressed_tensors/compressed_tensors.py +21 -49
- sglang/srt/layers/quantization/compressed_tensors/compressed_tensors_moe.py +421 -70
- sglang/srt/layers/quantization/compressed_tensors/schemes/__init__.py +5 -0
- sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +4 -22
- sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +173 -0
- sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +339 -0
- sglang/srt/layers/quantization/fp8.py +86 -20
- sglang/srt/layers/quantization/fp8_kernel.py +55 -10
- sglang/srt/layers/quantization/fp8_utils.py +43 -15
- sglang/srt/layers/quantization/fpgemm_fp8.py +2 -3
- sglang/srt/layers/quantization/gptq.py +0 -1
- sglang/srt/layers/quantization/int8_kernel.py +18 -2
- sglang/srt/layers/quantization/marlin_utils.py +12 -0
- sglang/srt/layers/quantization/modelopt_quant.py +141 -81
- sglang/srt/layers/quantization/mxfp4.py +17 -34
- sglang/srt/layers/quantization/petit.py +1 -1
- sglang/srt/layers/quantization/quark/quark.py +3 -1
- sglang/srt/layers/quantization/quark/quark_moe.py +18 -5
- sglang/srt/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +0 -7
- sglang/srt/layers/quantization/unquant.py +1 -4
- sglang/srt/layers/quantization/utils.py +0 -1
- sglang/srt/layers/quantization/w4afp8.py +51 -24
- sglang/srt/layers/quantization/w8a8_int8.py +45 -27
- sglang/srt/layers/radix_attention.py +59 -9
- sglang/srt/layers/rotary_embedding.py +750 -46
- sglang/srt/layers/sampler.py +84 -16
- sglang/srt/layers/sparse_pooler.py +98 -0
- sglang/srt/layers/utils.py +23 -1
- sglang/srt/layers/vocab_parallel_embedding.py +4 -1
- sglang/srt/lora/backend/base_backend.py +3 -3
- sglang/srt/lora/backend/chunked_backend.py +348 -0
- sglang/srt/lora/backend/triton_backend.py +9 -4
- sglang/srt/lora/eviction_policy.py +139 -0
- sglang/srt/lora/lora.py +7 -5
- sglang/srt/lora/lora_manager.py +33 -7
- sglang/srt/lora/lora_registry.py +1 -1
- sglang/srt/lora/mem_pool.py +41 -17
- sglang/srt/lora/triton_ops/__init__.py +4 -0
- sglang/srt/lora/triton_ops/chunked_sgmv_expand.py +214 -0
- sglang/srt/lora/triton_ops/chunked_sgmv_shrink.py +176 -0
- sglang/srt/lora/utils.py +7 -5
- sglang/srt/managers/cache_controller.py +83 -152
- sglang/srt/managers/data_parallel_controller.py +156 -87
- sglang/srt/managers/detokenizer_manager.py +51 -24
- sglang/srt/managers/io_struct.py +223 -129
- sglang/srt/managers/mm_utils.py +49 -10
- sglang/srt/managers/multi_tokenizer_mixin.py +83 -98
- sglang/srt/managers/multimodal_processor.py +1 -2
- sglang/srt/managers/overlap_utils.py +130 -0
- sglang/srt/managers/schedule_batch.py +340 -529
- sglang/srt/managers/schedule_policy.py +158 -18
- sglang/srt/managers/scheduler.py +665 -620
- sglang/srt/managers/scheduler_input_blocker.py +1 -1
- sglang/srt/managers/scheduler_metrics_mixin.py +150 -131
- sglang/srt/managers/scheduler_output_processor_mixin.py +337 -122
- sglang/srt/managers/scheduler_pp_mixin.py +341 -0
- sglang/srt/managers/scheduler_profiler_mixin.py +62 -15
- sglang/srt/managers/scheduler_runtime_checker_mixin.py +217 -0
- sglang/srt/managers/scheduler_update_weights_mixin.py +40 -14
- sglang/srt/managers/tokenizer_communicator_mixin.py +141 -19
- sglang/srt/managers/tokenizer_manager.py +462 -226
- sglang/srt/managers/tp_worker.py +217 -156
- sglang/srt/managers/utils.py +79 -47
- sglang/srt/mem_cache/allocator.py +21 -22
- sglang/srt/mem_cache/allocator_ascend.py +42 -28
- sglang/srt/mem_cache/base_prefix_cache.py +3 -3
- sglang/srt/mem_cache/chunk_cache.py +20 -2
- sglang/srt/mem_cache/common.py +480 -0
- sglang/srt/mem_cache/evict_policy.py +38 -0
- sglang/srt/mem_cache/hicache_storage.py +44 -2
- sglang/srt/mem_cache/hiradix_cache.py +134 -34
- sglang/srt/mem_cache/mamba_radix_cache.py +993 -0
- sglang/srt/mem_cache/memory_pool.py +602 -208
- sglang/srt/mem_cache/memory_pool_host.py +134 -183
- sglang/srt/mem_cache/multimodal_cache.py +0 -1
- sglang/srt/mem_cache/radix_cache.py +263 -78
- sglang/srt/mem_cache/radix_cache_cpp.py +29 -21
- sglang/srt/mem_cache/storage/__init__.py +10 -0
- sglang/srt/mem_cache/storage/aibrix_kvcache/aibrix_kvcache_storage.py +157 -0
- sglang/srt/mem_cache/storage/aibrix_kvcache/unit_test.py +97 -0
- sglang/srt/mem_cache/storage/backend_factory.py +223 -0
- sglang/srt/mem_cache/storage/eic/eic_storage.py +777 -0
- sglang/srt/mem_cache/storage/eic/test_unit.py +115 -0
- sglang/srt/mem_cache/storage/hf3fs/hf3fs_client.py +0 -1
- sglang/srt/mem_cache/storage/hf3fs/storage_hf3fs.py +180 -59
- sglang/srt/mem_cache/storage/lmcache/lmc_radix_cache.py +15 -9
- sglang/srt/mem_cache/storage/mooncake_store/mooncake_store.py +217 -26
- sglang/srt/mem_cache/storage/nixl/hicache_nixl.py +38 -9
- sglang/srt/mem_cache/storage/nixl/nixl_utils.py +1 -1
- sglang/srt/mem_cache/storage/nixl/test_hicache_nixl_storage.py +17 -2
- sglang/srt/mem_cache/swa_radix_cache.py +115 -58
- sglang/srt/metrics/collector.py +113 -120
- sglang/srt/metrics/func_timer.py +3 -8
- sglang/srt/metrics/utils.py +8 -1
- sglang/srt/model_executor/cpu_graph_runner.py +2 -2
- sglang/srt/model_executor/cuda_graph_runner.py +81 -36
- sglang/srt/model_executor/forward_batch_info.py +40 -50
- sglang/srt/model_executor/model_runner.py +507 -319
- sglang/srt/model_executor/npu_graph_runner.py +11 -5
- sglang/srt/model_executor/piecewise_cuda_graph_runner.py +539 -0
- sglang/srt/model_loader/__init__.py +1 -1
- sglang/srt/model_loader/loader.py +438 -37
- sglang/srt/model_loader/utils.py +0 -1
- sglang/srt/model_loader/weight_utils.py +200 -27
- sglang/srt/models/apertus.py +2 -3
- sglang/srt/models/arcee.py +2 -2
- sglang/srt/models/bailing_moe.py +40 -56
- sglang/srt/models/bailing_moe_nextn.py +3 -4
- sglang/srt/models/bert.py +1 -1
- sglang/srt/models/deepseek_nextn.py +25 -4
- sglang/srt/models/deepseek_ocr.py +1516 -0
- sglang/srt/models/deepseek_v2.py +793 -235
- sglang/srt/models/dots_ocr.py +171 -0
- sglang/srt/models/dots_vlm.py +0 -1
- sglang/srt/models/dots_vlm_vit.py +1 -1
- sglang/srt/models/falcon_h1.py +570 -0
- sglang/srt/models/gemma3_causal.py +0 -2
- sglang/srt/models/gemma3_mm.py +17 -1
- sglang/srt/models/gemma3n_mm.py +2 -3
- sglang/srt/models/glm4_moe.py +17 -40
- sglang/srt/models/glm4_moe_nextn.py +4 -4
- sglang/srt/models/glm4v.py +3 -2
- sglang/srt/models/glm4v_moe.py +6 -6
- sglang/srt/models/gpt_oss.py +12 -35
- sglang/srt/models/grok.py +10 -23
- sglang/srt/models/hunyuan.py +2 -7
- sglang/srt/models/interns1.py +0 -1
- sglang/srt/models/kimi_vl.py +1 -7
- sglang/srt/models/kimi_vl_moonvit.py +4 -2
- sglang/srt/models/llama.py +6 -2
- sglang/srt/models/llama_eagle3.py +1 -1
- sglang/srt/models/longcat_flash.py +6 -23
- sglang/srt/models/longcat_flash_nextn.py +4 -15
- sglang/srt/models/mimo.py +2 -13
- sglang/srt/models/mimo_mtp.py +1 -2
- sglang/srt/models/minicpmo.py +7 -5
- sglang/srt/models/mixtral.py +1 -4
- sglang/srt/models/mllama.py +1 -1
- sglang/srt/models/mllama4.py +27 -6
- sglang/srt/models/nemotron_h.py +511 -0
- sglang/srt/models/olmo2.py +31 -4
- sglang/srt/models/opt.py +5 -5
- sglang/srt/models/phi.py +1 -1
- sglang/srt/models/phi4mm.py +1 -1
- sglang/srt/models/phimoe.py +0 -1
- sglang/srt/models/pixtral.py +0 -3
- sglang/srt/models/points_v15_chat.py +186 -0
- sglang/srt/models/qwen.py +0 -1
- sglang/srt/models/qwen2.py +0 -7
- sglang/srt/models/qwen2_5_vl.py +5 -5
- sglang/srt/models/qwen2_audio.py +2 -15
- sglang/srt/models/qwen2_moe.py +70 -4
- sglang/srt/models/qwen2_vl.py +6 -3
- sglang/srt/models/qwen3.py +18 -3
- sglang/srt/models/qwen3_moe.py +50 -38
- sglang/srt/models/qwen3_next.py +43 -21
- sglang/srt/models/qwen3_next_mtp.py +3 -4
- sglang/srt/models/qwen3_omni_moe.py +661 -0
- sglang/srt/models/qwen3_vl.py +791 -0
- sglang/srt/models/qwen3_vl_moe.py +343 -0
- sglang/srt/models/registry.py +15 -3
- sglang/srt/models/roberta.py +55 -3
- sglang/srt/models/sarashina2_vision.py +268 -0
- sglang/srt/models/solar.py +505 -0
- sglang/srt/models/starcoder2.py +357 -0
- sglang/srt/models/step3_vl.py +3 -5
- sglang/srt/models/torch_native_llama.py +9 -2
- sglang/srt/models/utils.py +61 -0
- sglang/srt/multimodal/processors/base_processor.py +21 -9
- sglang/srt/multimodal/processors/deepseek_ocr.py +37 -0
- sglang/srt/multimodal/processors/deepseek_vl_v2.py +0 -3
- sglang/srt/multimodal/processors/dots_vlm.py +2 -4
- sglang/srt/multimodal/processors/glm4v.py +1 -5
- sglang/srt/multimodal/processors/internvl.py +20 -10
- sglang/srt/multimodal/processors/janus_pro.py +0 -1
- sglang/srt/multimodal/processors/mllama4.py +0 -8
- sglang/srt/multimodal/processors/phi4mm.py +0 -1
- sglang/srt/multimodal/processors/points_v15_chat.py +52 -0
- sglang/srt/multimodal/processors/qwen_vl.py +83 -17
- sglang/srt/multimodal/processors/sarashina2_vision.py +81 -0
- sglang/srt/multimodal/processors/step3_vl.py +1 -1
- sglang/srt/parser/conversation.py +41 -0
- sglang/srt/parser/jinja_template_utils.py +6 -0
- sglang/srt/parser/reasoning_parser.py +0 -1
- sglang/srt/sampling/custom_logit_processor.py +77 -2
- sglang/srt/sampling/sampling_batch_info.py +36 -23
- sglang/srt/sampling/sampling_params.py +75 -0
- sglang/srt/server_args.py +1300 -338
- sglang/srt/server_args_config_parser.py +146 -0
- sglang/srt/single_batch_overlap.py +161 -0
- sglang/srt/speculative/base_spec_worker.py +34 -0
- sglang/srt/speculative/cpp_ngram/ngram.cpp +374 -0
- sglang/srt/speculative/cpp_ngram/ngram.h +110 -0
- sglang/srt/speculative/cpp_ngram/ngram_cache.py +138 -0
- sglang/srt/speculative/cpp_ngram/ngram_cache_binding.cpp +43 -0
- sglang/srt/speculative/cpp_ngram/param.h +125 -0
- sglang/srt/speculative/cpp_ngram/queue.h +71 -0
- sglang/srt/speculative/draft_utils.py +226 -0
- sglang/srt/speculative/eagle_draft_cuda_graph_runner.py +26 -8
- sglang/srt/speculative/eagle_draft_extend_cuda_graph_runner.py +26 -3
- sglang/srt/speculative/eagle_info.py +786 -0
- sglang/srt/speculative/eagle_info_v2.py +458 -0
- sglang/srt/speculative/eagle_utils.py +113 -1270
- sglang/srt/speculative/eagle_worker.py +120 -285
- sglang/srt/speculative/eagle_worker_v2.py +702 -0
- sglang/srt/speculative/ngram_info.py +433 -0
- sglang/srt/speculative/ngram_worker.py +246 -0
- sglang/srt/speculative/spec_info.py +49 -0
- sglang/srt/speculative/spec_utils.py +641 -0
- sglang/srt/speculative/standalone_worker.py +4 -14
- sglang/srt/tokenizer/tiktoken_tokenizer.py +2 -2
- sglang/srt/tracing/trace.py +32 -6
- sglang/srt/two_batch_overlap.py +35 -18
- sglang/srt/utils/__init__.py +2 -0
- sglang/srt/{bench_utils.py → utils/bench_utils.py} +4 -2
- sglang/srt/{utils.py → utils/common.py} +583 -113
- sglang/srt/{hf_transformers_utils.py → utils/hf_transformers_utils.py} +86 -19
- sglang/srt/{host_shared_memory.py → utils/host_shared_memory.py} +0 -1
- sglang/srt/{offloader.py → utils/offloader.py} +4 -4
- sglang/srt/{patch_torch.py → utils/patch_torch.py} +8 -0
- sglang/srt/utils/profile_merger.py +199 -0
- sglang/srt/utils/rpd_utils.py +452 -0
- sglang/srt/utils/slow_rank_detector.py +71 -0
- sglang/srt/{torch_memory_saver_adapter.py → utils/torch_memory_saver_adapter.py} +5 -7
- sglang/srt/warmup.py +8 -4
- sglang/srt/weight_sync/utils.py +1 -1
- sglang/test/attention/test_flashattn_backend.py +1 -1
- sglang/test/attention/test_flashattn_mla_backend.py +0 -1
- sglang/test/attention/test_prefix_chunk_info.py +0 -2
- sglang/test/attention/test_trtllm_mla_backend.py +221 -53
- sglang/test/few_shot_gsm8k_engine.py +2 -4
- sglang/test/get_logits_ut.py +57 -0
- sglang/test/kit_matched_stop.py +157 -0
- sglang/test/longbench_v2/__init__.py +1 -0
- sglang/test/longbench_v2/test_longbench_v2_eval.py +238 -0
- sglang/test/longbench_v2/validate_longbench_v2.py +337 -0
- sglang/test/longbench_v2/validate_longbench_v2_standalone.py +306 -0
- sglang/test/run_eval.py +120 -11
- sglang/test/runners.py +3 -1
- sglang/test/send_one.py +42 -7
- sglang/test/simple_eval_common.py +8 -2
- sglang/test/simple_eval_gpqa.py +0 -1
- sglang/test/simple_eval_humaneval.py +0 -3
- sglang/test/simple_eval_longbench_v2.py +344 -0
- sglang/test/simple_eval_mmmu_vlm.py +441 -0
- sglang/test/test_block_fp8.py +3 -4
- sglang/test/test_block_fp8_deep_gemm_blackwell.py +0 -1
- sglang/test/test_cutlass_moe.py +1 -2
- sglang/test/test_cutlass_w4a8_moe.py +10 -20
- sglang/test/test_deterministic.py +430 -0
- sglang/test/test_deterministic_utils.py +73 -0
- sglang/test/test_disaggregation_utils.py +93 -1
- sglang/test/test_marlin_moe.py +0 -1
- sglang/test/test_programs.py +1 -1
- sglang/test/test_utils.py +432 -16
- sglang/utils.py +10 -1
- sglang/version.py +1 -1
- {sglang-0.5.3rc0.dist-info → sglang-0.5.4.dist-info}/METADATA +64 -43
- {sglang-0.5.3rc0.dist-info → sglang-0.5.4.dist-info}/RECORD +476 -346
- sglang/srt/entrypoints/grpc_request_manager.py +0 -580
- sglang/srt/layers/quantization/deep_gemm_wrapper/configurer.py +0 -32
- sglang/srt/managers/tp_worker_overlap_thread.py +0 -319
- sglang/srt/mem_cache/lora_radix_cache.py +0 -421
- sglang/srt/speculative/build_eagle_tree.py +0 -427
- sglang/test/test_block_fp8_ep.py +0 -358
- /sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/__init__.py +0 -0
- /sglang/srt/{remote_instance_weight_loader_utils.py → model_loader/remote_instance_weight_loader_utils.py} +0 -0
- /sglang/srt/{aio_rwlock.py → utils/aio_rwlock.py} +0 -0
- /sglang/srt/{poll_based_barrier.py → utils/poll_based_barrier.py} +0 -0
- {sglang-0.5.3rc0.dist-info → sglang-0.5.4.dist-info}/WHEEL +0 -0
- {sglang-0.5.3rc0.dist-info → sglang-0.5.4.dist-info}/licenses/LICENSE +0 -0
- {sglang-0.5.3rc0.dist-info → sglang-0.5.4.dist-info}/top_level.txt +0 -0
sglang/srt/compilation/compiler_interface.py (new file)

@@ -0,0 +1,503 @@

# Adapted from https://github.com/vllm-project/vllm/blob/v0.10.0/vllm/compilation/compiler_interface.py

import contextlib
import copy
import hashlib
import os
from contextlib import ExitStack
from typing import Any, Callable, Optional
from unittest.mock import patch

import torch
import torch._inductor.compile_fx
import torch.fx as fx

from sglang.srt.compilation.compilation_counter import compilation_counter
from sglang.srt.compilation.inductor_pass import pass_context


class CompilerInterface:
    """
    The interface for a compiler that can be used by vLLM.
    """

    # The name of the compiler, e.g. inductor.
    # This is a class-level attribute.
    name: str

    def initialize_cache(
        self, cache_dir: str, disable_cache: bool = False, prefix: str = ""
    ):
        """
        when the vLLM process uses `cache_dir` as the cache directory,
        the compiler should initialize itself with the cache directory,
        e.g. by re-directing its own cache directory to a sub-directory.

        prefix can be used in combination with cache_dir to figure out the base
        cache directory, e.g. there're multiple parts of model being compiled,
        but we want to share the same cache directory for all of them.

        e.g.
        cache_dir = "/path/to/dir/backbone", prefix = "backbone"
        cache_dir = "/path/to/dir/eagle_head", prefix = "eagle_head"
        """
        pass

    def compute_hash(self) -> str:
        """
        Gather all the relevant information from the vLLM config,
        to compute a hash so that we can cache the compiled model.

        See [`VllmConfig.compute_hash`][vllm.config.VllmConfig.compute_hash]
        to check what information
        is already considered by default. This function should only
        consider the information that is specific to the compiler.
        """
        return ""

    def compile(
        self,
        graph: fx.GraphModule,
        example_inputs: list[Any],
        compiler_config: dict[str, Any],
        runtime_shape: Optional[int] = None,
        key: Optional[str] = None,
    ) -> tuple[Optional[Callable], Optional[Any]]:
        """
        Compile the graph with the given example inputs and compiler config,
        with a runtime shape. If the `runtime_shape` is None, it means
        the `example_inputs` have a dynamic shape. Otherwise, the
        `runtime_shape` specifies the shape of the inputs. Right now we only
        support one variable shape for all inputs, which is the batchsize
        (number of tokens) during inference.

        Dynamo will make sure `graph(*example_inputs)` is valid.

        The function should return a compiled callable function, as well as
        a handle that can be used to directly load the compiled function.

        The handle should be a plain Python object, preferably a string or a
        file path for readability.

        If the compiler doesn't support caching, it should return None for the
        handle. If the compiler fails to compile the graph, it should return
        None for the compiled function as well.

        `key` is required for StandaloneInductorAdapter, it specifies where to
        save the compiled artifact. The compiled artifact gets saved to
        `cache_dir/key`.
        """
        return None, None

    def load(
        self,
        handle: Any,
        graph: fx.GraphModule,
        example_inputs: list[Any],
        graph_index: int,
        runtime_shape: Optional[int] = None,
    ) -> Callable:
        """
        Load the compiled function from the handle.
        Raises an error if the handle is invalid.

        The handle is the second return value of the `compile` function.
        """
        raise NotImplementedError("caching is not supported")


def get_inductor_factors() -> list[Any]:
    factors: list[Any] = []
    # summarize system state
    from torch._inductor.codecache import CacheBase

    system_factors = CacheBase.get_system()
    factors.append(system_factors)

    # summarize pytorch state
    from torch._inductor.codecache import torch_key

    torch_factors = torch_key()
    factors.append(torch_factors)
    return factors


class AlwaysHitShapeEnv:
    """
    Why do we need this class:

    For normal `torch.compile` usage, every compilation will have
    one Dynamo bytecode compilation and one Inductor compilation.
    The Inductor compilation happens under the context of the
    Dynamo bytecode compilation, and that context is used to
    determine the dynamic shape information, etc.

    For our use case, we only run Dynamo bytecode compilation once,
    and run Inductor compilation multiple times with different shapes
    plus a general shape. The compilation for specific shapes happens
    outside of the context of the Dynamo bytecode compilation. At that
    time, we don't have shape environment to provide to Inductor, and
    it will fail the Inductor code cache lookup.

    By providing a dummy shape environment that always hits, we can
    make the Inductor code cache lookup always hit, and we can
    compile the graph for different shapes as needed.

    The following dummy methods are obtained by trial-and-error
    until it works.
    """

    def __init__(self) -> None:
        self.guards: list[Any] = []

    def evaluate_guards_expression(self, *args, **kwargs):
        return True

    def get_pruned_guards(self, *args, **kwargs):
        return []

    def produce_guards_expression(self, *args, **kwargs):
        return ""


class InductorAdaptor(CompilerInterface):
    """
    The adaptor for the Inductor compiler, version 2.5, 2.6, 2.7.
    """

    name = "inductor"

    def compute_hash(self) -> str:
        factors = get_inductor_factors()
        hash_str = hashlib.md5(
            str(factors).encode(), usedforsecurity=False
        ).hexdigest()[:10]
        return hash_str

    def initialize_cache(
        self, cache_dir: str, disable_cache: bool = False, prefix: str = ""
    ):
        self.cache_dir = cache_dir
        self.prefix = prefix
        self.base_cache_dir = cache_dir[: -len(prefix)] if prefix else cache_dir
        if disable_cache:
            return
        # redirect the cache directory to a sub-directory
        # set flags so that Inductor and Triton store their cache
        # in the cache_dir, then users only need to copy the cache_dir
        # to another machine to reuse the cache.
        inductor_cache = os.path.join(self.base_cache_dir, "inductor_cache")
        os.makedirs(inductor_cache, exist_ok=True)
        os.environ["TORCHINDUCTOR_CACHE_DIR"] = inductor_cache
        triton_cache = os.path.join(self.base_cache_dir, "triton_cache")
        os.makedirs(triton_cache, exist_ok=True)
        os.environ["TRITON_CACHE_DIR"] = triton_cache

    def compile(
        self,
        graph: fx.GraphModule,
        example_inputs: list[Any],
        compiler_config: dict[str, Any],
        runtime_shape: Optional[int] = None,
        key: Optional[str] = None,
    ) -> tuple[Optional[Callable], Optional[Any]]:
        compilation_counter.num_inductor_compiles += 1
        from torch._inductor.compile_fx import compile_fx

        current_config = {}
        if compiler_config is not None:
            current_config.update(compiler_config)

        # disable remote cache
        current_config["fx_graph_cache"] = True
        current_config["fx_graph_remote_cache"] = False

        set_inductor_config(current_config, runtime_shape)

        # inductor can inplace modify the graph, so we need to copy it
        # see https://github.com/pytorch/pytorch/issues/138980
        graph = copy.deepcopy(graph)

        # it's the first time we compile this graph
        # the assumption is that we don't have nested Inductor compilation.
        # compiled_fx_graph_hash will only be called once, and we can hook
        # it to get the hash of the compiled graph directly.

        hash_str, file_path = None, None
        from torch._inductor.codecache import FxGraphCache, compiled_fx_graph_hash

        if torch.__version__.startswith("2.5"):
            original_load = FxGraphCache.load
            original_load_name = "torch._inductor.codecache.FxGraphCache.load"

            def hijack_load(*args, **kwargs):
                inductor_compiled_graph = original_load(*args, **kwargs)
                nonlocal file_path
                compiled_fn = inductor_compiled_graph.current_callable
                file_path = compiled_fn.__code__.co_filename  # noqa
                if not file_path.startswith(self.base_cache_dir):
                    # hooked in the align_inputs_from_check_idxs function
                    # in torch/_inductor/utils.py
                    for cell in compiled_fn.__closure__:
                        if not callable(cell.cell_contents):
                            continue
                        if cell.cell_contents.__code__.co_filename.startswith(
                            self.base_cache_dir
                        ):
                            # this is the real file path compiled from Inductor
                            file_path = cell.cell_contents.__code__.co_filename
                            break
                return inductor_compiled_graph

            hijacked_compile_fx_inner = (
                torch._inductor.compile_fx.compile_fx_inner
            )  # noqa
        elif torch.__version__ >= "2.6":
            # function renamed in 2.6
            original_load_name = None

            def hijacked_compile_fx_inner(*args, **kwargs):
                output = torch._inductor.compile_fx.compile_fx_inner(*args, **kwargs)
                nonlocal hash_str
                inductor_compiled_graph = output
                if inductor_compiled_graph is not None:
                    nonlocal file_path
                    compiled_fn = inductor_compiled_graph.current_callable
                    file_path = compiled_fn.__code__.co_filename  # noqa
                    if not file_path.startswith(self.base_cache_dir):
                        # hooked in the align_inputs_from_check_idxs function
                        # in torch/_inductor/utils.py
                        for cell in compiled_fn.__closure__:
                            if not callable(cell.cell_contents):
                                continue
                            code = cell.cell_contents.__code__
                            if code.co_filename.startswith(self.base_cache_dir):
                                # this is the real file path
                                # compiled from Inductor
                                file_path = code.co_filename
                                break
                    hash_str = inductor_compiled_graph._fx_graph_cache_key
                return output

        def hijack_compiled_fx_graph_hash(*args, **kwargs):
            out = compiled_fx_graph_hash(*args, **kwargs)
            nonlocal hash_str
            hash_str = out[0]
            return out

        def _check_can_cache(*args, **kwargs):
            # no error means it can be cached.
            # Inductor refuses to cache the graph outside of Dynamo
            # tracing context, and also disables caching for graphs
            # with high-order ops.
            # For vLLM, in either case, we want to cache the graph.
            # see https://github.com/pytorch/pytorch/blob/9f5ebf3fc609105a74eab4ccc24932d6353ff566/torch/_inductor/codecache.py#L1221  # noqa
            return

        def _get_shape_env() -> AlwaysHitShapeEnv:
            return AlwaysHitShapeEnv()

        with ExitStack() as stack:
            # hijack to get the compiled graph itself
            if original_load_name is not None:
                stack.enter_context(patch(original_load_name, hijack_load))

            # for hijacking the hash of the compiled graph
            stack.enter_context(
                patch(
                    "torch._inductor.codecache.compiled_fx_graph_hash",
                    hijack_compiled_fx_graph_hash,
                )
            )

            # for providing a dummy shape environment
            stack.enter_context(
                patch(
                    "torch._inductor.codecache.FxGraphCache._get_shape_env",
                    _get_shape_env,
                )
            )

            from torch._functorch._aot_autograd.autograd_cache import AOTAutogradCache

            # torch 2.8+ on main uses _get_shape_env in AOTAutogradCache
            if hasattr(AOTAutogradCache, "_get_shape_env"):
                stack.enter_context(
                    patch(
                        "torch._functorch._aot_autograd.autograd_cache.AOTAutogradCache._get_shape_env",
                        _get_shape_env,
                    )
                )

            # for forcing the graph to be cached
            stack.enter_context(
                patch(
                    "torch._inductor.codecache.FxGraphCache._check_can_cache",
                    _check_can_cache,
                )
            )

            # Dynamo metrics context, see method for more details.
            stack.enter_context(self.metrics_context())

            # Disable remote caching. When these are on, on remote cache-hit,
            # the monkey-patched functions never actually get called.
            # vLLM today assumes and requires the monkey-patched functions to
            # get hit.
            # TODO(zou3519): we're going to replace this all with
            # standalone_compile sometime.

            stack.enter_context(
                torch._inductor.config.patch(fx_graph_remote_cache=False)
            )
            # InductorAdaptor (unfortunately) requires AOTAutogradCache
            # to be turned off to run. It will fail to acquire the hash_str
            # and error if not.
            # StandaloneInductorAdaptor (PyTorch 2.8+) fixes this problem.
            stack.enter_context(
                torch._functorch.config.patch(enable_autograd_cache=False)
            )
            stack.enter_context(
                torch._functorch.config.patch(enable_remote_autograd_cache=False)
            )

            with pass_context(runtime_shape):
                compiled_graph = compile_fx(
                    graph,
                    example_inputs,
                    inner_compile=hijacked_compile_fx_inner,
                    config_patches=current_config,
                )
        return compiled_graph, (hash_str, file_path)

    def load(
        self,
        handle: Any,
        graph: fx.GraphModule,
        example_inputs: list[Any],
        graph_index: int,
        runtime_shape: Optional[int] = None,
    ) -> Callable:
        assert isinstance(handle, tuple)
        assert isinstance(handle[0], str)
        assert isinstance(handle[1], str)
        hash_str = handle[0]

        from torch._functorch._aot_autograd.autograd_cache import AOTAutogradCache
        from torch._inductor.codecache import FxGraphCache

        with ExitStack() as exit_stack:
            exit_stack.enter_context(
                patch(
                    "torch._inductor.codecache.FxGraphCache._get_shape_env",
                    lambda *args, **kwargs: AlwaysHitShapeEnv(),
                )
            )
            # torch 2.8+ on main uses _get_shape_env in AOTAutogradCache
            if hasattr(AOTAutogradCache, "_get_shape_env"):
                exit_stack.enter_context(
                    patch(
                        "torch._functorch._aot_autograd.autograd_cache.AOTAutogradCache._get_shape_env",
                        lambda *args, **kwargs: AlwaysHitShapeEnv(),
                    )
                )

            # Dynamo metrics context, see method for more details.
            exit_stack.enter_context(self.metrics_context())

            if torch.__version__.startswith("2.5"):
                inductor_compiled_graph = FxGraphCache._lookup_graph(
                    hash_str, example_inputs, True, False
                )
                assert inductor_compiled_graph is not None, (
                    "Inductor cache lookup failed. Please remove"
                    f"the cache directory and try again."  # noqa
                )
            elif torch.__version__ >= "2.6":
                from torch._inductor.output_code import CompiledFxGraphConstantsWithGm

                constants = CompiledFxGraphConstantsWithGm(graph)
                inductor_compiled_graph, _ = FxGraphCache._lookup_graph(
                    hash_str, example_inputs, True, None, constants
                )
                assert inductor_compiled_graph is not None, (
                    "Inductor cache lookup failed. Please remove"
                    f"the cache directory and try again."  # noqa
                )

        # Inductor calling convention (function signature):
        # f(list) -> tuple
        # Dynamo calling convention (function signature):
        # f(*args) -> Any

        # need to know if the graph returns a tuple
        from torch._inductor.compile_fx import graph_returns_tuple

        returns_tuple = graph_returns_tuple(graph)

        # this is the callable we return to Dynamo to run
        def compiled_graph(*args):
            # convert args to list
            list_args = list(args)
            graph_output = inductor_compiled_graph(list_args)
            # unpack the tuple if needed
            if returns_tuple:
                return graph_output
            else:
                return graph_output[0]

        return compiled_graph

    def metrics_context(self) -> contextlib.AbstractContextManager:
        """
        This method returns the Dynamo metrics context (if it exists,
        otherwise a null context). It is used by various compile components.
        Present in torch>=2.6, it's used inside FxGraphCache in
        torch==2.6 (but not after). It might also be used in various other
        torch.compile internal functions.

        Because it is re-entrant, we always set it (even if entering via Dynamo
        and the context was already entered). We might want to revisit if it
        should be set at a different level of compilation.

        This is likely a bug in PyTorch: public APIs should not rely on
        manually setting up internal contexts. But we also rely on non-public
        APIs which might not provide these guarantees.
        """
        import torch._dynamo.utils

        return torch._dynamo.utils.get_metrics_context()


def set_inductor_config(config, runtime_shape):
    if isinstance(runtime_shape, int):
        # for a specific batchsize, tuning triton kernel parameters
        # can be beneficial
        config["max_autotune"] = True
        config["coordinate_descent_tuning"] = True


class EagerAdapter(CompilerInterface):
    name = "eager"

    def compile(
        self,
        graph: fx.GraphModule,
        example_inputs: list[Any],
        compiler_config: dict[str, Any],
        runtime_shape: Optional[int] = None,
        key: Optional[str] = None,
        num_graphs: int = 1,
    ) -> tuple[Optional[Callable], Optional[Any]]:
        return graph, None

    def load(
        self,
        handle: Any,
        graph: fx.GraphModule,
        example_inputs: list[Any],
        graph_index: int,
        runtime_shape: Optional[int] = None,
        num_graphs: int = 1,
    ) -> Callable:
        raise NotImplementedError("eager compilation is not supported")
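For orientation, a minimal usage sketch of the contract these adaptors implement follows. The sketch is not part of the package: the function f, its example inputs, and the use of fx.symbolic_trace as a stand-in for Dynamo tracing are illustrative assumptions; only the EagerAdapter API comes from the file above. compile() returns a (callable, handle) pair, where a None handle means the backend does not support caching; a non-None handle (for InductorAdaptor, the (hash_str, file_path) tuple) can later be passed to load() to rebuild the callable from cache without recompiling.

# Minimal sketch of the CompilerInterface contract, driving the no-op
# EagerAdapter from compiler_interface.py above. `f` and its inputs are
# hypothetical; only the adapter API is taken from the file.
import torch
import torch.fx as fx

from sglang.srt.compilation.compiler_interface import EagerAdapter


def f(x):
    return torch.relu(x) + 1


# Obtain an fx.GraphModule; in the real pipeline Dynamo produces this and
# guarantees that `graph(*example_inputs)` is valid.
gm = fx.symbolic_trace(f)
example_inputs = [torch.randn(8)]

adapter = EagerAdapter()
compiled_fn, handle = adapter.compile(gm, example_inputs, compiler_config={})

assert handle is None  # the eager backend returns no cache handle
print(compiled_fn(*example_inputs))  # runs the graph unmodified, same as f(x)

The split between compile() and load() is what lets the runner in this package compile a graph once per shape, persist the Inductor artifact under the redirected TORCHINDUCTOR_CACHE_DIR / TRITON_CACHE_DIR directories, and on later startups skip straight to the cached callable via the stored handle.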