sglang 0.5.3rc0__py3-none-any.whl → 0.5.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
 - sglang/bench_one_batch.py +54 -37
 - sglang/bench_one_batch_server.py +340 -34
 - sglang/bench_serving.py +340 -159
 - sglang/check_env.py +1 -1
 - sglang/compile_deep_gemm.py +6 -2
 - sglang/global_config.py +1 -25
 - sglang/lang/api.py +6 -0
 - sglang/lang/backend/runtime_endpoint.py +1 -1
 - sglang/lang/interpreter.py +1 -0
 - sglang/lang/ir.py +13 -0
 - sglang/launch_server.py +9 -2
 - sglang/profiler.py +20 -3
 - sglang/srt/_custom_ops.py +1 -1
 - sglang/srt/batch_invariant_ops/__init__.py +27 -0
 - sglang/srt/batch_invariant_ops/batch_invariant_ops.py +547 -0
 - sglang/srt/checkpoint_engine/checkpoint_engine_worker.py +142 -0
 - sglang/srt/compilation/backend.py +437 -0
 - sglang/srt/compilation/compilation_config.py +20 -0
 - sglang/srt/compilation/compilation_counter.py +47 -0
 - sglang/srt/compilation/compile.py +210 -0
 - sglang/srt/compilation/compiler_interface.py +503 -0
 - sglang/srt/compilation/cuda_piecewise_backend.py +228 -0
 - sglang/srt/compilation/fix_functionalization.py +134 -0
 - sglang/srt/compilation/fx_utils.py +83 -0
 - sglang/srt/compilation/inductor_pass.py +140 -0
 - sglang/srt/compilation/pass_manager.py +66 -0
 - sglang/srt/compilation/piecewise_context_manager.py +40 -0
 - sglang/srt/compilation/weak_ref_tensor_jit.py +16 -0
 - sglang/srt/configs/__init__.py +8 -0
 - sglang/srt/configs/deepseek_ocr.py +262 -0
 - sglang/srt/configs/deepseekvl2.py +194 -96
 - sglang/srt/configs/dots_ocr.py +64 -0
 - sglang/srt/configs/dots_vlm.py +2 -7
 - sglang/srt/configs/falcon_h1.py +309 -0
 - sglang/srt/configs/load_config.py +33 -2
 - sglang/srt/configs/mamba_utils.py +117 -0
 - sglang/srt/configs/model_config.py +284 -118
 - sglang/srt/configs/modelopt_config.py +30 -0
 - sglang/srt/configs/nemotron_h.py +286 -0
 - sglang/srt/configs/olmo3.py +105 -0
 - sglang/srt/configs/points_v15_chat.py +29 -0
 - sglang/srt/configs/qwen3_next.py +11 -47
 - sglang/srt/configs/qwen3_omni.py +613 -0
 - sglang/srt/configs/qwen3_vl.py +576 -0
 - sglang/srt/connector/remote_instance.py +1 -1
 - sglang/srt/constrained/base_grammar_backend.py +6 -1
 - sglang/srt/constrained/llguidance_backend.py +5 -0
 - sglang/srt/constrained/outlines_backend.py +1 -1
 - sglang/srt/constrained/outlines_jump_forward.py +1 -1
 - sglang/srt/constrained/reasoner_grammar_backend.py +9 -6
 - sglang/srt/constrained/utils.py +12 -0
 - sglang/srt/constrained/xgrammar_backend.py +26 -15
 - sglang/srt/debug_utils/dumper.py +10 -3
 - sglang/srt/disaggregation/ascend/conn.py +2 -2
 - sglang/srt/disaggregation/ascend/transfer_engine.py +48 -10
 - sglang/srt/disaggregation/base/conn.py +17 -4
 - sglang/srt/disaggregation/common/conn.py +268 -98
 - sglang/srt/disaggregation/decode.py +172 -39
 - sglang/srt/disaggregation/decode_kvcache_offload_manager.py +185 -0
 - sglang/srt/disaggregation/decode_schedule_batch_mixin.py +25 -16
 - sglang/srt/disaggregation/fake/conn.py +11 -3
 - sglang/srt/disaggregation/mooncake/conn.py +203 -555
 - sglang/srt/disaggregation/nixl/conn.py +217 -63
 - sglang/srt/disaggregation/prefill.py +113 -270
 - sglang/srt/disaggregation/utils.py +36 -5
 - sglang/srt/distributed/device_communicators/all_reduce_utils.py +16 -0
 - sglang/srt/distributed/device_communicators/custom_all_reduce.py +6 -6
 - sglang/srt/distributed/device_communicators/pymscclpp.py +2 -2
 - sglang/srt/distributed/device_communicators/pynccl.py +24 -12
 - sglang/srt/distributed/device_communicators/pynccl_allocator.py +2 -2
 - sglang/srt/distributed/device_communicators/shm_broadcast.py +4 -2
 - sglang/srt/distributed/device_communicators/symm_mem.py +164 -0
 - sglang/srt/distributed/naive_distributed.py +5 -4
 - sglang/srt/distributed/parallel_state.py +203 -97
 - sglang/srt/elastic_ep/elastic_ep.py +74 -0
 - sglang/srt/entrypoints/context.py +3 -2
 - sglang/srt/entrypoints/engine.py +85 -65
 - sglang/srt/entrypoints/grpc_server.py +632 -305
 - sglang/srt/entrypoints/harmony_utils.py +2 -2
 - sglang/srt/entrypoints/http_server.py +169 -17
 - sglang/srt/entrypoints/http_server_engine.py +1 -7
 - sglang/srt/entrypoints/openai/protocol.py +327 -34
 - sglang/srt/entrypoints/openai/serving_base.py +74 -8
 - sglang/srt/entrypoints/openai/serving_chat.py +202 -118
 - sglang/srt/entrypoints/openai/serving_classify.py +204 -0
 - sglang/srt/entrypoints/openai/serving_completions.py +20 -4
 - sglang/srt/entrypoints/openai/serving_embedding.py +1 -0
 - sglang/srt/entrypoints/openai/serving_responses.py +47 -2
 - sglang/srt/entrypoints/openai/serving_tokenize.py +144 -0
 - sglang/srt/environ.py +323 -0
 - sglang/srt/eplb/eplb_algorithms/__init__.py +18 -1
 - sglang/srt/eplb/eplb_algorithms/deepseek.py +0 -2
 - sglang/srt/eplb/eplb_algorithms/elasticity_aware.py +87 -0
 - sglang/srt/eplb/expert_distribution.py +3 -4
 - sglang/srt/eplb/expert_location.py +30 -5
 - sglang/srt/eplb/expert_location_dispatch.py +2 -2
 - sglang/srt/eplb/expert_location_updater.py +2 -2
 - sglang/srt/function_call/base_format_detector.py +17 -18
 - sglang/srt/function_call/function_call_parser.py +21 -16
 - sglang/srt/function_call/glm4_moe_detector.py +4 -8
 - sglang/srt/function_call/gpt_oss_detector.py +24 -1
 - sglang/srt/function_call/json_array_parser.py +61 -0
 - sglang/srt/function_call/kimik2_detector.py +17 -4
 - sglang/srt/function_call/utils.py +98 -7
 - sglang/srt/grpc/compile_proto.py +245 -0
 - sglang/srt/grpc/grpc_request_manager.py +915 -0
 - sglang/srt/grpc/health_servicer.py +189 -0
 - sglang/srt/grpc/scheduler_launcher.py +181 -0
 - sglang/srt/grpc/sglang_scheduler_pb2.py +81 -68
 - sglang/srt/grpc/sglang_scheduler_pb2.pyi +124 -61
 - sglang/srt/grpc/sglang_scheduler_pb2_grpc.py +92 -1
 - sglang/srt/layers/activation.py +11 -7
 - sglang/srt/layers/attention/aiter_backend.py +17 -18
 - sglang/srt/layers/attention/ascend_backend.py +125 -10
 - sglang/srt/layers/attention/attention_registry.py +226 -0
 - sglang/srt/layers/attention/base_attn_backend.py +32 -4
 - sglang/srt/layers/attention/cutlass_mla_backend.py +3 -3
 - sglang/srt/layers/attention/double_sparsity_backend.py +2 -2
 - sglang/srt/layers/attention/dual_chunk_flashattention_backend.py +1 -1
 - sglang/srt/layers/attention/fla/chunk.py +0 -1
 - sglang/srt/layers/attention/fla/chunk_o.py +1 -1
 - sglang/srt/layers/attention/fla/chunk_scaled_dot_kkt.py +2 -2
 - sglang/srt/layers/attention/fla/fused_recurrent.py +4 -4
 - sglang/srt/layers/attention/fla/fused_sigmoid_gating_recurrent.py +2 -2
 - sglang/srt/layers/attention/fla/index.py +0 -2
 - sglang/srt/layers/attention/fla/layernorm_gated.py +50 -32
 - sglang/srt/layers/attention/fla/utils.py +0 -3
 - sglang/srt/layers/attention/fla/wy_fast.py +0 -2
 - sglang/srt/layers/attention/flashattention_backend.py +52 -15
 - sglang/srt/layers/attention/flashinfer_backend.py +357 -212
 - sglang/srt/layers/attention/flashinfer_mla_backend.py +31 -33
 - sglang/srt/layers/attention/flashmla_backend.py +9 -7
 - sglang/srt/layers/attention/hybrid_attn_backend.py +12 -4
 - sglang/srt/layers/attention/hybrid_linear_attn_backend.py +236 -133
 - sglang/srt/layers/attention/intel_amx_backend.py +1 -1
 - sglang/srt/layers/attention/mamba/causal_conv1d.py +2 -1
 - sglang/srt/layers/attention/mamba/causal_conv1d_triton.py +24 -103
 - sglang/srt/layers/attention/mamba/mamba.py +514 -1
 - sglang/srt/layers/attention/mamba/mamba2_metadata.py +211 -0
 - sglang/srt/layers/attention/mamba/mixer2_rms_norm_gated.py +120 -0
 - sglang/srt/layers/attention/mamba/ops/__init__.py +2 -0
 - sglang/srt/layers/attention/mamba/ops/layernorm_gated.py +172 -0
 - sglang/srt/layers/attention/mamba/ops/mamba_ssm.py +442 -0
 - sglang/srt/layers/attention/mamba/ops/ssd_bmm.py +214 -0
 - sglang/srt/layers/attention/mamba/ops/ssd_chunk_scan.py +562 -0
 - sglang/srt/layers/attention/mamba/ops/ssd_chunk_state.py +646 -0
 - sglang/srt/layers/attention/mamba/ops/ssd_combined.py +261 -0
 - sglang/srt/layers/attention/mamba/ops/ssd_state_passing.py +264 -0
 - sglang/srt/layers/attention/npu_ops/mla_preprocess.py +393 -0
 - sglang/srt/layers/attention/nsa/dequant_k_cache.py +163 -0
 - sglang/srt/layers/attention/nsa/index_buf_accessor.py +354 -0
 - sglang/srt/layers/attention/nsa/nsa_indexer.py +718 -0
 - sglang/srt/layers/attention/nsa/quant_k_cache.py +255 -0
 - sglang/srt/layers/attention/nsa/tilelang_kernel.py +785 -0
 - sglang/srt/layers/attention/nsa/transform_index.py +144 -0
 - sglang/srt/layers/attention/nsa/triton_kernel.py +136 -0
 - sglang/srt/layers/attention/nsa/utils.py +23 -0
 - sglang/srt/layers/attention/nsa_backend.py +1201 -0
 - sglang/srt/layers/attention/tbo_backend.py +6 -6
 - sglang/srt/layers/attention/torch_flex_backend.py +325 -0
 - sglang/srt/layers/attention/triton_backend.py +249 -42
 - sglang/srt/layers/attention/triton_ops/double_sparsity_attention.py +2 -2
 - sglang/srt/layers/attention/triton_ops/extend_attention.py +539 -44
 - sglang/srt/layers/attention/trtllm_mha_backend.py +7 -9
 - sglang/srt/layers/attention/trtllm_mla_backend.py +523 -48
 - sglang/srt/layers/attention/utils.py +11 -7
 - sglang/srt/layers/attention/vision.py +61 -3
 - sglang/srt/layers/attention/wave_backend.py +4 -4
 - sglang/srt/layers/attention/xpu_backend.py +1028 -0
 - sglang/srt/layers/communicator.py +19 -7
 - sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/compile_utils.py +4 -8
 - sglang/srt/layers/deep_gemm_wrapper/configurer.py +25 -0
 - sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/entrypoint.py +3 -3
 - sglang/srt/layers/dp_attention.py +28 -1
 - sglang/srt/layers/elementwise.py +3 -1
 - sglang/srt/layers/layernorm.py +47 -15
 - sglang/srt/layers/linear.py +30 -5
 - sglang/srt/layers/logits_processor.py +161 -18
 - sglang/srt/layers/modelopt_utils.py +11 -0
 - sglang/srt/layers/moe/cutlass_moe.py +0 -2
 - sglang/srt/layers/moe/cutlass_w4a8_moe.py +213 -21
 - sglang/srt/layers/moe/ep_moe/kernels.py +36 -458
 - sglang/srt/layers/moe/ep_moe/layer.py +243 -448
 - sglang/srt/layers/moe/flashinfer_cutedsl_moe.py +52 -25
 - sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=128,N=192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
 - sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=256,N=256,device_name=NVIDIA_B200.json +146 -0
 - sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=256,N=256,device_name=NVIDIA_H800,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
 - sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=512,N=128,device_name=NVIDIA_H800,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
 - sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
 - sglang/srt/layers/moe/fused_moe_triton/fused_moe_triton_config.py +17 -5
 - sglang/srt/layers/moe/fused_moe_triton/layer.py +86 -81
 - sglang/srt/layers/moe/fused_moe_triton/triton_kernels_moe.py +18 -42
 - sglang/srt/layers/moe/moe_runner/deep_gemm.py +304 -0
 - sglang/srt/layers/moe/moe_runner/runner.py +3 -0
 - sglang/srt/layers/moe/moe_runner/triton.py +3 -1
 - sglang/srt/layers/moe/rocm_moe_utils.py +0 -1
 - sglang/srt/layers/moe/router.py +51 -15
 - sglang/srt/layers/moe/token_dispatcher/__init__.py +10 -0
 - sglang/srt/layers/moe/token_dispatcher/base.py +1 -1
 - sglang/srt/layers/moe/token_dispatcher/deepep.py +177 -106
 - sglang/srt/layers/moe/token_dispatcher/mooncake.py +386 -0
 - sglang/srt/layers/moe/token_dispatcher/standard.py +46 -0
 - sglang/srt/layers/moe/topk.py +3 -2
 - sglang/srt/layers/moe/utils.py +27 -1
 - sglang/srt/layers/parameter.py +23 -6
 - sglang/srt/layers/quantization/__init__.py +2 -53
 - sglang/srt/layers/quantization/awq.py +183 -6
 - sglang/srt/layers/quantization/awq_triton.py +29 -0
 - sglang/srt/layers/quantization/base_config.py +20 -1
 - sglang/srt/layers/quantization/compressed_tensors/__init__.py +7 -0
 - sglang/srt/layers/quantization/compressed_tensors/compressed_tensors.py +21 -49
 - sglang/srt/layers/quantization/compressed_tensors/compressed_tensors_moe.py +421 -70
 - sglang/srt/layers/quantization/compressed_tensors/schemes/__init__.py +5 -0
 - sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +4 -22
 - sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +173 -0
 - sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +339 -0
 - sglang/srt/layers/quantization/fp8.py +86 -20
 - sglang/srt/layers/quantization/fp8_kernel.py +55 -10
 - sglang/srt/layers/quantization/fp8_utils.py +43 -15
 - sglang/srt/layers/quantization/fpgemm_fp8.py +2 -3
 - sglang/srt/layers/quantization/gptq.py +0 -1
 - sglang/srt/layers/quantization/int8_kernel.py +18 -2
 - sglang/srt/layers/quantization/marlin_utils.py +12 -0
 - sglang/srt/layers/quantization/modelopt_quant.py +141 -81
 - sglang/srt/layers/quantization/mxfp4.py +17 -34
 - sglang/srt/layers/quantization/petit.py +1 -1
 - sglang/srt/layers/quantization/quark/quark.py +3 -1
 - sglang/srt/layers/quantization/quark/quark_moe.py +18 -5
 - sglang/srt/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +0 -7
 - sglang/srt/layers/quantization/unquant.py +1 -4
 - sglang/srt/layers/quantization/utils.py +0 -1
 - sglang/srt/layers/quantization/w4afp8.py +51 -24
 - sglang/srt/layers/quantization/w8a8_int8.py +45 -27
 - sglang/srt/layers/radix_attention.py +59 -9
 - sglang/srt/layers/rotary_embedding.py +750 -46
 - sglang/srt/layers/sampler.py +84 -16
 - sglang/srt/layers/sparse_pooler.py +98 -0
 - sglang/srt/layers/utils.py +23 -1
 - sglang/srt/layers/vocab_parallel_embedding.py +4 -1
 - sglang/srt/lora/backend/base_backend.py +3 -3
 - sglang/srt/lora/backend/chunked_backend.py +348 -0
 - sglang/srt/lora/backend/triton_backend.py +9 -4
 - sglang/srt/lora/eviction_policy.py +139 -0
 - sglang/srt/lora/lora.py +7 -5
 - sglang/srt/lora/lora_manager.py +33 -7
 - sglang/srt/lora/lora_registry.py +1 -1
 - sglang/srt/lora/mem_pool.py +41 -17
 - sglang/srt/lora/triton_ops/__init__.py +4 -0
 - sglang/srt/lora/triton_ops/chunked_sgmv_expand.py +214 -0
 - sglang/srt/lora/triton_ops/chunked_sgmv_shrink.py +176 -0
 - sglang/srt/lora/utils.py +7 -5
 - sglang/srt/managers/cache_controller.py +83 -152
 - sglang/srt/managers/data_parallel_controller.py +156 -87
 - sglang/srt/managers/detokenizer_manager.py +51 -24
 - sglang/srt/managers/io_struct.py +223 -129
 - sglang/srt/managers/mm_utils.py +49 -10
 - sglang/srt/managers/multi_tokenizer_mixin.py +83 -98
 - sglang/srt/managers/multimodal_processor.py +1 -2
 - sglang/srt/managers/overlap_utils.py +130 -0
 - sglang/srt/managers/schedule_batch.py +340 -529
 - sglang/srt/managers/schedule_policy.py +158 -18
 - sglang/srt/managers/scheduler.py +665 -620
 - sglang/srt/managers/scheduler_input_blocker.py +1 -1
 - sglang/srt/managers/scheduler_metrics_mixin.py +150 -131
 - sglang/srt/managers/scheduler_output_processor_mixin.py +337 -122
 - sglang/srt/managers/scheduler_pp_mixin.py +341 -0
 - sglang/srt/managers/scheduler_profiler_mixin.py +62 -15
 - sglang/srt/managers/scheduler_runtime_checker_mixin.py +217 -0
 - sglang/srt/managers/scheduler_update_weights_mixin.py +40 -14
 - sglang/srt/managers/tokenizer_communicator_mixin.py +141 -19
 - sglang/srt/managers/tokenizer_manager.py +462 -226
 - sglang/srt/managers/tp_worker.py +217 -156
 - sglang/srt/managers/utils.py +79 -47
 - sglang/srt/mem_cache/allocator.py +21 -22
 - sglang/srt/mem_cache/allocator_ascend.py +42 -28
 - sglang/srt/mem_cache/base_prefix_cache.py +3 -3
 - sglang/srt/mem_cache/chunk_cache.py +20 -2
 - sglang/srt/mem_cache/common.py +480 -0
 - sglang/srt/mem_cache/evict_policy.py +38 -0
 - sglang/srt/mem_cache/hicache_storage.py +44 -2
 - sglang/srt/mem_cache/hiradix_cache.py +134 -34
 - sglang/srt/mem_cache/mamba_radix_cache.py +993 -0
 - sglang/srt/mem_cache/memory_pool.py +602 -208
 - sglang/srt/mem_cache/memory_pool_host.py +134 -183
 - sglang/srt/mem_cache/multimodal_cache.py +0 -1
 - sglang/srt/mem_cache/radix_cache.py +263 -78
 - sglang/srt/mem_cache/radix_cache_cpp.py +29 -21
 - sglang/srt/mem_cache/storage/__init__.py +10 -0
 - sglang/srt/mem_cache/storage/aibrix_kvcache/aibrix_kvcache_storage.py +157 -0
 - sglang/srt/mem_cache/storage/aibrix_kvcache/unit_test.py +97 -0
 - sglang/srt/mem_cache/storage/backend_factory.py +223 -0
 - sglang/srt/mem_cache/storage/eic/eic_storage.py +777 -0
 - sglang/srt/mem_cache/storage/eic/test_unit.py +115 -0
 - sglang/srt/mem_cache/storage/hf3fs/hf3fs_client.py +0 -1
 - sglang/srt/mem_cache/storage/hf3fs/storage_hf3fs.py +180 -59
 - sglang/srt/mem_cache/storage/lmcache/lmc_radix_cache.py +15 -9
 - sglang/srt/mem_cache/storage/mooncake_store/mooncake_store.py +217 -26
 - sglang/srt/mem_cache/storage/nixl/hicache_nixl.py +38 -9
 - sglang/srt/mem_cache/storage/nixl/nixl_utils.py +1 -1
 - sglang/srt/mem_cache/storage/nixl/test_hicache_nixl_storage.py +17 -2
 - sglang/srt/mem_cache/swa_radix_cache.py +115 -58
 - sglang/srt/metrics/collector.py +113 -120
 - sglang/srt/metrics/func_timer.py +3 -8
 - sglang/srt/metrics/utils.py +8 -1
 - sglang/srt/model_executor/cpu_graph_runner.py +2 -2
 - sglang/srt/model_executor/cuda_graph_runner.py +81 -36
 - sglang/srt/model_executor/forward_batch_info.py +40 -50
 - sglang/srt/model_executor/model_runner.py +507 -319
 - sglang/srt/model_executor/npu_graph_runner.py +11 -5
 - sglang/srt/model_executor/piecewise_cuda_graph_runner.py +539 -0
 - sglang/srt/model_loader/__init__.py +1 -1
 - sglang/srt/model_loader/loader.py +438 -37
 - sglang/srt/model_loader/utils.py +0 -1
 - sglang/srt/model_loader/weight_utils.py +200 -27
 - sglang/srt/models/apertus.py +2 -3
 - sglang/srt/models/arcee.py +2 -2
 - sglang/srt/models/bailing_moe.py +40 -56
 - sglang/srt/models/bailing_moe_nextn.py +3 -4
 - sglang/srt/models/bert.py +1 -1
 - sglang/srt/models/deepseek_nextn.py +25 -4
 - sglang/srt/models/deepseek_ocr.py +1516 -0
 - sglang/srt/models/deepseek_v2.py +793 -235
 - sglang/srt/models/dots_ocr.py +171 -0
 - sglang/srt/models/dots_vlm.py +0 -1
 - sglang/srt/models/dots_vlm_vit.py +1 -1
 - sglang/srt/models/falcon_h1.py +570 -0
 - sglang/srt/models/gemma3_causal.py +0 -2
 - sglang/srt/models/gemma3_mm.py +17 -1
 - sglang/srt/models/gemma3n_mm.py +2 -3
 - sglang/srt/models/glm4_moe.py +17 -40
 - sglang/srt/models/glm4_moe_nextn.py +4 -4
 - sglang/srt/models/glm4v.py +3 -2
 - sglang/srt/models/glm4v_moe.py +6 -6
 - sglang/srt/models/gpt_oss.py +12 -35
 - sglang/srt/models/grok.py +10 -23
 - sglang/srt/models/hunyuan.py +2 -7
 - sglang/srt/models/interns1.py +0 -1
 - sglang/srt/models/kimi_vl.py +1 -7
 - sglang/srt/models/kimi_vl_moonvit.py +4 -2
 - sglang/srt/models/llama.py +6 -2
 - sglang/srt/models/llama_eagle3.py +1 -1
 - sglang/srt/models/longcat_flash.py +6 -23
 - sglang/srt/models/longcat_flash_nextn.py +4 -15
 - sglang/srt/models/mimo.py +2 -13
 - sglang/srt/models/mimo_mtp.py +1 -2
 - sglang/srt/models/minicpmo.py +7 -5
 - sglang/srt/models/mixtral.py +1 -4
 - sglang/srt/models/mllama.py +1 -1
 - sglang/srt/models/mllama4.py +27 -6
 - sglang/srt/models/nemotron_h.py +511 -0
 - sglang/srt/models/olmo2.py +31 -4
 - sglang/srt/models/opt.py +5 -5
 - sglang/srt/models/phi.py +1 -1
 - sglang/srt/models/phi4mm.py +1 -1
 - sglang/srt/models/phimoe.py +0 -1
 - sglang/srt/models/pixtral.py +0 -3
 - sglang/srt/models/points_v15_chat.py +186 -0
 - sglang/srt/models/qwen.py +0 -1
 - sglang/srt/models/qwen2.py +0 -7
 - sglang/srt/models/qwen2_5_vl.py +5 -5
 - sglang/srt/models/qwen2_audio.py +2 -15
 - sglang/srt/models/qwen2_moe.py +70 -4
 - sglang/srt/models/qwen2_vl.py +6 -3
 - sglang/srt/models/qwen3.py +18 -3
 - sglang/srt/models/qwen3_moe.py +50 -38
 - sglang/srt/models/qwen3_next.py +43 -21
 - sglang/srt/models/qwen3_next_mtp.py +3 -4
 - sglang/srt/models/qwen3_omni_moe.py +661 -0
 - sglang/srt/models/qwen3_vl.py +791 -0
 - sglang/srt/models/qwen3_vl_moe.py +343 -0
 - sglang/srt/models/registry.py +15 -3
 - sglang/srt/models/roberta.py +55 -3
 - sglang/srt/models/sarashina2_vision.py +268 -0
 - sglang/srt/models/solar.py +505 -0
 - sglang/srt/models/starcoder2.py +357 -0
 - sglang/srt/models/step3_vl.py +3 -5
 - sglang/srt/models/torch_native_llama.py +9 -2
 - sglang/srt/models/utils.py +61 -0
 - sglang/srt/multimodal/processors/base_processor.py +21 -9
 - sglang/srt/multimodal/processors/deepseek_ocr.py +37 -0
 - sglang/srt/multimodal/processors/deepseek_vl_v2.py +0 -3
 - sglang/srt/multimodal/processors/dots_vlm.py +2 -4
 - sglang/srt/multimodal/processors/glm4v.py +1 -5
 - sglang/srt/multimodal/processors/internvl.py +20 -10
 - sglang/srt/multimodal/processors/janus_pro.py +0 -1
 - sglang/srt/multimodal/processors/mllama4.py +0 -8
 - sglang/srt/multimodal/processors/phi4mm.py +0 -1
 - sglang/srt/multimodal/processors/points_v15_chat.py +52 -0
 - sglang/srt/multimodal/processors/qwen_vl.py +83 -17
 - sglang/srt/multimodal/processors/sarashina2_vision.py +81 -0
 - sglang/srt/multimodal/processors/step3_vl.py +1 -1
 - sglang/srt/parser/conversation.py +41 -0
 - sglang/srt/parser/jinja_template_utils.py +6 -0
 - sglang/srt/parser/reasoning_parser.py +0 -1
 - sglang/srt/sampling/custom_logit_processor.py +77 -2
 - sglang/srt/sampling/sampling_batch_info.py +36 -23
 - sglang/srt/sampling/sampling_params.py +75 -0
 - sglang/srt/server_args.py +1300 -338
 - sglang/srt/server_args_config_parser.py +146 -0
 - sglang/srt/single_batch_overlap.py +161 -0
 - sglang/srt/speculative/base_spec_worker.py +34 -0
 - sglang/srt/speculative/cpp_ngram/ngram.cpp +374 -0
 - sglang/srt/speculative/cpp_ngram/ngram.h +110 -0
 - sglang/srt/speculative/cpp_ngram/ngram_cache.py +138 -0
 - sglang/srt/speculative/cpp_ngram/ngram_cache_binding.cpp +43 -0
 - sglang/srt/speculative/cpp_ngram/param.h +125 -0
 - sglang/srt/speculative/cpp_ngram/queue.h +71 -0
 - sglang/srt/speculative/draft_utils.py +226 -0
 - sglang/srt/speculative/eagle_draft_cuda_graph_runner.py +26 -8
 - sglang/srt/speculative/eagle_draft_extend_cuda_graph_runner.py +26 -3
 - sglang/srt/speculative/eagle_info.py +786 -0
 - sglang/srt/speculative/eagle_info_v2.py +458 -0
 - sglang/srt/speculative/eagle_utils.py +113 -1270
 - sglang/srt/speculative/eagle_worker.py +120 -285
 - sglang/srt/speculative/eagle_worker_v2.py +702 -0
 - sglang/srt/speculative/ngram_info.py +433 -0
 - sglang/srt/speculative/ngram_worker.py +246 -0
 - sglang/srt/speculative/spec_info.py +49 -0
 - sglang/srt/speculative/spec_utils.py +641 -0
 - sglang/srt/speculative/standalone_worker.py +4 -14
 - sglang/srt/tokenizer/tiktoken_tokenizer.py +2 -2
 - sglang/srt/tracing/trace.py +32 -6
 - sglang/srt/two_batch_overlap.py +35 -18
 - sglang/srt/utils/__init__.py +2 -0
 - sglang/srt/{bench_utils.py → utils/bench_utils.py} +4 -2
 - sglang/srt/{utils.py → utils/common.py} +583 -113
 - sglang/srt/{hf_transformers_utils.py → utils/hf_transformers_utils.py} +86 -19
 - sglang/srt/{host_shared_memory.py → utils/host_shared_memory.py} +0 -1
 - sglang/srt/{offloader.py → utils/offloader.py} +4 -4
 - sglang/srt/{patch_torch.py → utils/patch_torch.py} +8 -0
 - sglang/srt/utils/profile_merger.py +199 -0
 - sglang/srt/utils/rpd_utils.py +452 -0
 - sglang/srt/utils/slow_rank_detector.py +71 -0
 - sglang/srt/{torch_memory_saver_adapter.py → utils/torch_memory_saver_adapter.py} +5 -7
 - sglang/srt/warmup.py +8 -4
 - sglang/srt/weight_sync/utils.py +1 -1
 - sglang/test/attention/test_flashattn_backend.py +1 -1
 - sglang/test/attention/test_flashattn_mla_backend.py +0 -1
 - sglang/test/attention/test_prefix_chunk_info.py +0 -2
 - sglang/test/attention/test_trtllm_mla_backend.py +221 -53
 - sglang/test/few_shot_gsm8k_engine.py +2 -4
 - sglang/test/get_logits_ut.py +57 -0
 - sglang/test/kit_matched_stop.py +157 -0
 - sglang/test/longbench_v2/__init__.py +1 -0
 - sglang/test/longbench_v2/test_longbench_v2_eval.py +238 -0
 - sglang/test/longbench_v2/validate_longbench_v2.py +337 -0
 - sglang/test/longbench_v2/validate_longbench_v2_standalone.py +306 -0
 - sglang/test/run_eval.py +120 -11
 - sglang/test/runners.py +3 -1
 - sglang/test/send_one.py +42 -7
 - sglang/test/simple_eval_common.py +8 -2
 - sglang/test/simple_eval_gpqa.py +0 -1
 - sglang/test/simple_eval_humaneval.py +0 -3
 - sglang/test/simple_eval_longbench_v2.py +344 -0
 - sglang/test/simple_eval_mmmu_vlm.py +441 -0
 - sglang/test/test_block_fp8.py +3 -4
 - sglang/test/test_block_fp8_deep_gemm_blackwell.py +0 -1
 - sglang/test/test_cutlass_moe.py +1 -2
 - sglang/test/test_cutlass_w4a8_moe.py +10 -20
 - sglang/test/test_deterministic.py +430 -0
 - sglang/test/test_deterministic_utils.py +73 -0
 - sglang/test/test_disaggregation_utils.py +93 -1
 - sglang/test/test_marlin_moe.py +0 -1
 - sglang/test/test_programs.py +1 -1
 - sglang/test/test_utils.py +432 -16
 - sglang/utils.py +10 -1
 - sglang/version.py +1 -1
 - {sglang-0.5.3rc0.dist-info → sglang-0.5.4.dist-info}/METADATA +64 -43
 - {sglang-0.5.3rc0.dist-info → sglang-0.5.4.dist-info}/RECORD +476 -346
 - sglang/srt/entrypoints/grpc_request_manager.py +0 -580
 - sglang/srt/layers/quantization/deep_gemm_wrapper/configurer.py +0 -32
 - sglang/srt/managers/tp_worker_overlap_thread.py +0 -319
 - sglang/srt/mem_cache/lora_radix_cache.py +0 -421
 - sglang/srt/speculative/build_eagle_tree.py +0 -427
 - sglang/test/test_block_fp8_ep.py +0 -358
 - /sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/__init__.py +0 -0
 - /sglang/srt/{remote_instance_weight_loader_utils.py → model_loader/remote_instance_weight_loader_utils.py} +0 -0
 - /sglang/srt/{aio_rwlock.py → utils/aio_rwlock.py} +0 -0
 - /sglang/srt/{poll_based_barrier.py → utils/poll_based_barrier.py} +0 -0
 - {sglang-0.5.3rc0.dist-info → sglang-0.5.4.dist-info}/WHEEL +0 -0
 - {sglang-0.5.3rc0.dist-info → sglang-0.5.4.dist-info}/licenses/LICENSE +0 -0
 - {sglang-0.5.3rc0.dist-info → sglang-0.5.4.dist-info}/top_level.txt +0 -0
 
--- /dev/null
+++ sglang/srt/configs/falcon_h1.py
@@ -0,0 +1,309 @@
+# coding=utf-8
+# Copyright 2024 TII and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Falcon-H1 model configuration"""
+
+
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+
+from sglang.srt.configs.mamba_utils import Mamba2CacheParams, Mamba2StateShape
+from sglang.srt.layers.dp_attention import get_tensor_model_parallel_world_size
+
+logger = logging.get_logger(__name__)
+
+
+class FalconH1Config(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`FalconH1Model`]. It is used to instantiate a
+    FalconH1Model model according to the specified arguments, defining the model architecture. Instantiating a configuration
+    with defaults taken from [ibm-fms/FalconH1-9.8b-2.2T-hf](https://huggingface.co/ibm-fms/FalconH1-9.8b-2.2T-hf).
+    The FalconH1Model is a hybrid [mamba2](https://github.com/state-spaces/mamba) architecture with SwiGLU.
+    The checkpoints are jointly trained by IBM, Princeton, and UIUC.
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+    Args:
+        vocab_size (`int`, *optional*, defaults to 128000):
+            Vocabulary size of the FalconH1 model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`FalconH1Model`]
+        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+            Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
+            model has a output word embedding layer.
+        hidden_size (`int`, *optional*, defaults to 4096):
+            Dimension of the hidden representations.
+        intermediate_size (`int`, *optional*, defaults to 14336):
+            Dimension of the MLP representations.
+        num_hidden_layers (`int`, *optional*, defaults to 32):
+            Number of hidden layers in the Transformer encoder.
+        num_attention_heads (`int`, *optional*, defaults to 32):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        num_key_value_heads (`int`, *optional*, defaults to 8):
+            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
+            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+            by meanpooling all the original heads within that group. For more details, check out [this
+            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`.
+        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+            The non-linear activation function (function or string) in the decoder.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
+            The epsilon used by the rms normalization layers.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models). Only
+            relevant if `config.is_decoder=True`.
+        num_logits_to_keep (`int` or `None`, *optional*, defaults to 1):
+            Number of prompt logits to calculate during generation. If `None`, all logits will be calculated. If an
+            integer value, only last `num_logits_to_keep` logits will be calculated. Default is 1 because only the
+            logits of the last prompt token are needed for generation. For long sequences, the logits for the entire
+            sequence may use a lot of memory so, setting `num_logits_to_keep=1` will reduce memory footprint
+            significantly.
+        pad_token_id (`int`, *optional*, defaults to 0):
+            The id of the padding token.
+        bos_token_id (`int`, *optional*, defaults to 1):
+            The id of the "beginning-of-sequence" token.
+        eos_token_id (`int`, *optional*, defaults to 2):
+            The id of the "end-of-sequence" token.
+        max_position_embeddings (`int`, *optional*, defaults to 8192):
+            Max cached sequence length for the model
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the attention probabilities.
+        mamba_d_ssm (`int`, *optional*, defaults to 1024):
+            The dimension of the SSM state space latents.
+        mamba_n_heads (`int`, *optional*, defaults to 128):
+            The number of mamba heads used in the v2 implementation.
+        mamba_d_head (`int`, *optional*, defaults to `"auto"`):
+            Head embedding dimension size
+        mamba_n_groups (`int`, *optional*, defaults to 1):
+            The number of the mamba groups used in the v2 implementation.
+        mamba_d_state (`int`, *optional*, defaults to 256):
+            The dimension the mamba state space latents
+        mamba_d_conv (`int`, *optional*, defaults to 4):
+            The size of the mamba convolution kernel
+        mamba_expand (`int`, *optional*, defaults to 2):
+            Expanding factor (relative to hidden_size) used to determine the mamba intermediate size
+        mamba_chunk_size (`int`, *optional*, defaults to 256):
+            The chunks in which to break the sequence when doing prefill/training
+        mamba_conv_bias (`bool`, *optional*, defaults to `True`):
+            Flag indicating whether or not to use bias in the convolution layer of the mamba mixer block.
+        mamba_proj_bias (`bool`, *optional*, defaults to `False`):
+            Flag indicating whether or not to use bias in the input and output projections (["in_proj", "out_proj"]) of the mamba mixer block
+        mamba_norm_before_gate (`bool`, *optional*, defaults to `True`):
+            Whether to use RMSNorm before the gate in the Mamba block
+        mamba_rms_norm (`bool`, *optional*, defaults to `False`):
+            Whether to use RMSNorm instead of LayerNorm in the Mamba block
+        projectors_bias (`bool`, *optional*, defaults to `False`):
+            Flag indicating whether or not to use bias in the input and output projections (["in_proj", "out_proj"]) of the attention block
+        rope_theta (`float`, *optional*, defaults to 100000.0):
+            The theta value used for the RoPE embeddings.
+        rope_scaling (`float`, *optional*):
+            The scaling value used for the RoPE embeddings. If `None`, no scaling is applied.
+        lm_head_multiplier (`float`, *optional*, defaults to 1.0):
+            The multiplier for the LM head. This is used to scale the output of the LM head.
+        embedding_multiplier (`float`, *optional*, defaults to 1.0):
+            The multiplier for the embedding layer. This is used to scale the output of the embedding layer.
+        mlp_multipliers (`list[float]`, *optional*):
+            The multipliers for the MLP layers. This is used to scale the output of the MLP layers. The first value is
+            the multiplier of gate layer, the second value is the multiplier of the down_proj layer.
+        key_multiplier (`float`, *optional*):
+            The multiplier for the key layer. This is used to scale the output of the key layer.
+        attention_out_multiplier (`float`, *optional*):
+            The multiplier for the attention output layer. This is used to scale the output of the attention output
+        attention_in_multiplier (`float`, *optional*):
+            The multiplier for the attention input layer. This is used to scale the output of the attention input layer.
+        ssm_multipliers (`list[float]`, *optional*):
+            The multipliers for the SSM layers. This is used to scale the output of the SSM layers.
+        ssm_in_multiplier (`float`, *optional*):
+            The multiplier for the SSM input layer. This is used to scale the output of the SSM input layer.
+        ssm_out_multiplier (`float`, *optional*):
+            The multiplier for the SSM output layer. This is used to scale the output of the SSM output layer.
+    """
+
+    model_type = "falcon_h1"
+    keys_to_ignore_at_inference = ["past_key_values"]
+
+    def __init__(
+        self,
+        vocab_size=128000,
+        tie_word_embeddings=False,
+        hidden_size=4096,
+        intermediate_size=14336,
+        num_hidden_layers=32,
+        num_attention_heads=32,
+        num_key_value_heads=8,
+        hidden_act="silu",
+        initializer_range=0.02,
+        rms_norm_eps=1e-5,
+        use_cache=True,
+        num_logits_to_keep=1,
+        pad_token_id=0,
+        bos_token_id=1,
+        eos_token_id=2,
+        max_position_embeddings=8192,
+        attention_dropout=0.0,
+        mamba_d_ssm=1024,
+        mamba_n_heads=128,
+        mamba_d_head="auto",
+        mamba_n_groups=1,
+        mamba_d_state=256,
+        mamba_d_conv=4,
+        mamba_expand=2,
+        mamba_chunk_size=256,
+        mamba_conv_bias=True,
+        mamba_proj_bias=False,
+        mamba_norm_before_gate=True,
+        mamba_rms_norm=False,
+        projectors_bias=False,
+        rope_theta=100000.0,
+        rope_scaling=None,
+        lm_head_multiplier=1.0,
+        embedding_multiplier=1.0,
+        mlp_multipliers=None,
+        key_multiplier=None,
+        attention_out_multiplier=None,
+        attention_in_multiplier=None,
+        ssm_multipliers=None,
+        ssm_in_multiplier=None,
+        ssm_out_multiplier=None,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.max_position_embeddings = max_position_embeddings
+        self.attention_dropout = attention_dropout
+        self.attention_bias = False
+        self.mlp_bias = False
+
+        # for backward compatibility
+        if num_key_value_heads is None:
+            num_key_value_heads = num_attention_heads
+
+        self.num_key_value_heads = num_key_value_heads
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.rms_norm_eps = rms_norm_eps
+
+        self.use_cache = use_cache
+        self.num_logits_to_keep = num_logits_to_keep
+
+        self.rope_theta = rope_theta
+        self.rope_scaling = None
+        self.rope_scaling = rope_scaling
+        self.projectors_bias = projectors_bias
+        self.mamba_intermediate = mamba_intermediate = (
+            mamba_expand * hidden_size if mamba_d_ssm is None else mamba_d_ssm
+        )
+
+        if mamba_intermediate % mamba_n_heads != 0:
+            raise ValueError("mamba_n_heads must divide mamba_expand * hidden_size")
+
+        # for the mamba_v2, must satisfy the following
+        if mamba_d_head == "auto":
+            mamba_d_head = mamba_intermediate // mamba_n_heads
+
+        if mamba_d_head * mamba_n_heads != mamba_intermediate:
+            raise ValueError(
+                "The dimensions for the Mamba head state do not match the model intermediate_size"
+            )
+
+        self.mamba_d_ssm = mamba_d_ssm
+        self.mamba_n_heads = mamba_n_heads
+        self.mamba_d_head = mamba_d_head
+        self.mamba_n_groups = mamba_n_groups
+        self.mamba_d_state = mamba_d_state
+        self.mamba_d_conv = mamba_d_conv
+        self.mamba_expand = mamba_expand
+        self.mamba_chunk_size = mamba_chunk_size
+        self.mamba_conv_bias = mamba_conv_bias
+        self.mamba_proj_bias = mamba_proj_bias
+
+        self.mamba_norm_before_gate = mamba_norm_before_gate
+        self.mamba_rms_norm = mamba_rms_norm
+
+        self.lm_head_multiplier = lm_head_multiplier
+        self.embedding_multiplier = embedding_multiplier
+
+        if mlp_multipliers is not None:
+            self.mlp_multipliers = mlp_multipliers
+        else:
+            self.mlp_multipliers = [1.0, 1.0]
+
+        if attention_out_multiplier is not None:
+            self.attention_out_multiplier = attention_out_multiplier
+        else:
+            self.attention_out_multiplier = 1.0
+
+        if attention_in_multiplier is not None:
+            self.attention_in_multiplier = attention_in_multiplier
+        else:
+            self.attention_in_multiplier = 1.0
+
+        if key_multiplier is not None:
+            self.key_multiplier = key_multiplier
         
     | 
| 
      
 258 
     | 
    
         
            +
                    else:
         
     | 
| 
      
 259 
     | 
    
         
            +
                        self.key_multiplier = 1.0
         
     | 
| 
      
 260 
     | 
    
         
            +
             
     | 
| 
      
 261 
     | 
    
         
            +
                    if ssm_multipliers is not None:
         
     | 
| 
      
 262 
     | 
    
         
            +
                        self.ssm_multipliers = ssm_multipliers
         
     | 
| 
      
 263 
     | 
    
         
            +
                    else:
         
     | 
| 
      
 264 
     | 
    
         
            +
                        self.ssm_multipliers = [1.0, 1.0, 1.0, 1.0, 1.0]
         
     | 
| 
      
 265 
     | 
    
         
            +
             
     | 
| 
      
 266 
     | 
    
         
            +
                    if ssm_in_multiplier is not None:
         
     | 
| 
      
 267 
     | 
    
         
            +
                        self.ssm_in_multiplier = ssm_in_multiplier
         
     | 
| 
      
 268 
     | 
    
         
            +
                    else:
         
     | 
| 
      
 269 
     | 
    
         
            +
                        self.ssm_in_multiplier = 1.0
         
     | 
| 
      
 270 
     | 
    
         
            +
             
     | 
| 
      
 271 
     | 
    
         
            +
                    if ssm_out_multiplier is not None:
         
     | 
| 
      
 272 
     | 
    
         
            +
                        self.ssm_out_multiplier = ssm_out_multiplier
         
     | 
| 
      
 273 
     | 
    
         
            +
                    else:
         
     | 
| 
      
 274 
     | 
    
         
            +
                        self.ssm_out_multiplier = 1.0
         
     | 
| 
      
 275 
     | 
    
         
            +
             
     | 
| 
      
 276 
     | 
    
         
            +
                    super().__init__(
         
     | 
| 
      
 277 
     | 
    
         
            +
                        pad_token_id=pad_token_id,
         
     | 
| 
      
 278 
     | 
    
         
            +
                        bos_token_id=bos_token_id,
         
     | 
| 
      
 279 
     | 
    
         
            +
                        eos_token_id=eos_token_id,
         
     | 
| 
      
 280 
     | 
    
         
            +
                        tie_word_embeddings=tie_word_embeddings,
         
     | 
| 
      
 281 
     | 
    
         
            +
                        **kwargs,
         
     | 
| 
      
 282 
     | 
    
         
            +
                    )
         
     | 
| 
      
 283 
     | 
    
         
            +
             
     | 
| 
      
 284 
     | 
    
         
            +
                @property
         
     | 
| 
      
 285 
     | 
    
         
            +
                def layers_block_type(self):
         
     | 
| 
      
 286 
     | 
    
         
            +
                    return ["falcon_h1" for i in range(self.num_hidden_layers)]
         
     | 
| 
      
 287 
     | 
    
         
            +
             
     | 
| 
      
 288 
     | 
    
         
            +
                @property
         
     | 
| 
      
 289 
     | 
    
         
            +
                def full_attention_layer_ids(self):
         
     | 
| 
      
 290 
     | 
    
         
            +
                    # For Falcon-H1, we do have attention on all layers
         
     | 
| 
      
 291 
     | 
    
         
            +
                    return range(self.num_hidden_layers)
         
     | 
| 
      
 292 
     | 
    
         
            +
             
     | 
| 
      
 293 
     | 
    
         
            +
                @property
         
     | 
| 
      
 294 
     | 
    
         
            +
                def linear_layer_ids(self):
         
     | 
| 
      
 295 
     | 
    
         
            +
                    # For Falcon-H1, we do have mamba on all layers
         
     | 
| 
      
 296 
     | 
    
         
            +
                    return range(self.num_hidden_layers)
         
     | 
| 
      
 297 
     | 
    
         
            +
             
     | 
| 
      
 298 
     | 
    
         
            +
                @property
         
     | 
| 
      
 299 
     | 
    
         
            +
                def mamba2_cache_params(self):
         
     | 
| 
      
 300 
     | 
    
         
            +
                    shape = Mamba2StateShape.create(
         
     | 
| 
      
 301 
     | 
    
         
            +
                        tp_world_size=get_tensor_model_parallel_world_size(),
         
     | 
| 
      
 302 
     | 
    
         
            +
                        intermediate_size=self.mamba_intermediate,
         
     | 
| 
      
 303 
     | 
    
         
            +
                        n_groups=self.mamba_n_groups,
         
     | 
| 
      
 304 
     | 
    
         
            +
                        num_heads=self.mamba_n_heads,
         
     | 
| 
      
 305 
     | 
    
         
            +
                        head_dim=self.mamba_d_head,
         
     | 
| 
      
 306 
     | 
    
         
            +
                        state_size=self.mamba_d_state,
         
     | 
| 
      
 307 
     | 
    
         
            +
                        conv_kernel=self.mamba_d_conv,
         
     | 
| 
      
 308 
     | 
    
         
            +
                    )
         
     | 
| 
      
 309 
     | 
    
         
            +
                    return Mamba2CacheParams(shape=shape, layers=self.linear_layer_ids)
         
     | 
| 
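The dimension handling in this constructor reduces to simple arithmetic: the mamba intermediate size is `mamba_expand * hidden_size` unless `mamba_d_ssm` overrides it, it must split evenly across `mamba_n_heads`, and an `"auto"` head dimension is derived as that quotient. Below is a minimal, self-contained sketch of the same checks; the function name and the numbers are illustrative and not taken from any released Falcon-H1 configuration.

```python
# Illustrative sketch of the mamba dimension checks above.
# resolve_mamba_dims is a hypothetical helper; the values are made up.

def resolve_mamba_dims(hidden_size, mamba_expand, mamba_n_heads,
                       mamba_d_ssm=None, mamba_d_head="auto"):
    # Effective SSM width: explicit override, or expand * hidden_size.
    intermediate = mamba_expand * hidden_size if mamba_d_ssm is None else mamba_d_ssm
    if intermediate % mamba_n_heads != 0:
        raise ValueError("mamba_n_heads must divide the mamba intermediate size")
    # "auto" head dim is simply the even split across heads.
    if mamba_d_head == "auto":
        mamba_d_head = intermediate // mamba_n_heads
    if mamba_d_head * mamba_n_heads != intermediate:
        raise ValueError("head_dim * n_heads must equal the intermediate size")
    return intermediate, mamba_d_head


# Example: hidden_size=4096, expand=2 -> intermediate 8192; 128 heads -> head_dim 64.
print(resolve_mamba_dims(4096, 2, 128))  # (8192, 64)
```

This is the consistency that `mamba2_cache_params` later relies on when it hands `mamba_intermediate`, `mamba_n_heads`, and `mamba_d_head` to `Mamba2StateShape.create`.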
@@ -1,10 +1,12 @@
 # Adapted from https://github.com/vllm-project/vllm/blob/v0.6.4.post1/vllm/config.py
 import enum
-import json
 import logging
 from dataclasses import dataclass, field
 from typing import List, Optional, Union
 
+import orjson
+
+from sglang.srt.configs.modelopt_config import ModelOptConfig
 from sglang.srt.utils import is_hip
 
 logger = logging.getLogger(__name__)
@@ -24,6 +26,8 @@ class LoadFormat(str, enum.Enum):
     JAX = "jax"
     REMOTE = "remote"
     REMOTE_INSTANCE = "remote_instance"
+    RDMA = "rdma"
+    LOCAL_CACHED = "local_cached"
 
 
 @dataclass
@@ -47,6 +51,12 @@ class LoadConfig:
         checkpoints.
     decryption_key_file: If set, decrypts the output files with a password read
         from this file (after PBKDF2).
+    decrypt_max_concurrency: The maximum number of concurrent processes to decrypt the safetensor files. -1 means no limit.
+
+    # ModelOpt-specific loading options
+    modelopt_checkpoint_restore_path: Optional[str] = None
+    modelopt_checkpoint_save_path: Optional[str] = None
+    modelopt_export_path: Optional[str] = None
     """
 
     load_format: Union[str, LoadFormat] = LoadFormat.AUTO
@@ -54,11 +64,24 @@ class LoadConfig:
     model_loader_extra_config: Optional[Union[str, dict]] = field(default_factory=dict)
     ignore_patterns: Optional[Union[List[str], str]] = None
     decryption_key_file: Optional[str] = None
+    decrypt_max_concurrency: int = -1
+    tp_rank: Optional[int] = None
+    remote_instance_weight_loader_seed_instance_ip: Optional[str] = None
+    remote_instance_weight_loader_seed_instance_service_port: Optional[int] = None
+    remote_instance_weight_loader_send_weights_group_ports: Optional[List[int]] = None
+
+    # ModelOpt-specific loading options
+    modelopt_checkpoint_restore_path: Optional[str] = None
+    modelopt_checkpoint_save_path: Optional[str] = None
+    modelopt_export_path: Optional[str] = None
+
+    # ModelOpt configuration object
+    modelopt_config: Optional[ModelOptConfig] = None
 
     def __post_init__(self):
         model_loader_extra_config = self.model_loader_extra_config or {}
         if isinstance(model_loader_extra_config, str):
-            self.model_loader_extra_config = json.loads(model_loader_extra_config)
+            self.model_loader_extra_config = orjson.loads(model_loader_extra_config)
         self._verify_load_format()
 
         if self.ignore_patterns is not None and len(self.ignore_patterns) > 0:
@@ -69,6 +92,14 @@ class LoadConfig:
         else:
             self.ignore_patterns = ["original/**/*"]
 
+        # Create ModelOptConfig if not provided
+        if self.modelopt_config is None:
+            self.modelopt_config = ModelOptConfig(
+                checkpoint_restore_path=self.modelopt_checkpoint_restore_path,
+                checkpoint_save_path=self.modelopt_checkpoint_save_path,
+                export_path=self.modelopt_export_path,
+            )
+
     def _verify_load_format(self) -> None:
         if not isinstance(self.load_format, str):
             return
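With these changes, a `model_loader_extra_config` passed as a JSON string is decoded with `orjson` rather than `json`, and the flat `modelopt_*` path fields are folded into a single `ModelOptConfig` during `__post_init__` when no explicit object is supplied. A rough usage sketch follows; the paths and the extra-config key are placeholders, and the attribute names read back from `ModelOptConfig` are assumed from the keyword arguments in the diff above.

```python
# Hypothetical usage of the extended LoadConfig; values are placeholders.
from sglang.srt.configs.load_config import LoadConfig, LoadFormat

cfg = LoadConfig(
    load_format=LoadFormat.AUTO,
    model_loader_extra_config='{"some_loader_option": true}',   # JSON string, placeholder key
    modelopt_checkpoint_restore_path="/tmp/modelopt_ckpt",      # placeholder path
    modelopt_export_path="/tmp/modelopt_export",                # placeholder path
)

# __post_init__ has already run: the string was parsed via orjson.loads, and the
# flat ModelOpt fields were collected into a ModelOptConfig instance.
print(cfg.model_loader_extra_config)                 # {'some_loader_option': True}
print(cfg.modelopt_config.checkpoint_restore_path)   # /tmp/modelopt_ckpt (assumed attribute name)
```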
@@ -0,0 +1,117 @@
+# Copyright 2025 SGLang Team
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Common config utils for mamba2 - NemotronH, FalconH1, Qwen3Next, etc."""
+
+import os
+from dataclasses import dataclass, field
+
+import numpy as np
+import torch
+
+from sglang.srt.distributed.utils import divide
+
+
+def extra_groups_for_head_shards(ngroups: int, tp_size: int):
+    """Compute the increase in group numbers to account for
+    replication in order to accompany the head shards."""
+
+    # in the case ngoups % tp_size == 0, this will be zero
+    if ngroups % tp_size == 0:
+        return 0
+
+    # for n_groups == 1, this is exactly tp_size - n_groups
+    return tp_size - ngroups
+
+
+@dataclass(kw_only=True, frozen=True)
+class Mamba2StateShape:
+    conv: tuple[int, int]
+    temporal: tuple[int, int, int]
+
+    intermediate_size: int
+    conv_dim: int
+    ssm_state_size: int
+    num_heads: int
+    head_dim: int
+    state_size: int
+    conv_kernel: int
+
+    @staticmethod
+    def create(
+        *,
+        tp_world_size: int,
+        intermediate_size: int,
+        n_groups: int,
+        num_heads: int,
+        head_dim: int,
+        state_size: int,
+        conv_kernel: int,
+    ) -> "Mamba2StateShape":
+        # if n_groups is not divisible by world_size, need to extend the shards
+        # to ensure all groups needed by a head is sharded along with it
+        if n_groups % tp_world_size != 0:
+            extra_groups = extra_groups_for_head_shards(n_groups, tp_world_size)
+            n_groups += extra_groups
+        # heads and n_groups are TP-ed
+        conv_dim = intermediate_size + 2 * n_groups * state_size
+
+        # contiguous along 'dim' axis
+        conv_state_shape = divide(conv_dim, tp_world_size), conv_kernel - 1
+
+        # These are not TP-ed as they depend on A, dt_bias, D
+        # - they are typically small
+        #   e.g., QWen3-Next: (32, 128, 128)
+        temporal_state_shape = (divide(num_heads, tp_world_size), head_dim, state_size)
+        return Mamba2StateShape(
+            conv=conv_state_shape,
+            temporal=temporal_state_shape,
+            intermediate_size=intermediate_size,
+            conv_dim=conv_dim,
+            ssm_state_size=state_size,
+            num_heads=num_heads,
+            head_dim=head_dim,
+            state_size=state_size,
+            conv_kernel=conv_kernel,
+        )
+
+
+@dataclass(kw_only=True, frozen=True)
+class Mamba2StateDType:
+    conv: torch.dtype
+    temporal: torch.dtype
+
+
+CONV_DTYPE = torch.bfloat16
+
+
+def mamba2_state_dtype() -> Mamba2StateDType:
+    dtype_map = {
+        "float32": torch.float32,
+        "bfloat16": torch.bfloat16,
+    }
+    ssm_dtype = dtype_map[os.environ["SGLANG_MAMBA_SSM_DTYPE"]]
+    return Mamba2StateDType(conv=CONV_DTYPE, temporal=ssm_dtype)
+
+
+@dataclass(kw_only=True, frozen=True)
+class Mamba2CacheParams:
+    shape: Mamba2StateShape
+    dtype: Mamba2StateDType = field(default_factory=mamba2_state_dtype)
+    layers: list[int]
+
+    @property
+    def mamba_cache_per_req(self) -> int:
+        return (
+            int(np.prod(self.shape.conv)) * self.dtype.conv.itemsize
+            + int(np.prod(self.shape.temporal)) * self.dtype.temporal.itemsize
+        ) * len(self.layers)
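`Mamba2CacheParams.mamba_cache_per_req` is plain arithmetic over the two state shapes: the conv state is `(conv_dim / tp, conv_kernel - 1)` stored in bfloat16, the temporal state is `(num_heads / tp, head_dim, state_size)` stored in the dtype selected by `SGLANG_MAMBA_SSM_DTYPE`, and the byte counts are summed and multiplied by the number of mamba layers. The following self-contained sketch mirrors that computation with illustrative numbers (loosely inspired by the `(32, 128, 128)` comment above, not taken from any real checkpoint).

```python
import numpy as np
import torch

# Illustrative configuration; every value here is a made-up example.
tp_world_size = 1
intermediate_size = 4096
n_groups = 8
num_heads = 32
head_dim = 128
state_size = 128
conv_kernel = 4
num_mamba_layers = 24

# Mirrors Mamba2StateShape.create: the conv dim is intermediate_size + 2 * n_groups * state_size.
conv_dim = intermediate_size + 2 * n_groups * state_size
conv_shape = (conv_dim // tp_world_size, conv_kernel - 1)
temporal_shape = (num_heads // tp_world_size, head_dim, state_size)

# Mirrors mamba_cache_per_req: bytes needed by one request across all mamba layers,
# assuming bfloat16 for both states here for simplicity.
conv_bytes = int(np.prod(conv_shape)) * torch.bfloat16.itemsize
temporal_bytes = int(np.prod(temporal_shape)) * torch.bfloat16.itemsize
per_req_bytes = (conv_bytes + temporal_bytes) * num_mamba_layers

print(f"{per_req_bytes / 1024**2:.1f} MiB per request")  # ~24.8 MiB with these numbers
```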