sglang-0.5.1.tar.gz → sglang-0.5.1.post2.tar.gz
This diff shows the changes between two publicly released versions of the sglang package, as published to a public package registry (PyPI). It is provided for informational purposes only and reflects the package contents exactly as they appear in that registry.
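For context on the version numbers in the title: a `.postN` suffix is a PEP 440 post-release, which sorts strictly after its base version, so installers treat 0.5.1.post2 as superseding 0.5.1 while keeping the same base version. (Accordingly, the new `sglang/version.py` added below presumably just pins `__version__ = "0.5.1.post2"`, though its contents are not shown in this listing.) A minimal sketch of that ordering, using the third-party `packaging` library:

```python
# Minimal sketch: PEP 440 ordering of the two releases in this diff.
# Requires the third-party "packaging" library (pip install packaging).
from packaging.version import Version

old = Version("0.5.1")
new = Version("0.5.1.post2")

assert new > old                             # post-releases sort after the base version
assert new.base_version == old.base_version  # same base: a patch-level follow-up
print(new.post)                              # -> 2
```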
- {sglang-0.5.1/sglang.egg-info → sglang-0.5.1.post2}/PKG-INFO +4 -4
- {sglang-0.5.1 → sglang-0.5.1.post2}/README.md +1 -1
- {sglang-0.5.1 → sglang-0.5.1.post2}/pyproject.toml +3 -3
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/disaggregation/decode.py +4 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/disaggregation/prefill.py +4 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/entrypoints/engine.py +1 -1
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/entrypoints/tool.py +7 -7
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/flashinfer_mla_backend.py +71 -89
- sglang-0.5.1.post2/sglang/srt/layers/attention/utils.py +99 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/cutlass_moe.py +0 -7
- sglang-0.5.1.post2/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/deep_gemm_wrapper/configurer.py +6 -2
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/modelopt_quant.py +2 -2
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/lora/lora_manager.py +29 -12
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/managers/scheduler_metrics_mixin.py +15 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/metrics/collector.py +5 -5
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/model_executor/cuda_graph_runner.py +2 -2
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/grok.py +0 -4
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/offloader.py +115 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/server_args.py +0 -4
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/utils.py +0 -7
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/test_cutlass_moe.py +33 -28
- sglang-0.5.1.post2/sglang/version.py +1 -0
- {sglang-0.5.1 → sglang-0.5.1.post2/sglang.egg-info}/PKG-INFO +4 -4
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang.egg-info/SOURCES.txt +1 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang.egg-info/requires.txt +2 -2
- sglang-0.5.1/sglang/srt/layers/attention/utils.py +0 -178
- sglang-0.5.1/sglang/version.py +0 -1
- {sglang-0.5.1 → sglang-0.5.1.post2}/LICENSE +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/setup.cfg +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/bench_offline_throughput.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/bench_one_batch.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/bench_one_batch_server.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/bench_serving.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/check_env.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/compile_deep_gemm.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/eval/llama3_eval.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/eval/loogle_eval.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/global_config.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/lang/api.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/lang/backend/anthropic.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/lang/backend/base_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/lang/backend/litellm.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/lang/backend/openai.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/lang/backend/runtime_endpoint.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/lang/backend/vertexai.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/lang/chat_template.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/lang/choices.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/lang/compiler.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/lang/interpreter.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/lang/ir.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/lang/tracer.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/launch_server.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/profiler.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/_custom_ops.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/aio_rwlock.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/bench_utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/code_completion_parser.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/configs/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/configs/chatglm.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/configs/dbrx.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/configs/deepseekvl2.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/configs/device_config.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/configs/exaone.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/configs/internvl.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/configs/janus_pro.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/configs/kimi_vl.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/configs/kimi_vl_moonvit.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/configs/load_config.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/configs/model_config.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/configs/step3_vl.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/configs/update_config.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/configs/utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/connector/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/connector/base_connector.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/connector/redis.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/connector/s3.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/connector/serde/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/connector/serde/safe_serde.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/connector/serde/serde.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/connector/utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/constants.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/constrained/base_grammar_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/constrained/llguidance_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/constrained/outlines_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/constrained/outlines_jump_forward.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/constrained/reasoner_grammar_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/constrained/triton_ops/bitmask_ops.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/constrained/xgrammar_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/conversation.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/custom_op.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/debug_utils/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/debug_utils/dump_comparator.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/debug_utils/dumper.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/debug_utils/text_comparator.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/disaggregation/ascend/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/disaggregation/ascend/conn.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/disaggregation/ascend/transfer_engine.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/disaggregation/base/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/disaggregation/base/conn.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/disaggregation/common/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/disaggregation/common/conn.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/disaggregation/common/utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/disaggregation/decode_schedule_batch_mixin.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/disaggregation/fake/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/disaggregation/fake/conn.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/disaggregation/kv_events.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/disaggregation/launch_lb.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/disaggregation/mini_lb.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/disaggregation/mooncake/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/disaggregation/mooncake/conn.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/disaggregation/mooncake/transfer_engine.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/disaggregation/nixl/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/disaggregation/nixl/conn.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/disaggregation/utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/distributed/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/distributed/communication_op.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/distributed/device_communicators/cuda_wrapper.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/distributed/device_communicators/custom_all_reduce.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/distributed/device_communicators/custom_all_reduce_utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/distributed/device_communicators/hpu_communicator.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/distributed/device_communicators/npu_communicator.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/distributed/device_communicators/pymscclpp.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/distributed/device_communicators/pynccl.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/distributed/device_communicators/pynccl_allocator.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/distributed/device_communicators/pynccl_wrapper.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/distributed/device_communicators/quick_all_reduce.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/distributed/device_communicators/shm_broadcast.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/distributed/device_communicators/xpu_communicator.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/distributed/naive_distributed.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/distributed/parallel_state.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/distributed/utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/entrypoints/EngineBase.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/entrypoints/context.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/entrypoints/harmony_utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/entrypoints/http_server.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/entrypoints/http_server_engine.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/entrypoints/openai/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/entrypoints/openai/protocol.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/entrypoints/openai/serving_base.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/entrypoints/openai/serving_chat.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/entrypoints/openai/serving_completions.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/entrypoints/openai/serving_embedding.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/entrypoints/openai/serving_rerank.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/entrypoints/openai/serving_responses.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/entrypoints/openai/serving_score.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/entrypoints/openai/tool_server.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/entrypoints/openai/usage_processor.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/entrypoints/openai/utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/eplb/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/eplb/eplb_algorithms/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/eplb/eplb_algorithms/deepseek.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/eplb/eplb_algorithms/deepseek_vec.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/eplb/eplb_manager.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/eplb/eplb_simulator/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/eplb/eplb_simulator/reader.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/eplb/expert_distribution.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/eplb/expert_location.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/eplb/expert_location_dispatch.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/eplb/expert_location_updater.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/function_call/base_format_detector.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/function_call/core_types.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/function_call/deepseekv3_detector.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/function_call/ebnf_composer.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/function_call/function_call_parser.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/function_call/glm4_moe_detector.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/function_call/gpt_oss_detector.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/function_call/kimik2_detector.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/function_call/llama32_detector.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/function_call/mistral_detector.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/function_call/pythonic_detector.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/function_call/qwen25_detector.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/function_call/qwen3_coder_detector.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/function_call/step3_detector.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/function_call/utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/hf_transformers_utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/host_shared_memory.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/jinja_template_utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/activation.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/amx_utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/aiter_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/ascend_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/base_attn_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/cutlass_mla_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/double_sparsity_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/dual_chunk_flashattention_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/flashattention_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/flashinfer_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/flashmla_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/hybrid_attn_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/intel_amx_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/merge_state.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/tbo_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/torch_native_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/triton_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/triton_ops/decode_attention.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/triton_ops/double_sparsity_attention.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/triton_ops/extend_attention.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/triton_ops/merge_state.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/triton_ops/prefill_attention.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/triton_ops/rocm_mla_decode_rope.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/trtllm_mha_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/trtllm_mla_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/vision.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/vision_utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/wave_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/wave_ops/decode_attention.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/wave_ops/extend_attention.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/wave_ops/prefill_attention.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/communicator.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/dp_attention.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/elementwise.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/flashinfer_comm_fusion.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/layernorm.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/linear.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/logits_processor.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/cutlass_moe_params.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/cutlass_w4a8_moe.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/ep_moe/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/ep_moe/kernels.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/ep_moe/layer.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_native.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=144,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=1024,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=20,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=24,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=128,device_name=NVIDIA_H20,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=256,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=64,device_name=NVIDIA_L20,dtype=int8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=64,device_name=NVIDIA_L40S,dtype=int8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=2560,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=320,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=640,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=14336,device_name=AMD_Radeon_Graphics.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=14336,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=1792,device_name=AMD_Radeon_Graphics.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=1792,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=AMD_Radeon_Graphics.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=NVIDIA_L40S.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=4096,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=7168,device_name=AMD_Radeon_Graphics.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=8192,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=192,device_name=NVIDIA_H20.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=192,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=384,device_name=NVIDIA_H20.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=384,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=768,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=768,device_name=NVIDIA_H20.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=768,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=96,device_name=NVIDIA_H20.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=129,N=352,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=160,N=320,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=161,N=192,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=264,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=264,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=264,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=264,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=272,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=272,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=272,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=272,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=288,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_0/E=16,N=1024,device_name=NVIDIA_B200.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=128,N=352,device_name=NVIDIA_RTX_6000_Ada_Generation,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=128,N=384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=128,N=768,device_name=NVIDIA_H20.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=160,N=192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=160,N=320,device_name=NVIDIA_H20-3e.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=160,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=160,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=257,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=257,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=257,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=257,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=257,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=257,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=384,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=384,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=385,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=385,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=128,N=384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=128,N=768,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=129,N=352,device_name=NVIDIA_RTX_PRO_6000_Blackwell_Max-Q_Workstation_Edition,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=129,N=704,device_name=NVIDIA_B200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=161,N=384,device_name=NVIDIA_RTX_PRO_6000_Blackwell_Max-Q_Workstation_Edition,dtype=fp8_w8a8.json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=384,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/fused_moe.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/layer.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/fused_moe_triton/triton_kernels_moe.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/moe_runner/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/moe_runner/base.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/rocm_moe_utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/router.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/token_dispatcher/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/token_dispatcher/base_dispatcher.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/token_dispatcher/deepep.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/token_dispatcher/standard.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/topk.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/multimodal.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/parameter.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/pooler.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/awq.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/awq_triton.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/base_config.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/blockwise_int8.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/compressed_tensors/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/compressed_tensors/compressed_tensors.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/compressed_tensors/compressed_tensors_moe.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/compressed_tensors/schemes/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/compressed_tensors/utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=24576,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=24576,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=24576,K=1536,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/deep_gemm_wrapper/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/deep_gemm_wrapper/compile_utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/deep_gemm_wrapper/entrypoint.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/fp8.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/fp8_kernel.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/fp8_utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/fpgemm_fp8.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/gptq.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/int8_kernel.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/int8_utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/kv_cache.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/marlin_utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/marlin_utils_fp8.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/moe_wna16.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/mxfp4.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/mxfp4_tensor.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/petit.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/petit_utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/qoq.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/quark/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/quark/quark.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/quark/quark_moe.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/quark/schemes/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/quark/schemes/quark_scheme.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/quark/utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/unquant.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/w4afp8.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/w8a8_fp8.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/quantization/w8a8_int8.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/radix_attention.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/rotary_embedding.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/sampler.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/torchao_utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/vocab_parallel_embedding.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/lora/backend/base_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/lora/backend/triton_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/lora/layers.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/lora/lora.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/lora/lora_config.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/lora/lora_registry.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/lora/mem_pool.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/lora/triton_ops/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/lora/triton_ops/gate_up_lora_b.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/lora/triton_ops/qkv_lora_b.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/lora/triton_ops/sgemm_lora_a.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/lora/triton_ops/sgemm_lora_b.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/lora/utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/managers/cache_controller.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/managers/configure_logging.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/managers/data_parallel_controller.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/managers/detokenizer_manager.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/managers/io_struct.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/managers/mm_utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/managers/multimodal_processor.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/managers/schedule_batch.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/managers/schedule_policy.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/managers/scheduler.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/managers/scheduler_input_blocker.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/managers/scheduler_output_processor_mixin.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/managers/scheduler_profiler_mixin.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/managers/scheduler_recv_skipper.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/managers/scheduler_update_weights_mixin.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/managers/session_controller.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/managers/template_manager.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/managers/tokenizer_manager.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/managers/tp_worker.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/managers/tp_worker_overlap_thread.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/managers/utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/mem_cache/allocator.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/mem_cache/allocator_ascend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/mem_cache/base_prefix_cache.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/mem_cache/chunk_cache.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/mem_cache/cpp_radix_tree/radix_tree.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/mem_cache/flush_cache.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/mem_cache/hicache_storage.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/mem_cache/hiradix_cache.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/mem_cache/lora_radix_cache.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/mem_cache/memory_pool.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/mem_cache/memory_pool_host.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/mem_cache/multimodal_cache.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/mem_cache/radix_cache.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/mem_cache/radix_cache_cpp.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/mem_cache/storage/hf3fs/client_hf3fs.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/mem_cache/storage/hf3fs/hf3fs_utils.cpp +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/mem_cache/storage/hf3fs/mini_3fs_metadata_server.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/mem_cache/storage/hf3fs/storage_hf3fs.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/mem_cache/storage/hf3fs/test_hf3fs_utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/mem_cache/storage/mooncake_store/mooncake_store.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/mem_cache/storage/mooncake_store/unit_test.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/mem_cache/storage/nixl/hicache_nixl.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/mem_cache/storage/nixl/nixl_utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/mem_cache/storage/nixl/test_hicache_nixl_storage.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/mem_cache/swa_radix_cache.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/metrics/func_timer.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/model_executor/forward_batch_info.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/model_executor/model_runner.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/model_executor/npu_graph_runner.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/model_loader/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/model_loader/loader.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/model_loader/utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/model_loader/weight_utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/model_parallel.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/arcee.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/baichuan.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/bailing_moe.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/bert.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/chatglm.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/clip.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/commandr.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/dbrx.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/deepseek.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/deepseek_janus_pro.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/deepseek_nextn.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/deepseek_v2.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/deepseek_vl2.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/ernie4.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/ernie4_eagle.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/exaone.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/gemma.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/gemma2.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/gemma2_reward.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/gemma3_causal.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/gemma3_mm.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/gemma3n_audio.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/gemma3n_causal.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/gemma3n_mm.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/glm4.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/glm4_moe.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/glm4_moe_nextn.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/glm4v.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/glm4v_moe.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/gpt2.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/gpt_bigcode.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/gpt_oss.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/granite.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/granitemoe.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/hunyuan.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/idefics2.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/internlm2.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/internlm2_reward.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/interns1.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/internvl.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/kimi_vl.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/kimi_vl_moonvit.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/llama.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/llama4.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/llama_classification.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/llama_eagle.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/llama_eagle3.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/llama_embedding.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/llama_reward.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/llava.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/llavavid.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/mimo.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/mimo_mtp.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/minicpm.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/minicpm3.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/minicpmo.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/minicpmv.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/mistral.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/mixtral.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/mixtral_quant.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/mllama.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/mllama4.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/nemotron_nas.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/olmo.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/olmo2.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/olmoe.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/persimmon.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/phi.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/phi3_small.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/phi4mm.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/phi4mm_audio.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/phi4mm_utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/phimoe.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/pixtral.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/qwen.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/qwen2.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/qwen2_5_vl.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/qwen2_audio.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/qwen2_classification.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/qwen2_eagle.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/qwen2_moe.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/qwen2_rm.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/qwen2_vl.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/qwen3.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/qwen3_classification.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/qwen3_moe.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/registry.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/roberta.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/siglip.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/stablelm.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/step3_vl.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/torch_native_llama.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/transformers.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/vila.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/xverse.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/xverse_moe.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/models/yivl.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/multimodal/mm_utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/multimodal/processors/base_processor.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/multimodal/processors/clip.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/multimodal/processors/deepseek_vl_v2.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/multimodal/processors/gemma3.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/multimodal/processors/gemma3n.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/multimodal/processors/glm4v.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/multimodal/processors/internvl.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/multimodal/processors/janus_pro.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/multimodal/processors/kimi_vl.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/multimodal/processors/llava.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/multimodal/processors/minicpm.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/multimodal/processors/mlama.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/multimodal/processors/mllama4.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/multimodal/processors/phi4mm.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/multimodal/processors/pixtral.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/multimodal/processors/qwen_audio.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/multimodal/processors/qwen_vl.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/multimodal/processors/step3_vl.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/multimodal/processors/vila.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/operations.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/operations_strategy.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/patch_torch.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/poll_based_barrier.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/reasoning_parser.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/sampling/custom_logit_processor.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/sampling/penaltylib/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/sampling/penaltylib/frequency_penalty.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/sampling/penaltylib/min_new_tokens.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/sampling/penaltylib/orchestrator.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/sampling/penaltylib/presence_penalty.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/sampling/sampling_batch_info.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/sampling/sampling_params.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/speculative/build_eagle_tree.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/speculative/eagle_draft_cuda_graph_runner.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/speculative/eagle_draft_extend_cuda_graph_runner.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/speculative/eagle_utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/speculative/eagle_worker.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/speculative/spec_info.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/tokenizer/tiktoken_tokenizer.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/torch_memory_saver_adapter.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/two_batch_overlap.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/warmup.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/weight_sync/tensor_bucket.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/weight_sync/utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/attention/__init__.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/attention/test_flashattn_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/attention/test_flashattn_mla_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/attention/test_prefix_chunk_info.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/attention/test_trtllm_mla_backend.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/doc_patch.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/few_shot_gsm8k.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/few_shot_gsm8k_engine.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/run_eval.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/runners.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/send_one.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/simple_eval_common.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/simple_eval_gpqa.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/simple_eval_humaneval.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/simple_eval_math.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/simple_eval_mgsm.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/simple_eval_mmlu.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/test_activation.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/test_block_fp8.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/test_block_fp8_deep_gemm_blackwell.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/test_block_fp8_ep.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/test_custom_ops.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/test_cutlass_w4a8_moe.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/test_deepep_utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/test_dynamic_grad_mode.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/test_fp4_moe.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/test_layernorm.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/test_marlin_moe.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/test_marlin_utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/test_programs.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/test/test_utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang/utils.py +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang.egg-info/dependency_links.txt +0 -0
- {sglang-0.5.1 → sglang-0.5.1.post2}/sglang.egg-info/top_level.txt +0 -0
{sglang-0.5.1 → sglang-0.5.1.post2}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: sglang
-Version: 0.5.1
+Version: 0.5.1.post2
 Summary: SGLang is yet another fast serving framework for large language models and vision language models.
 License: Apache License
                                  Version 2.0, January 2004
@@ -262,7 +262,7 @@ Requires-Dist: torch==2.8.0; extra == "srt"
 Requires-Dist: torchaudio==2.8.0; extra == "srt"
 Requires-Dist: torchvision; extra == "srt"
 Requires-Dist: cuda-python; extra == "srt"
-Requires-Dist: flashinfer_python==0.2.
+Requires-Dist: flashinfer_python==0.2.14.post1; extra == "srt"
 Provides-Extra: blackwell
 Requires-Dist: sglang[runtime_common]; extra == "blackwell"
 Requires-Dist: sgl-kernel; extra == "blackwell"
@@ -270,7 +270,7 @@ Requires-Dist: torch==2.8.0; extra == "blackwell"
 Requires-Dist: torchaudio==2.8.0; extra == "blackwell"
 Requires-Dist: torchvision; extra == "blackwell"
 Requires-Dist: cuda-python; extra == "blackwell"
-Requires-Dist: flashinfer_python==0.2.
+Requires-Dist: flashinfer_python==0.2.14.post1; extra == "blackwell"
 Provides-Extra: srt-hip
 Requires-Dist: sglang[runtime_common]; extra == "srt-hip"
 Requires-Dist: torch; extra == "srt-hip"
@@ -374,7 +374,7 @@ Dynamic: license-file
 | [**Slides**](https://github.com/sgl-project/sgl-learning-materials?tab=readme-ov-file#slides) |
 
 ## News
-- [2025/08] 🔔 SGLang x AMD SF Meetup on 8/22: Hands-on GPU workshop, tech talks by AMD/xAI/SGLang, and networking. [
+- [2025/08] 🔔 SGLang x AMD SF Meetup on 8/22: Hands-on GPU workshop, tech talks by AMD/xAI/SGLang, and networking ([Roadmap](https://github.com/sgl-project/sgl-learning-materials/blob/main/slides/amd_meetup_sglang_roadmap.pdf), [Large-scale EP](https://github.com/sgl-project/sgl-learning-materials/blob/main/slides/amd_meetup_sglang_ep.pdf)).
 - [2025/08] 🔥 SGLang provides day-0 support for OpenAI gpt-oss model ([instructions](https://github.com/sgl-project/sglang/issues/8833))
 - [2025/06] 🔥 SGLang, the high-performance serving infrastructure powering trillions of tokens daily, has been awarded the third batch of the Open Source AI Grant by a16z ([a16z blog](https://a16z.com/advancing-open-source-ai-through-benchmarks-and-bold-experimentation/)).
 - [2025/06] 🔥 Deploying DeepSeek on GB200 NVL72 with PD and Large Scale EP (Part I): 2.7x Higher Decoding Throughput ([blog](https://lmsys.org/blog/2025-06-16-gb200-part-1/)).
{sglang-0.5.1 → sglang-0.5.1.post2}/README.md

@@ -20,7 +20,7 @@
 | [**Slides**](https://github.com/sgl-project/sgl-learning-materials?tab=readme-ov-file#slides) |
 
 ## News
-- [2025/08] 🔔 SGLang x AMD SF Meetup on 8/22: Hands-on GPU workshop, tech talks by AMD/xAI/SGLang, and networking. [
+- [2025/08] 🔔 SGLang x AMD SF Meetup on 8/22: Hands-on GPU workshop, tech talks by AMD/xAI/SGLang, and networking ([Roadmap](https://github.com/sgl-project/sgl-learning-materials/blob/main/slides/amd_meetup_sglang_roadmap.pdf), [Large-scale EP](https://github.com/sgl-project/sgl-learning-materials/blob/main/slides/amd_meetup_sglang_ep.pdf)).
 - [2025/08] 🔥 SGLang provides day-0 support for OpenAI gpt-oss model ([instructions](https://github.com/sgl-project/sglang/issues/8833))
 - [2025/06] 🔥 SGLang, the high-performance serving infrastructure powering trillions of tokens daily, has been awarded the third batch of the Open Source AI Grant by a16z ([a16z blog](https://a16z.com/advancing-open-source-ai-through-benchmarks-and-bold-experimentation/)).
 - [2025/06] 🔥 Deploying DeepSeek on GB200 NVL72 with PD and Large Scale EP (Part I): 2.7x Higher Decoding Throughput ([blog](https://lmsys.org/blog/2025-06-16-gb200-part-1/)).
{sglang-0.5.1 → sglang-0.5.1.post2}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "sglang"
-version = "0.5.1"
+version = "0.5.1.post2"
 description = "SGLang is yet another fast serving framework for large language models and vision language models."
 readme = "README.md"
 requires-python = ">=3.10"
@@ -63,7 +63,7 @@ srt = [
     "torchaudio==2.8.0",
     "torchvision",
     "cuda-python",
-    "flashinfer_python==0.2.
+    "flashinfer_python==0.2.14.post1",
 ]
 
 blackwell = [
@@ -73,7 +73,7 @@ blackwell = [
     "torchaudio==2.8.0",
     "torchvision",
     "cuda-python",
-    "flashinfer_python==0.2.
+    "flashinfer_python==0.2.14.post1",
 ]
 
 # HIP (Heterogeneous-computing Interface for Portability) for AMD
{sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/disaggregation/decode.py

@@ -334,6 +334,8 @@ class DecodePreallocQueue:
                     error_message,
                     status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
                 )
+                if self.scheduler.enable_metrics:
+                    self.scheduler.metrics_collector.increment_bootstrap_failed_reqs()
             else:
                 raise ValueError(f"Unexpected poll case: {poll}")
 
@@ -595,6 +597,8 @@ class DecodeTransferQueue:
                 # unlock the kv cache or it will have memory leak
                 self.tree_cache.cache_finished_req(decode_req.req)
                 indices_to_remove.add(i)
+                if self.scheduler.enable_metrics:
+                    self.scheduler.metrics_collector.increment_transfer_failed_reqs()
                 continue
             elif poll == KVPoll.Success:
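The two hunks above (and the matching prefill.py hunks below) wire bootstrap and transfer failures into the scheduler's metrics collector, gated behind `enable_metrics` so the collector is never touched when metrics are off. A minimal sketch of what such failure counters typically look like; only the two `increment_*` method names come from the diff, while the metric names, labels, and use of `prometheus_client` are assumptions for illustration:

```python
# Sketch of the failure counters incremented above; only the two
# increment_* method names come from the diff. Metric names, labels,
# and the use of prometheus_client are illustrative assumptions.
from prometheus_client import Counter

class SchedulerMetricsCollector:
    def __init__(self, labels: dict):
        self.labels = labels
        self.num_bootstrap_failed_reqs = Counter(
            "sglang:num_bootstrap_failed_reqs",
            "Requests that failed during disaggregation KV bootstrap.",
            labelnames=list(labels.keys()),
        )
        self.num_transfer_failed_reqs = Counter(
            "sglang:num_transfer_failed_reqs",
            "Requests that failed during disaggregation KV transfer.",
            labelnames=list(labels.keys()),
        )

    def increment_bootstrap_failed_reqs(self) -> None:
        self.num_bootstrap_failed_reqs.labels(**self.labels).inc()

    def increment_transfer_failed_reqs(self) -> None:
        self.num_transfer_failed_reqs.labels(**self.labels).inc()
```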
{sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/disaggregation/prefill.py

@@ -238,6 +238,8 @@ class PrefillBootstrapQueue:
                 self.scheduler.stream_output([req], req.return_logprob)
                 indices_to_remove.add(i)
                 failed_reqs.append(req)
+                if self.scheduler.enable_metrics:
+                    self.scheduler.metrics_collector.increment_bootstrap_failed_reqs()
                 continue
 
             # KV.WaitingForInput - init here
@@ -522,6 +524,8 @@ class SchedulerDisaggregationPrefillMixin:
                     req, error_message, status_code=HTTPStatus.INTERNAL_SERVER_ERROR
                 )
                 done_reqs.append(req)
+                if self.enable_metrics:
+                    self.metrics_collector.increment_transfer_failed_reqs()
             else:
                 assert False, f"Unexpected polling state {poll=}"
{sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/entrypoints/engine.py

@@ -672,7 +672,7 @@ def _set_envs_and_config(server_args: ServerArgs):
     if server_args.attention_backend == "flashinfer":
         assert_pkg_version(
             "flashinfer_python",
-            "0.2.
+            "0.2.14.post1",
             "Please uninstall the old version and "
             "reinstall the latest version by following the instructions "
             "at https://docs.flashinfer.ai/installation.html.",
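The only change in engine.py is the minimum version string handed to `assert_pkg_version`, matching the pinned flashinfer_python bump in pyproject.toml. For readers unfamiliar with the helper, here is a hypothetical re-implementation of such a check; sglang's real helper lives in `sglang.srt.utils` and may differ in details:

```python
# Hypothetical sketch of an assert_pkg_version-style check; sglang's
# actual helper may differ. It fails fast with the caller's message
# when the package is missing or older than the required floor.
from importlib.metadata import PackageNotFoundError, version
from packaging.version import Version

def assert_pkg_version(pkg: str, min_version: str, message: str) -> None:
    try:
        installed = version(pkg)
    except PackageNotFoundError:
        raise RuntimeError(f"{pkg} is not installed. {message}")
    if Version(installed) < Version(min_version):
        raise RuntimeError(
            f"{pkg}=={installed} is too old (need >= {min_version}). {message}"
        )

# Usage mirroring the hunk above:
# assert_pkg_version("flashinfer_python", "0.2.14.post1", "Please reinstall ...")
```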
{sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/entrypoints/tool.py

@@ -4,6 +4,8 @@ import os
 from abc import ABC, abstractmethod
 from typing import TYPE_CHECKING, Any
 
+from sglang.srt.utils import print_info_once, print_warning_once
+
 if TYPE_CHECKING:
     # Avoid circular import.
     from sglang.srt.entrypoints.context import ConversationContext
@@ -25,7 +27,7 @@ class HarmonyBrowserTool(Tool):
         exa_api_key = os.getenv("EXA_API_KEY")
         if not exa_api_key:
             self.enabled = False
-
+            print_warning_once("EXA_API_KEY is not set, browsing is disabled")
             return
 
         try:
@@ -33,12 +35,12 @@ class HarmonyBrowserTool(Tool):
             from gpt_oss.tools.simple_browser.backend import ExaBackend
         except ImportError:
             self.enabled = False
-
+            print_warning_once("gpt_oss is not installed, browsing is disabled")
             return
 
         browser_backend = ExaBackend(source="web", api_key=exa_api_key)
         self.browser_tool = SimpleBrowserTool(backend=browser_backend)
-
+        print_info_once("Browser tool initialized")
 
     async def get_result(self, context: "ConversationContext") -> Any:
         from sglang.srt.entrypoints.context import HarmonyContext
@@ -64,13 +66,11 @@ class HarmonyPythonTool(Tool):
             from gpt_oss.tools.python_docker.docker_tool import PythonTool
         except ImportError:
             self.enabled = False
-
-                "gpt_oss is not installed, code interpreter is disabled"
-            )
+            print_warning_once("gpt_oss is not installed, code interpreter is disabled")
             return
 
         self.python_tool = PythonTool()
-
+        print_info_once("Code interpreter tool initialized")
 
     async def get_result(self, context: "ConversationContext") -> Any:
         from sglang.srt.entrypoints.context import HarmonyContext
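tool.py now routes its one-time notices through `print_info_once` / `print_warning_once` from `sglang.srt.utils`, so repeatedly constructing tools does not spam the log with the same message. A plausible sketch of such deduplicating helpers; the real implementations may differ (for example, in how they route through `logging`):

```python
# Sketch of deduplicating log-once helpers; the actual implementations
# in sglang.srt.utils may differ. lru_cache keyed on the message means
# each distinct message is emitted at most once per process.
import functools
import logging

logger = logging.getLogger(__name__)

@functools.lru_cache(maxsize=None)
def print_info_once(msg: str) -> None:
    logger.info(msg)

@functools.lru_cache(maxsize=None)
def print_warning_once(msg: str) -> None:
    logger.warning(msg)
```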
{sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/attention/flashinfer_mla_backend.py

@@ -24,7 +24,9 @@ if os.environ["SGLANG_ENABLE_TORCH_COMPILE"] == "1":
 
 from sglang.global_config import global_config
 from sglang.srt.layers.attention.base_attn_backend import AttentionBackend
-from sglang.srt.layers.attention.
+from sglang.srt.layers.attention.flashinfer_backend import (
+    create_flashinfer_kv_indices_triton,
+)
 from sglang.srt.layers.dp_attention import get_attention_tp_size
 from sglang.srt.layers.utils import is_sm100_supported
 from sglang.srt.managers.schedule_batch import global_server_args_dict
@@ -179,6 +181,7 @@ class FlashInferMLAAttnBackend(AttentionBackend):
         q_indptr_decode_buf: Optional[torch.Tensor] = None,
     ):
         super().__init__()
+
         # Parse constants
         self.max_context_len = model_runner.model_config.context_len
         self.device = model_runner.device
@@ -210,25 +213,15 @@ class FlashInferMLAAttnBackend(AttentionBackend):
         else:
             self.kv_indptr = kv_indptr_buf
 
-        self.kv_indices = torch.empty(
-            (max_bs * (self.max_context_len + self.page_size - 1) // self.page_size,),
-            dtype=torch.int32,
-            device=model_runner.device,
-        )
-
         if not self.skip_prefill:
             self.qo_indptr = torch.zeros(
                 (max_bs + 1,), dtype=torch.int32, device=model_runner.device
             )
 
         if q_indptr_decode_buf is None:
-            # A hack to pre-initialize large batch size for dp attention
-            if model_runner.server_args.enable_dp_attention:
-                max_bs = model_runner.server_args.dp_size * max_bs
             self.q_indptr_decode = torch.arange(
                 0, max_bs + 1, dtype=torch.int32, device=model_runner.device
             )
-
         else:
             self.q_indptr_decode = q_indptr_decode_buf
 
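The deleted `kv_indices` buffer was sized with the ceil-division idiom: one int32 slot per KV page per request, so it is no longer preallocated in this constructor. For reference, the idiom itself; the values below are illustrative only, not sglang defaults:

```python
# The removed buffer's sizing followed the ceil-division idiom:
# one int32 slot per KV page per request. Illustrative values only.
max_bs = 160
max_context_len = 8192
page_size = 64

pages_per_req = (max_context_len + page_size - 1) // page_size  # ceil(8192/64) = 128
print(pages_per_req, pages_per_req * max_bs)  # 128 slots/request, 20480 total
```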
@@ -273,7 +266,6 @@ class FlashInferMLAAttnBackend(AttentionBackend):
         self.prefill_cuda_graph_metadata = {}  # For verify
 
     def init_forward_metadata(self, forward_batch: ForwardBatch):
-
         if forward_batch.forward_mode.is_decode_or_idle():
             self.indices_updater_decode.update(
                 forward_batch.req_pool_indices,
@@ -331,9 +323,16 @@ class FlashInferMLAAttnBackend(AttentionBackend):
         max_num_tokens: int,
         kv_indices_buf: Optional[torch.Tensor] = None,
     ):
-
-
-
+        if kv_indices_buf is None:
+            cuda_graph_kv_indices = torch.zeros(
+                (max_bs * self.max_context_len,),
+                dtype=torch.int32,
+                device="cuda",
+            )
+        else:
+            cuda_graph_kv_indices = kv_indices_buf
+
+        self.cuda_graph_kv_indices = cuda_graph_kv_indices
         self.cuda_graph_qo_indptr = self.q_indptr_decode.clone()
         self.cuda_graph_kv_indptr = self.kv_indptr.clone()
         self.cuda_graph_kv_lens = torch.ones(
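`cuda_graph_kv_indices` is allocated at worst-case size (`max_bs * self.max_context_len`) up front because CUDA graphs replay fixed device addresses: the captured kernels keep reading the same buffer, so only its contents may change between replays. A minimal, self-contained sketch of that pattern; sizes and the stand-in kernel are illustrative, not sglang code:

```python
import torch

# CUDA graphs replay fixed pointers: allocate index buffers once at
# worst-case size, then refill them in place before each replay.
max_bs, max_context_len = 8, 1024          # illustrative capacity
kv_indices = torch.zeros(max_bs * max_context_len, dtype=torch.int32, device="cuda")

def step():
    # Stand-in for the captured attention kernels that read kv_indices.
    return kv_indices.float().view(max_bs, -1).sum(dim=1)

_ = step()                 # warm-up so lazy CUDA init happens outside capture
torch.cuda.synchronize()

graph = torch.cuda.CUDAGraph()
with torch.cuda.graph(graph):
    out = step()           # capture; `out` also lives at a fixed address

kv_indices.copy_(torch.randint(0, 1 << 20, kv_indices.shape,
                               dtype=torch.int32, device="cuda"))
graph.replay()             # re-runs the captured kernels on the new contents
print(out)                 # reflects the updated kv_indices
```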
@@ -359,7 +358,6 @@ class FlashInferMLAAttnBackend(AttentionBackend):
         forward_mode: ForwardMode,
         spec_info: Optional[SpecInfo],
     ):
-
         if forward_mode.is_decode_or_idle():
             decode_wrapper = BatchMLAPagedAttentionWrapper(
                 self.workspace_buffer,
@@ -370,6 +368,7 @@ class FlashInferMLAAttnBackend(AttentionBackend):
                 kv_len_arr=self.cuda_graph_kv_lens[:num_tokens],
                 backend="auto",
             )
+
             seq_lens_sum = seq_lens.sum().item()
             self.indices_updater_decode.update(
                 req_pool_indices,
@@ -440,13 +439,11 @@ class FlashInferMLAAttnBackend(AttentionBackend):
|
|
440
439
|
spec_info: Optional[SpecInfo],
|
441
440
|
seq_lens_cpu: Optional[torch.Tensor],
|
442
441
|
):
|
443
|
-
|
444
442
|
if forward_mode.is_decode_or_idle():
|
445
443
|
assert seq_lens_cpu is not None
|
446
444
|
kv_len_arr_cpu = seq_lens_cpu[:bs]
|
447
|
-
num_pages_per_req = (seq_lens_cpu + self.page_size - 1) // self.page_size
|
448
445
|
self.cuda_graph_kv_indptr_cpu[1 : bs + 1] = torch.cumsum(
|
449
|
-
|
446
|
+
kv_len_arr_cpu, dim=0
|
450
447
|
)
|
451
448
|
self.fast_decode_kwargs.update(
|
452
449
|
{
|
@@ -455,6 +452,7 @@ class FlashInferMLAAttnBackend(AttentionBackend):
|
|
455
452
|
"kv_len_arr_cpu": kv_len_arr_cpu,
|
456
453
|
}
|
457
454
|
)
|
455
|
+
|
458
456
|
self.indices_updater_decode.update(
|
459
457
|
req_pool_indices[:bs],
|
460
458
|
seq_lens[:bs],
|
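The replay path now cumsums raw token counts (kv_len_arr_cpu) where it previously cumsummed page counts. The two agree exactly when page_size == 1, the only configuration this MLA path serves. A quick check with illustrative lengths:

    import torch

    seq_lens_cpu = torch.tensor([3, 5, 2])  # illustrative per-request lengths
    page_size = 1
    num_pages_per_req = (seq_lens_cpu + page_size - 1) // page_size
    assert torch.equal(num_pages_per_req, seq_lens_cpu)  # ceil-div by 1 is identity
    print(torch.cumsum(seq_lens_cpu, dim=0))  # tensor([ 3,  8, 10])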
@@ -534,6 +532,7 @@ class FlashInferMLAAttnBackend(AttentionBackend):
             q_rope = q_rope.view(
                 -1, layer.tp_q_head_num, layer.head_dim - layer.v_head_dim
             )
+
         if self.forward_metadata.use_ragged:
             # ragged prefill
             if q_rope is not None:
@@ -554,8 +553,6 @@ class FlashInferMLAAttnBackend(AttentionBackend):
             k_buf = forward_batch.token_to_kv_pool.get_key_buffer(layer.layer_id).to(
                 q.dtype
             )
-            k_buf = k_buf.view(-1, self.page_size, k_buf.shape[-1])
-
             if q_rope is None:
                 qall = q.view(-1, layer.tp_q_head_num, layer.head_dim)
                 q, q_rope = (
@@ -617,17 +614,17 @@ class FlashInferMLAAttnBackend(AttentionBackend):
         q_nope = reshaped_q[:, :, : layer.v_head_dim]
         q_rope = reshaped_q[:, :, layer.v_head_dim :]
 
-
+        k_buffer = forward_batch.token_to_kv_pool.get_key_buffer(layer.layer_id).to(
             q.dtype
         )
-        k_buf = k_buf.view(-1, self.page_size, k_buf.shape[-1])
 
         o = q_nope.new_empty(q_nope.shape)
+        # Direct call to run without the wrapper
         o = decode_wrapper.run(
             q_nope,
             q_rope,
-
-
+            k_buffer[:, :, : layer.v_head_dim],
+            k_buffer[:, :, layer.v_head_dim :],
             out=o,
         )
 
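MLA keeps the latent (ckv) and rotary (k_pe) parts of each compressed KV entry side by side in the last dimension of a single buffer, so decode_wrapper.run receives two views of one tensor rather than separate K and V caches; the split point layer.v_head_dim appears to coincide with the latent rank in this absorbed layout. A shape-level sketch with DeepSeek-style example dimensions, not taken from the diff:

    import torch

    kv_lora_rank, qk_rope_head_dim = 512, 64  # example MLA dimensions
    num_tokens = 10
    # One compressed entry per token: [ckv | k_pe] concatenated on the last dim.
    k_buffer = torch.randn(num_tokens, 1, kv_lora_rank + qk_rope_head_dim)
    ckv = k_buffer[:, :, :kv_lora_rank]   # latent part, serves as both K and V
    k_pe = k_buffer[:, :, kv_lora_rank:]  # rotary positional part
    assert ckv.shape[-1] == 512 and k_pe.shape[-1] == 64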
@@ -646,10 +643,9 @@ class FlashInferMLAIndicesUpdaterDecode:
         self.scaling = model_runner.model_config.scaling
         self.data_type = model_runner.dtype
         self.attn_backend = attn_backend
-
+
         # Buffers and wrappers
         self.kv_indptr = attn_backend.kv_indptr
-        self.kv_indices = attn_backend.kv_indices
         self.req_to_token = model_runner.req_to_token_pool.req_to_token
         self.q_indptr = attn_backend.q_indptr_decode
 
@@ -693,17 +689,13 @@ class FlashInferMLAIndicesUpdaterDecode:
         kv_lens = paged_kernel_lens.to(torch.int32)
         sm_scale = self.scaling
         if spec_info is None:
-            num_pages_per_req = (
-                paged_kernel_lens + self.page_size - 1
-            ) // self.page_size
-            kv_indptr[1 : bs + 1] = torch.cumsum(num_pages_per_req, dim=0)
+            kv_indptr[1 : bs + 1] = torch.cumsum(paged_kernel_lens, dim=0)
             kv_indptr = kv_indptr[: bs + 1]
             kv_indices = (
-
+                torch.empty(paged_kernel_lens_sum, dtype=torch.int32, device="cuda")
                 if not init_metadata_replay
                 else fast_decode_kwargs["kv_indices"]
             )
-
             create_flashinfer_kv_indices_triton[(bs,)](
                 self.req_to_token,
                 req_pool_indices,
@@ -712,40 +704,39 @@ class FlashInferMLAIndicesUpdaterDecode:
                 None,
                 kv_indices,
                 self.req_to_token.shape[1],
-                self.page_size,
             )
         else:
             kv_indptr, kv_indices = spec_info.kv_indptr, spec_info.kv_indices
 
         if not init_metadata_replay:
             wrapper.plan(
-
-                kv_indptr
-                kv_indices
-
-
-
-
-
-
-                sm_scale
-
-
+                q_indptr,
+                kv_indptr,
+                kv_indices,
+                kv_lens,
+                self.num_local_heads,
+                self.kv_lora_rank,
+                self.qk_rope_head_dim,
+                1,
+                False,
+                sm_scale,
+                self.data_type,
+                self.data_type,
             )
         else:
             wrapper.plan(
-
-
-                kv_indices
-
-
-
-
-
-
-                sm_scale
-
-
+                fast_decode_kwargs["qo_indptr_cpu"],
+                fast_decode_kwargs["kv_indptr_cpu"],
+                kv_indices,
+                fast_decode_kwargs["kv_len_arr_cpu"],
+                self.num_local_heads,
+                self.kv_lora_rank,
+                self.qk_rope_head_dim,
+                1,
+                False,
+                sm_scale,
+                self.data_type,
+                self.data_type,
             )
 
 
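The rebuilt plan calls pass their arguments positionally. Read against FlashInfer's documented BatchMLAPagedAttentionWrapper.plan signature, the non-replay call appears to decode as annotated below; the comments are an editorial reading, not part of the diff:

    wrapper.plan(
        q_indptr,               # qo_indptr: per-request query offsets
        kv_indptr,              # kv_indptr: per-request KV offsets
        kv_indices,             # flat token indices from the Triton kernel
        kv_lens,                # kv_len_arr: per-request KV lengths
        self.num_local_heads,   # num_heads
        self.kv_lora_rank,      # head_dim_ckv: compressed latent dim
        self.qk_rope_head_dim,  # head_dim_kpe: rotary positional dim
        1,                      # page_size: this path assumes page_size == 1
        False,                  # causal: decode queries see the whole prefix
        sm_scale,               # softmax scale
        self.data_type,         # q_data_type
        self.data_type,         # kv_data_type
    )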
@@ -767,14 +758,12 @@ class FlashInferMLAIndicesUpdaterPrefill:
         # Buffers and wrappers
         self.kv_indptr = attn_backend.kv_indptr
         self.qo_indptr = attn_backend.qo_indptr
-        self.kv_indices = attn_backend.kv_indices
         self.req_to_token = model_runner.req_to_token_pool.req_to_token
         self.prefill_wrapper_ragged = attn_backend.prefill_wrapper_ragged
-        self.page_size = model_runner.page_size
 
     def update(
         self,
-        req_pool_indices: torch.
+        req_pool_indices: torch.Tnesor,
         seq_lens: torch.Tensor,
         seq_lens_sum: int,
         prefix_lens: torch.Tensor,
@@ -788,6 +777,7 @@ class FlashInferMLAIndicesUpdaterPrefill:
         else:
             paged_kernel_lens = seq_lens
             paged_kernel_lens_sum = seq_lens_sum
+
         self.call_begin_forward(
             self.prefill_wrapper_ragged,
             prefill_wrapper_paged,
@@ -821,12 +811,13 @@ class FlashInferMLAIndicesUpdaterPrefill:
 
         if spec_info is None:
             assert len(seq_lens) == len(req_pool_indices)
-            num_pages_per_req = (
-                paged_kernel_lens + self.page_size - 1
-            ) // self.page_size
-            kv_indptr[1 : bs + 1] = torch.cumsum(num_pages_per_req, dim=0)
+            kv_indptr[1 : bs + 1] = torch.cumsum(paged_kernel_lens, dim=0)
             kv_indptr = kv_indptr[: bs + 1]
-            kv_indices =
+            kv_indices = torch.empty(
+                paged_kernel_lens_sum,
+                dtype=torch.int32,
+                device=req_pool_indices.device,
+            )
             create_flashinfer_kv_indices_triton[(bs,)](
                 self.req_to_token,
                 req_pool_indices,
@@ -835,7 +826,6 @@ class FlashInferMLAIndicesUpdaterPrefill:
                 None,
                 kv_indices,
                 self.req_to_token.shape[1],
-                self.page_size,
             )
             qo_indptr[1 : bs + 1] = torch.cumsum(seq_lens - prefix_lens, dim=0)
             qo_indptr = qo_indptr[: bs + 1]
@@ -853,6 +843,7 @@ class FlashInferMLAIndicesUpdaterPrefill:
                 self.req_to_token,
             )
         )
+
         if use_ragged:
             # ragged prefill
             wrapper_ragged.begin_forward(
@@ -867,26 +858,20 @@ class FlashInferMLAIndicesUpdaterPrefill:
             )
         else:
             # mla paged prefill
-
-                assert (
-                    self.page_size == 1
-                ), "Only page_size=1 is supported for flashinfer backend with speculative decoding"
-                kv_lens = kv_indptr[1:] - kv_indptr[:-1]
-            else:
-                kv_lens = paged_kernel_lens.to(torch.int32)
+            kv_len_arr = kv_indptr[1:] - kv_indptr[:-1]
             wrapper_paged.plan(
-                qo_indptr
-                kv_indptr
-                kv_indices
-                kv_len_arr
-
-
-
-
-
-                sm_scale
-
-
+                qo_indptr,
+                kv_indptr,
+                kv_indices,
+                kv_len_arr,
+                self.num_local_heads,
+                self.kv_lora_rank,
+                self.qk_rope_head_dim,
+                1,
+                True,
+                sm_scale,
+                self.q_data_type,
+                self.data_type,
             )
 
 
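With the speculative-decoding branch gone, per-request KV lengths are recovered by differencing the freshly built indptr, the usual CSR trick. A quick numeric check with illustrative offsets:

    import torch

    kv_indptr = torch.tensor([0, 3, 8, 10])      # CSR-style offsets, illustrative
    kv_len_arr = kv_indptr[1:] - kv_indptr[:-1]  # per-request lengths
    print(kv_len_arr)                            # tensor([3, 5, 2])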
@@ -981,7 +966,6 @@ class FlashInferMLAMultiStepDraftBackend:
             call_fn(i, forward_batch)
 
     def init_forward_metadata(self, forward_batch: ForwardBatch):
-
         kv_indices = torch.zeros(
             (
                 self.speculative_num_steps,
@@ -1017,7 +1001,6 @@ class FlashInferMLAMultiStepDraftBackend:
         )
 
     def init_forward_metadata_capture_cuda_graph(self, forward_batch: ForwardBatch):
-
         def call_fn(i, forward_batch):
             self.attn_backends[i].init_forward_metadata_capture_cuda_graph(
                 forward_batch.batch_size,
@@ -1034,7 +1017,6 @@ class FlashInferMLAMultiStepDraftBackend:
     def init_forward_metadata_replay_cuda_graph(
         self, forward_batch: ForwardBatch, bs: int
     ):
-
         def call_fn(i, forward_batch):
             self.attn_backends[i].init_forward_metadata_replay_cuda_graph(
                 bs,
sglang-0.5.1.post2/sglang/srt/layers/attention/utils.py (new file)
@@ -0,0 +1,99 @@
+import triton
+import triton.language as tl
+
+# Keep this in sync with the Triton kernel inside `create_flashmla_kv_indices_triton`.
+# Number of pages that the kernel writes per iteration.
+# Exposed here so other Python modules can import it instead of hard-coding 64.
+TRITON_PAD_NUM_PAGE_PER_BLOCK = 64
+
+
+@triton.jit
+def create_flashinfer_kv_indices_triton(
+    req_to_token_ptr,  # [max_batch, max_context_len]
+    req_pool_indices_ptr,
+    page_kernel_lens_ptr,
+    kv_indptr,
+    kv_start_idx,
+    kv_indices_ptr,
+    req_to_token_ptr_stride: tl.constexpr,
+):
+    BLOCK_SIZE: tl.constexpr = 512
+    pid = tl.program_id(axis=0)
+
+    # find the req pool idx, this is for batch to token
+    req_pool_index = tl.load(req_pool_indices_ptr + pid)
+    kv_indices_offset = tl.load(kv_indptr + pid)
+
+    kv_start = 0
+    kv_end = 0
+    if kv_start_idx:
+        kv_start = tl.load(kv_start_idx + pid).to(tl.int32)
+        kv_end = kv_start
+    kv_end += tl.load(page_kernel_lens_ptr + pid).to(tl.int32)
+
+    num_loop = tl.cdiv(kv_end - kv_start, BLOCK_SIZE)
+    for i in range(num_loop):
+        # index into req_to_token_ptr needs to be int64
+        offset = tl.arange(0, BLOCK_SIZE).to(tl.int64) + i * BLOCK_SIZE
+        mask = offset < kv_end - kv_start
+        data = tl.load(
+            req_to_token_ptr
+            + req_pool_index * req_to_token_ptr_stride
+            + kv_start
+            + offset,
+            mask=mask,
+        )
+        tl.store(kv_indices_ptr + kv_indices_offset + offset, data, mask=mask)
+
+
+@triton.jit
+def create_flashmla_kv_indices_triton(
+    req_to_token_ptr,  # [max_batch, max_context_len]
+    req_pool_indices_ptr,
+    page_kernel_lens_ptr,
+    kv_start_idx,
+    kv_indices_ptr,
+    req_to_token_ptr_stride: tl.constexpr,
+    kv_indices_ptr_stride: tl.constexpr,
+    NUM_PAGE_PER_BLOCK: tl.constexpr = TRITON_PAD_NUM_PAGE_PER_BLOCK,
+    PAGED_SIZE: tl.constexpr = 64,
+):
+    BLOCK_SIZE: tl.constexpr = 4096
+    pid = tl.program_id(axis=0)
+
+    # find the req pool idx, this is for batch to token
+    req_pool_index = tl.load(req_pool_indices_ptr + pid)
+
+    kv_start = 0
+    kv_end = 0
+    if kv_start_idx:
+        kv_start = tl.load(kv_start_idx + pid).to(tl.int32)
+        kv_end = kv_start
+
+    kv_end += tl.load(page_kernel_lens_ptr + pid).to(tl.int32)
+
+    num_paged = tl.cdiv(kv_end - kv_start, PAGED_SIZE)
+    num_pages_loop = tl.cdiv(kv_end - kv_start, BLOCK_SIZE)
+
+    for i in range(num_pages_loop):
+        # index into req_to_token_ptr needs to be int64
+        paged_offset = (
+            tl.arange(0, NUM_PAGE_PER_BLOCK).to(tl.int64) + i * NUM_PAGE_PER_BLOCK
+        ) * PAGED_SIZE
+        paged_offset_out = tl.arange(0, NUM_PAGE_PER_BLOCK) + i * NUM_PAGE_PER_BLOCK
+
+        mask = paged_offset < num_paged * PAGED_SIZE
+        mask_out = paged_offset_out < num_paged
+
+        data = tl.load(
+            req_to_token_ptr
+            + req_pool_index * req_to_token_ptr_stride
+            + kv_start
+            + paged_offset,
+            mask=mask,
+        )
+        tl.store(
+            kv_indices_ptr + pid * kv_indices_ptr_stride + paged_offset_out,
+            data // PAGED_SIZE,
+            mask=mask_out,
+        )
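A minimal launch-and-verify sketch for create_flashinfer_kv_indices_triton as defined above; CUDA is required, and the shapes and values are illustrative:

    import torch
    from sglang.srt.layers.attention.utils import create_flashinfer_kv_indices_triton

    bs = 2
    seq_lens = torch.tensor([3, 2], dtype=torch.int32, device="cuda")
    req_pool_indices = torch.tensor([0, 1], dtype=torch.int32, device="cuda")
    req_to_token = torch.arange(20, dtype=torch.int32, device="cuda").reshape(2, 10)
    kv_indptr = torch.zeros(bs + 1, dtype=torch.int32, device="cuda")
    kv_indptr[1:] = torch.cumsum(seq_lens, dim=0)

    kv_indices = torch.empty(int(kv_indptr[-1]), dtype=torch.int32, device="cuda")
    create_flashinfer_kv_indices_triton[(bs,)](
        req_to_token,
        req_pool_indices,
        seq_lens,
        kv_indptr,
        None,  # kv_start_idx: no prefix offset
        kv_indices,
        req_to_token.shape[1],
    )
    # Request 0 contributes token slots 0..2, request 1 contributes 10..11.
    assert kv_indices.tolist() == [0, 1, 2, 10, 11]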
{sglang-0.5.1 → sglang-0.5.1.post2}/sglang/srt/layers/moe/cutlass_moe.py
@@ -157,10 +157,6 @@ def cutlass_fused_experts_fp8(
     rep_a_q = shuffle_rows(a_q, a_map, (m * topk, k))
     rep_a1_scales = shuffle_rows(a1_scale, a_map, (m * topk, int(k / 128)))
 
-    if not is_sm100_supported():
-        rep_a1_scales = per_group_transpose(rep_a1_scales, expert_offsets)
-        w1_scale = w1_scale.contiguous()
-
     c1 = torch.empty((m * topk, n * 2), device=device, dtype=out_dtype)
     c2 = torch.empty((m * topk, k), device=device, dtype=out_dtype)
 
@@ -192,9 +188,6 @@ def cutlass_fused_experts_fp8(
     silu_and_mul(c1, intermediate)
 
     intemediate_q, a2_scale = sglang_per_token_group_quant_fp8(intermediate, 128)
-    if not is_sm100_supported():
-        a2_scale = per_group_transpose(a2_scale, expert_offsets)
-        w2_scale = w2_scale.contiguous()
 
     fp8_blockwise_scaled_grouped_mm(
         c2,