sglang 0.4.4.post2.tar.gz → 0.4.4.post3.tar.gz
This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
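At a glance, the listing below shows a rewritten FlashAttention backend (sglang/srt/layers/attention/flashattention_backend.py, +434/−295), a new AWQ quantization module (sglang/srt/layers/quantization/awq.py, +200), new CLIP model and multimodal-processor files (sglang/srt/models/clip.py, +563; sglang/srt/managers/multimodal_processors/clip.py, +63), and a new sglang/srt/patch_torch.py (+71). The version bump itself is the one-line swap of sglang/version.py; a minimal sketch of the new file, assuming the conventional single-assignment layout (the diff records only +1/−1 lines, so the exact content is presumed):

```python
# sglang/version.py in 0.4.4.post3 -- presumed one-line content;
# the module simply exposes the package version string.
__version__ = "0.4.4.post3"
```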
- {sglang-0.4.4.post2/sglang.egg-info → sglang-0.4.4.post3}/PKG-INFO +8 -4
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/pyproject.toml +14 -5
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/bench_serving.py +23 -3
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/configs/deepseekvl2.py +10 -1
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/configs/model_config.py +5 -16
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/distributed/device_communicators/custom_all_reduce.py +1 -1
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/distributed/parallel_state.py +32 -5
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/entrypoints/http_server.py +7 -1
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/entrypoints/verl_engine.py +2 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/function_call_parser.py +0 -1
- sglang-0.4.4.post3/sglang/srt/layers/attention/flashattention_backend.py +434 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/dp_attention.py +12 -1
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/topk.py +30 -3
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/__init__.py +134 -165
- sglang-0.4.4.post3/sglang/srt/layers/quantization/awq.py +200 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/fp8_kernel.py +2 -1
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/gptq.py +30 -40
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/w8a8_fp8.py +1 -1
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/rotary_embedding.py +12 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/lora/backend/base_backend.py +4 -4
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/lora/backend/flashinfer_backend.py +12 -9
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/lora/backend/triton_backend.py +5 -8
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/lora/layers.py +19 -33
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/lora/lora_manager.py +20 -7
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/lora/mem_pool.py +12 -6
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/lora/triton_ops/gate_up_lora_b.py +10 -4
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/lora/triton_ops/qkv_lora_b.py +8 -3
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/lora/triton_ops/sgemm_lora_a.py +16 -5
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/lora/triton_ops/sgemm_lora_b.py +11 -6
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/lora/utils.py +6 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/managers/io_struct.py +4 -2
- sglang-0.4.4.post3/sglang/srt/managers/multimodal_processors/clip.py +63 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/managers/schedule_batch.py +1 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/managers/scheduler.py +25 -19
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/managers/tokenizer_manager.py +0 -1
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/managers/tp_worker.py +3 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/model_executor/cuda_graph_runner.py +9 -8
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/model_executor/model_runner.py +9 -6
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/model_loader/loader.py +11 -1
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/model_loader/weight_utils.py +6 -3
- sglang-0.4.4.post3/sglang/srt/models/clip.py +563 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/deepseek_janus_pro.py +2 -2
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/deepseek_v2.py +151 -26
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/gemma3_causal.py +12 -2
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/gemma3_mm.py +6 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/openai_api/adapter.py +88 -87
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/openai_api/protocol.py +10 -5
- sglang-0.4.4.post3/sglang/srt/patch_torch.py +71 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/server_args.py +21 -11
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/speculative/eagle_worker.py +1 -1
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/utils.py +33 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/test/runners.py +27 -2
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/test/test_utils.py +1 -1
- sglang-0.4.4.post3/sglang/version.py +1 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3/sglang.egg-info}/PKG-INFO +8 -4
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang.egg-info/SOURCES.txt +4 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang.egg-info/requires.txt +6 -3
- sglang-0.4.4.post2/sglang/srt/layers/attention/flashattention_backend.py +0 -295
- sglang-0.4.4.post2/sglang/version.py +0 -1
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/LICENSE +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/README.md +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/setup.cfg +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/__init__.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/api.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/bench_offline_throughput.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/bench_one_batch.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/bench_one_batch_server.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/check_env.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/global_config.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/lang/__init__.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/lang/backend/__init__.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/lang/backend/anthropic.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/lang/backend/base_backend.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/lang/backend/litellm.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/lang/backend/openai.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/lang/backend/runtime_endpoint.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/lang/backend/vertexai.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/lang/chat_template.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/lang/choices.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/lang/compiler.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/lang/interpreter.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/lang/ir.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/lang/tracer.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/launch_server.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/llama3_eval.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/_custom_ops.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/aio_rwlock.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/code_completion_parser.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/configs/__init__.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/configs/chatglm.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/configs/dbrx.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/configs/device_config.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/configs/exaone.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/configs/janus_pro.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/configs/load_config.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/configs/utils.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/connector/__init__.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/connector/base_connector.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/connector/redis.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/connector/s3.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/connector/serde/__init__.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/connector/serde/safe_serde.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/connector/serde/serde.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/connector/utils.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/constrained/base_grammar_backend.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/constrained/llguidance_backend.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/constrained/outlines_backend.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/constrained/outlines_jump_forward.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/constrained/xgrammar_backend.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/conversation.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/custom_op.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/disaggregation/conn.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/disaggregation/decode.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/disaggregation/mini_lb.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/disaggregation/prefill.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/disaggregation/utils.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/distributed/__init__.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/distributed/communication_op.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/distributed/device_communicators/cuda_wrapper.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/distributed/device_communicators/custom_all_reduce_utils.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/distributed/device_communicators/hpu_communicator.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/distributed/device_communicators/pynccl.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/distributed/device_communicators/pynccl_wrapper.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/distributed/device_communicators/shm_broadcast.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/distributed/device_communicators/xpu_communicator.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/distributed/utils.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/entrypoints/engine.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/hf_transformers_utils.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/activation.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/attention/base_attn_backend.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/attention/double_sparsity_backend.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/attention/flashinfer_backend.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/attention/flashinfer_mla_backend.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/attention/flashmla_backend.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/attention/torch_native_backend.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/attention/triton_backend.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/attention/triton_ops/decode_attention.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/attention/triton_ops/double_sparsity_attention.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/attention/triton_ops/extend_attention.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/attention/triton_ops/prefill_attention.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/attention/triton_ops/rocm_mla_decode_rope.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/attention/utils.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/attention/vision.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/elementwise.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/layernorm.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/linear.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/logits_processor.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/ep_moe/__init__.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/ep_moe/kernels.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/ep_moe/layer.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/ep_moe/token_dispatcher.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_native.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/__init__.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=256,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=64,device_name=NVIDIA_L20,dtype=int8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=64,device_name=NVIDIA_L40S,dtype=int8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=1280,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=2560,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=320,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=14336,device_name=AMD_Radeon_Graphics.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=14336,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=AMD_Radeon_Graphics.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=2048,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=AMD_Radeon_Graphics.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=AMD_Radeon_Graphics.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=8192,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/fused_moe.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/fused_moe_triton/layer.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/moe/router.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/parameter.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/pooler.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/base_config.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/blockwise_int8.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/compressed_tensors/__init__.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/compressed_tensors/compressed_tensors.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/compressed_tensors/compressed_tensors_moe.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/compressed_tensors/schemes/__init__.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/compressed_tensors/utils.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=24576,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=24576,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=24576,K=1536,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/fp8.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/fp8_utils.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/int8_kernel.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/int8_utils.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/kv_cache.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/modelopt_quant.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/utils.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/quantization/w8a8_int8.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/radix_attention.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/sampler.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/torchao_utils.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/layers/vocab_parallel_embedding.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/lora/backend/__init__.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/lora/lora.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/lora/lora_config.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/lora/triton_ops/__init__.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/managers/cache_controller.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/managers/configure_logging.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/managers/data_parallel_controller.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/managers/detokenizer_manager.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/managers/expert_distribution.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/managers/mm_utils.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/managers/multimodal_processor.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/managers/multimodal_processors/base_processor.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/managers/multimodal_processors/deepseek_vl_v2.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/managers/multimodal_processors/gemma3.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/managers/multimodal_processors/janus_pro.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/managers/multimodal_processors/llava.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/managers/multimodal_processors/minicpm.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/managers/multimodal_processors/mlama.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/managers/multimodal_processors/qwen_vl.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/managers/schedule_policy.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/managers/scheduler_output_processor_mixin.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/managers/session_controller.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/managers/tp_worker_overlap_thread.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/managers/utils.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/mem_cache/base_prefix_cache.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/mem_cache/chunk_cache.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/mem_cache/flush_cache.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/mem_cache/hiradix_cache.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/mem_cache/memory_pool.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/mem_cache/paged_allocator.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/mem_cache/radix_cache.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/metrics/collector.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/metrics/func_timer.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/mm_utils.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/model_executor/forward_batch_info.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/model_loader/__init__.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/model_loader/utils.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/model_parallel.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/baichuan.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/chatglm.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/commandr.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/dbrx.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/deepseek.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/deepseek_nextn.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/deepseek_vl2.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/exaone.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/gemma.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/gemma2.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/gemma2_reward.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/gpt2.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/gpt_bigcode.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/granite.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/grok.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/internlm2.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/internlm2_reward.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/llama.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/llama_classification.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/llama_eagle.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/llama_eagle3.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/llama_embedding.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/llama_reward.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/llava.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/llavavid.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/minicpm.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/minicpm3.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/minicpmo.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/minicpmv.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/mistral.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/mixtral.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/mixtral_quant.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/mllama.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/olmo.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/olmo2.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/olmoe.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/phi3_small.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/qwen.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/qwen2.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/qwen2_5_vl.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/qwen2_classification.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/qwen2_eagle.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/qwen2_moe.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/qwen2_rm.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/qwen2_vl.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/registry.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/stablelm.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/torch_native_llama.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/xverse.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/xverse_moe.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/models/yivl.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/reasoning_parser.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/sampling/custom_logit_processor.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/sampling/penaltylib/__init__.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/sampling/penaltylib/frequency_penalty.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/sampling/penaltylib/min_new_tokens.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/sampling/penaltylib/orchestrator.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/sampling/penaltylib/presence_penalty.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/sampling/sampling_batch_info.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/sampling/sampling_params.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/server.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/speculative/build_eagle_tree.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/speculative/eagle_draft_cuda_graph_runner.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/speculative/eagle_utils.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/speculative/spec_info.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/torch_memory_saver_adapter.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/warmup.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/test/__init__.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/test/attention/__init__.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/test/attention/test_flashattn_backend.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/test/few_shot_gsm8k.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/test/few_shot_gsm8k_engine.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/test/run_eval.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/test/send_one.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/test/simple_eval_common.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/test/simple_eval_gpqa.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/test/simple_eval_humaneval.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/test/simple_eval_math.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/test/simple_eval_mgsm.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/test/simple_eval_mmlu.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/test/test_activation.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/test/test_block_fp8.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/test/test_block_fp8_ep.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/test/test_custom_ops.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/test/test_dynamic_grad_mode.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/test/test_layernorm.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/test/test_programs.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/utils.py +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang.egg-info/dependency_links.txt +0 -0
- {sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang.egg-info/top_level.txt +0 -0
{sglang-0.4.4.post2/sglang.egg-info → sglang-0.4.4.post3}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: sglang
-Version: 0.4.4.post2
+Version: 0.4.4.post3
 Summary: SGLang is yet another fast serving framework for large language models and vision language models.
 License: Apache License
         Version 2.0, January 2004
@@ -218,6 +218,7 @@ Requires-Dist: numpy
 Requires-Dist: IPython
 Requires-Dist: setproctitle
 Provides-Extra: runtime-common
+Requires-Dist: compressed-tensors; extra == "runtime-common"
 Requires-Dist: datasets; extra == "runtime-common"
 Requires-Dist: decord; extra == "runtime-common"
 Requires-Dist: fastapi; extra == "runtime-common"
@@ -240,14 +241,17 @@ Requires-Dist: torchao>=0.7.0; extra == "runtime-common"
 Requires-Dist: transformers==4.50.0; extra == "runtime-common"
 Requires-Dist: uvicorn; extra == "runtime-common"
 Requires-Dist: uvloop; extra == "runtime-common"
-Requires-Dist:
+Requires-Dist: compressed-tensors; extra == "runtime-common"
+Requires-Dist: xgrammar==0.1.17; extra == "runtime-common"
 Provides-Extra: srt
 Requires-Dist: sglang[runtime_common]; extra == "srt"
-Requires-Dist: sgl-kernel==0.0.5.
+Requires-Dist: sgl-kernel==0.0.5.post4; extra == "srt"
 Requires-Dist: flashinfer_python==0.2.3; extra == "srt"
 Requires-Dist: torch==2.5.1; extra == "srt"
 Requires-Dist: cuda-python; extra == "srt"
 Requires-Dist: outlines<=0.1.11,>=0.0.44; extra == "srt"
+Requires-Dist: partial_json_parser; extra == "srt"
+Requires-Dist: einops; extra == "srt"
 Provides-Extra: srt-hip
 Requires-Dist: sglang[runtime_common]; extra == "srt-hip"
 Requires-Dist: torch; extra == "srt-hip"
@@ -271,7 +275,7 @@ Requires-Dist: anthropic>=0.20.0; extra == "anthropic"
 Provides-Extra: litellm
 Requires-Dist: litellm>=1.0.0; extra == "litellm"
 Provides-Extra: torch-memory-saver
-Requires-Dist: torch_memory_saver>=0.0.
+Requires-Dist: torch_memory_saver>=0.0.4; extra == "torch-memory-saver"
 Provides-Extra: test
 Requires-Dist: jsonlines; extra == "test"
 Requires-Dist: matplotlib; extra == "test"
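Taken together, the metadata hunks above add compressed-tensors, pin xgrammar==0.1.17 and sgl-kernel==0.0.5.post4, add partial_json_parser and einops to the srt extra, and raise the torch_memory_saver floor to 0.0.4. A quick way to confirm what an installed copy declares is the standard-library importlib.metadata; a minimal sketch, assuming sglang is installed in the current environment:

from importlib.metadata import requires, version

# Print the installed sglang version and the pins this release changed.
print(version("sglang"))  # expect "0.4.4.post3" after upgrading
for req in requires("sglang") or []:
    if any(name in req for name in ("sgl-kernel", "xgrammar", "torch_memory_saver")):
        print(req)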
{sglang-0.4.4.post2 → sglang-0.4.4.post3}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

 [project]
 name = "sglang"
-version = "0.4.4.post2"
+version = "0.4.4.post3"
 description = "SGLang is yet another fast serving framework for large language models and vision language models."
 readme = "README.md"
 requires-python = ">=3.8"
@@ -17,6 +17,7 @@ dependencies = ["aiohttp", "requests", "tqdm", "numpy", "IPython", "setproctitle
 
 [project.optional-dependencies]
 runtime_common = [
+    "compressed-tensors",
     "datasets",
     "decord",
     "fastapi",
@@ -39,21 +40,29 @@ runtime_common = [
     "transformers==4.50.0",
     "uvicorn",
     "uvloop",
-    "
+    "compressed-tensors",
+    "xgrammar==0.1.17",
 ]
 
 srt = [
     "sglang[runtime_common]",
-    "sgl-kernel==0.0.5.
+    "sgl-kernel==0.0.5.post4",
     "flashinfer_python==0.2.3",
     "torch==2.5.1",
     "cuda-python",
     "outlines>=0.0.44,<=0.1.11",
+    "partial_json_parser",
+    "einops",
 ]
 
 # HIP (Heterogeneous-computing Interface for Portability) for AMD
 # => base docker rocm/vllm-dev:20250114, not from public vllm whl
-srt_hip = [
+srt_hip = [
+    "sglang[runtime_common]",
+    "torch",
+    "vllm==0.6.7.dev2",
+    "outlines==0.1.11"
+]
 
 # xpu is not enabled in public vllm and torch whl,
 # need to follow https://docs.vllm.ai/en/latest/getting_started/xpu-installation.html to install vllm
@@ -71,7 +80,7 @@ srt_cpu = ["sglang[runtime_common]", "outlines>=0.0.44,<=0.1.11", "torch"]
 openai = ["openai>=1.0", "tiktoken"]
 anthropic = ["anthropic>=0.20.0"]
 litellm = ["litellm>=1.0.0"]
-torch_memory_saver = ["torch_memory_saver>=0.0.
+torch_memory_saver = ["torch_memory_saver>=0.0.4"]
 test = [
     "jsonlines",
     "matplotlib",
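The same dependency changes can be read straight out of the sdist with the standard-library tomllib; a small sketch, assuming it runs next to the unpacked package's pyproject.toml (and note it needs Python 3.11+, stricter than the package's own >=3.8 floor):

import tomllib  # stdlib TOML parser, Python 3.11+

with open("pyproject.toml", "rb") as f:
    project = tomllib.load(f)["project"]

print(project["version"])  # "0.4.4.post3"
# Extras touched by this release: runtime_common, srt, srt_hip, torch_memory_saver.
for extra in ("runtime_common", "srt"):
    print(extra, project["optional-dependencies"][extra])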
{sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/bench_serving.py
@@ -965,7 +965,7 @@ async def benchmark(
     request_rate: float,
     max_concurrency: Optional[int],
     disable_tqdm: bool,
-
+    lora_names: List[str],
     extra_request_body: Dict[str, Any],
     profile: bool,
     pd_seperated: bool = False,
@@ -988,6 +988,11 @@ async def benchmark(
     # Warmup
     print("Starting initial single prompt test run...")
     test_prompt, test_prompt_len, test_output_len = input_requests[0]
+    if lora_names != None and len(lora_names) != 0:
+        lora_name = lora_names[0]
+    else:
+        lora_name = None
+
     test_input = RequestFuncInput(
         model=model_id,
         prompt=test_prompt,
@@ -1028,6 +1033,12 @@
     tasks: List[asyncio.Task] = []
     async for request in get_request(input_requests, request_rate):
         prompt, prompt_len, output_len = request
+        if lora_names != None and len(lora_names) != 0:
+            idx = random.randint(0, len(lora_names) - 1)
+            lora_name = lora_names[idx]
+        else:
+            lora_name = None
+
         request_func_input = RequestFuncInput(
             model=model_id,
             prompt=prompt,
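The hunk above draws a uniformly random adapter per request via random.randint over valid indices; random.choice is the equivalent one-line idiom. A minimal sketch with hypothetical adapter names:

import random

lora_names = ["adapter_a", "adapter_b", "adapter_c"]  # hypothetical names

# Equivalent to: lora_names[random.randint(0, len(lora_names) - 1)]
lora_name = random.choice(lora_names) if lora_names else None
print(lora_name)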
@@ -1347,7 +1358,7 @@ def run_benchmark(args_: argparse.Namespace):
         request_rate=args.request_rate,
         max_concurrency=args.max_concurrency,
         disable_tqdm=args.disable_tqdm,
-
+        lora_names=args.lora_name,
         extra_request_body=extra_request_body,
         profile=args.profile,
         pd_seperated=args.pd_seperated,
@@ -1366,6 +1377,13 @@ def set_ulimit(target_soft_limit=65535):
         print(f"Fail to set RLIMIT_NOFILE: {e}")
 
 
+class LoRAPathAction(argparse.Action):
+    def __call__(self, parser, namespace, values, option_string=None):
+        setattr(namespace, self.dest, [])
+        for lora_name in values:
+            getattr(namespace, self.dest).append(lora_name)
+
+
 if __name__ == "__main__":
     parser = ArgumentParser(description="Benchmark the online serving throughput.")
     parser.add_argument(
@@ -1509,8 +1527,10 @@ if __name__ == "__main__":
     parser.add_argument(
         "--lora-name",
         type=str,
+        nargs="*",
         default=None,
-
+        action=LoRAPathAction,
+        help="The names of LoRA adapters. You can provide a list of names in the format {name} {name} {name}...",
     )
     parser.add_argument(
         "--prompt-suffix",
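With nargs="*" plus the LoRAPathAction added earlier, `--lora-name a b c` now parses into a Python list. A self-contained sketch of the same flag wiring (the action body mirrors the hunks above; the adapter names are hypothetical):

import argparse

class LoRAPathAction(argparse.Action):
    # Same behavior as the action added above: collect every value into a list.
    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, [])
        for lora_name in values:
            getattr(namespace, self.dest).append(lora_name)

parser = argparse.ArgumentParser()
parser.add_argument(
    "--lora-name", type=str, nargs="*", default=None, action=LoRAPathAction
)

args = parser.parse_args(["--lora-name", "adapter_a", "adapter_b"])
print(args.lora_name)  # ['adapter_a', 'adapter_b']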
{sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/configs/deepseekvl2.py
@@ -4,7 +4,6 @@ from dataclasses import dataclass
 from typing import Dict, List, Optional, Tuple
 
 import torch
-import torchvision.transforms as T
 from PIL import Image, ImageOps
 from transformers import (
     AutoProcessor,
@@ -76,6 +75,16 @@ class ImageTransform(object):
         self.std = std
         self.normalize = normalize
 
+        # only load torchvision.transforms when needed
+        try:
+            import torchvision.transforms as T
+
+            # FIXME: add version check for gguf
+        except ImportError as err:
+            raise ImportError(
+                "Please install torchvision via `pip install torchvision` to use Deepseek-VL2."
+            ) from err
+
         transform_pipelines = [T.ToTensor()]
 
         if normalize:
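The pair of hunks above turns torchvision from a hard top-level dependency into a deferred one: it is imported only when ImageTransform is actually constructed, so text-only deployments no longer need it installed. The general deferred-import pattern, as a standalone sketch:

def _load_torchvision_transforms():
    # Deferred import: the dependency is only required on first use,
    # and the failure message tells the user exactly what to install.
    try:
        import torchvision.transforms as T
    except ImportError as err:
        raise ImportError(
            "Please install torchvision via `pip install torchvision`."
        ) from err
    return T

# Only pays the import (and requires the dependency) when this line runs:
T = _load_torchvision_transforms()
print(T.ToTensor)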
{sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/configs/model_config.py
@@ -22,11 +22,7 @@ import torch
 from transformers import PretrainedConfig
 
 from sglang.srt.hf_transformers_utils import get_config, get_context_length
-from sglang.srt.layers.quantization import (
-    BASE_QUANTIZATION_METHODS,
-    QUANTIZATION_METHODS,
-    VLLM_AVAILABLE,
-)
+from sglang.srt.layers.quantization import QUANTIZATION_METHODS
 from sglang.srt.utils import get_bool_env_var, is_hip
 
 logger = logging.getLogger(__name__)
@@ -239,12 +235,7 @@ class ModelConfig:
 
     # adapted from https://github.com/vllm-project/vllm/blob/v0.6.4.post1/vllm/config.py
     def _verify_quantization(self) -> None:
-
-        if VLLM_AVAILABLE:
-            supported_quantization = [*QUANTIZATION_METHODS]
-        else:
-            supported_quantization = [*BASE_QUANTIZATION_METHODS]
-
+        supported_quantization = [*QUANTIZATION_METHODS]
         rocm_supported_quantization = [
             "awq",
             "gptq",
@@ -282,11 +273,7 @@ class ModelConfig:
         quant_method = quant_cfg.get("quant_method", "").lower()
 
         # Detect which checkpoint is it
-
-        available_methods = (
-            QUANTIZATION_METHODS if VLLM_AVAILABLE else BASE_QUANTIZATION_METHODS
-        )
-        for _, method in available_methods.items():
+        for _, method in QUANTIZATION_METHODS.items():
             quantization_override = method.override_quantization_method(
                 quant_cfg, self.quantization
             )
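The simplified loop asks every registered quantization method whether it wants to override the checkpoint's declared scheme; the first non-None answer wins. A toy sketch of that contract (the class and return values here are hypothetical stand-ins, not sglang's real registry):

class ToyAWQMethod:
    # Hypothetical method: claims AWQ checkpoints and upgrades them to a
    # faster kernel by returning a replacement name, else None.
    @classmethod
    def override_quantization_method(cls, quant_cfg, user_quantization):
        if quant_cfg.get("quant_method") == "awq":
            return "awq_marlin"
        return None

QUANTIZATION_METHODS = {"awq": ToyAWQMethod}  # stand-in registry

quant_cfg = {"quant_method": "awq"}
quantization = None
for _, method in QUANTIZATION_METHODS.items():
    override = method.override_quantization_method(quant_cfg, quantization)
    if override is not None:
        quantization = override
print(quantization)  # awq_marlin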
@@ -467,6 +454,7 @@ def is_generation_model(model_architectures: List[str], is_embedding: bool = Fal
         or "InternLM2ForRewardModel" in model_architectures
         or "Qwen2ForRewardModel" in model_architectures
         or "Qwen2ForSequenceClassification" in model_architectures
+        or "CLIPModel" in model_architectures
     ):
         return False
     else:
@@ -488,6 +476,7 @@ multimodal_model_archs = [
     "MllamaForConditionalGeneration",
     "Qwen2VLForConditionalGeneration",
     "Qwen2_5_VLForConditionalGeneration",
+    "CLIPModel",
 ]
 
 
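These two one-line additions register CLIPModel as a non-generative, multimodal architecture, so it is routed to embedding-style handling. Both checks reduce to plain list membership; a sketch abridged to the entries visible in this diff:

# Abridged stand-ins for the list and condition patched above.
multimodal_model_archs = [
    "Qwen2VLForConditionalGeneration",
    "Qwen2_5_VLForConditionalGeneration",
    "CLIPModel",
]

def is_generation_model(model_architectures):
    # CLIPModel now short-circuits to "not a generation model".
    if "CLIPModel" in model_architectures:
        return False
    return True

print(is_generation_model(["CLIPModel"]))     # False
print("CLIPModel" in multimodal_model_archs)  # True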
{sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/distributed/device_communicators/custom_all_reduce.py
@@ -5,7 +5,7 @@ import logging
 import os
 from contextlib import contextmanager
 from functools import wraps
-from typing import Callable, List, Optional, TypeVar, Union
+from typing import Any, Callable, List, Optional, TypeVar, Union
 
 import torch
 import torch.distributed as dist
{sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/distributed/parallel_state.py
@@ -264,10 +264,16 @@ class GroupCoordinator:
         self.ca_comm: Optional[CustomAllreduce] = None
         if use_custom_allreduce and self.world_size > 1:
             # Initialize a custom fast all-reduce implementation.
-            self.ca_comm = CustomAllreduce(
-                group=self.cpu_group,
-                device=self.device,
-            )
+            try:
+                self.ca_comm = CustomAllreduce(
+                    group=self.cpu_group,
+                    device=self.device,
+                )
+            except Exception as e:
+                logger.warning(
+                    f"Setup Custom allreduce failed with {e}. To silence this "
+                    "warning, specify --disable-custom-all-reduce explicitly."
+                )
 
         from sglang.srt.distributed.device_communicators.hpu_communicator import (
             HpuCommunicator,
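The change converts a hard failure during custom all-reduce setup into a logged warning, leaving self.ca_comm as None so callers fall back to the default all-reduce path. The shape of that graceful-degradation pattern, with a hypothetical stand-in class:

import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)

class FlakyAllreduce:
    # Hypothetical stand-in for CustomAllreduce: construction can fail,
    # e.g. when GPUs lack peer-to-peer access.
    def __init__(self):
        raise RuntimeError("no P2P access between GPUs")

ca_comm = None
try:
    ca_comm = FlakyAllreduce()
except Exception as e:
    logger.warning(f"Setup Custom allreduce failed with {e}.")

print(ca_comm)  # None -> later code falls back to the regular all-reduce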
@@ -439,6 +445,15 @@ class GroupCoordinator:
         else:
             torch.distributed.all_reduce(input_, group=self.device_group)
 
+    def reduce_scatter(
+        self,
+        output: torch.Tensor,
+        input_list: List[torch.Tensor],
+    ) -> None:
+        # TODO(ch-wan): support other backends
+        torch.distributed.reduce_scatter(output, input_list, group=self.device_group)
+        return output
+
     def _all_gather_into_tensor(self, output: torch.Tensor, input: torch.Tensor):
         pynccl_comm = self.pynccl_comm
         if pynccl_comm is not None and not pynccl_comm.disabled:
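reduce_scatter sums chunk i across all ranks and delivers the result to rank i only; the new method simply forwards to torch.distributed. Because the collective needs an initialized (typically NCCL) process group, here the semantics are simulated on a single process instead:

import torch

# Two simulated ranks, each contributing one chunk per rank.
rank_inputs = [
    [torch.tensor([1.0, 1.0]), torch.tensor([2.0, 2.0])],  # rank 0's input_list
    [torch.tensor([3.0, 3.0]), torch.tensor([4.0, 4.0])],  # rank 1's input_list
]

# Chunk i is summed over ranks and lands in rank i's output tensor.
outputs = [torch.stack(chunks).sum(dim=0) for chunks in zip(*rank_inputs)]
print(outputs[0])  # rank 0 receives tensor([4., 4.])
print(outputs[1])  # rank 1 receives tensor([6., 6.])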
@@ -456,11 +471,23 @@ class GroupCoordinator:
             output, input, group_name=self.unique_name
         )
 
-    def all_gather(
+    def all_gather(
+        self,
+        input_: torch.Tensor,
+        dim: int = -1,
+        tensor_list: List[torch.Tensor] = None,
+    ) -> torch.Tensor:
         world_size = self.world_size
         # Bypass the function if we are using only 1 GPU.
         if world_size == 1:
             return input_
+
+        if tensor_list is not None:
+            # TODO(ch-wan): support other backends
+            return torch.distributed.all_gather(
+                tensor_list, input_, group=self.device_group
+            )
+
         assert (
             -input_.dim() <= dim < input_.dim()
         ), f"Invalid dim ({dim}) for input tensor with shape {input_.size()}"
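The extended all_gather grows a tensor_list escape hatch: when a pre-allocated list is passed, it defers to torch.distributed.all_gather instead of the fused gather-into-tensor path. A runnable sketch of that code path on a single-process gloo group (address and port are arbitrary):

import torch
import torch.distributed as dist

# One-process "gloo" group, just to exercise the tensor_list code path.
dist.init_process_group(
    "gloo", init_method="tcp://127.0.0.1:29512", rank=0, world_size=1
)

x = torch.arange(4.0)
tensor_list = [torch.empty(4) for _ in range(dist.get_world_size())]
dist.all_gather(tensor_list, x)  # each rank's tensor is gathered into the list
print(tensor_list)  # [tensor([0., 1., 2., 3.])]

dist.destroy_process_group()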
{sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/entrypoints/http_server.py
@@ -561,7 +561,13 @@ def available_models():
     served_model_names = [_global_state.tokenizer_manager.served_model_name]
     model_cards = []
     for served_model_name in served_model_names:
-        model_cards.append(
+        model_cards.append(
+            ModelCard(
+                id=served_model_name,
+                root=served_model_name,
+                max_model_len=_global_state.tokenizer_manager.model_config.context_len,
+            )
+        )
     return ModelList(data=model_cards)
 
 
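After this change the OpenAI-compatible /v1/models endpoint also reports the model's context length as max_model_len. A client-side sketch, assuming a server is already running at the default local port:

import requests

resp = requests.get("http://127.0.0.1:30000/v1/models", timeout=5)
for card in resp.json()["data"]:
    # max_model_len now carries tokenizer_manager.model_config.context_len
    print(card["id"], card.get("max_model_len"))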
{sglang-0.4.4.post2 → sglang-0.4.4.post3}/sglang/srt/entrypoints/verl_engine.py
@@ -19,6 +19,7 @@ import torch.distributed as dist
 from torch.distributed.tensor import DeviceMesh, DTensor
 
 from sglang.srt.model_executor.model_runner import LocalSerializedTensor
+from sglang.srt.patch_torch import monkey_patch_torch_reductions
 from sglang.srt.server import Engine
 from sglang.srt.utils import MultiprocessingSerializer, broadcast_pyobj
 
@@ -30,6 +31,7 @@ class VerlEngine:
         nnodes: int = 1,
         **kwargs,
     ):
+        monkey_patch_torch_reductions()
         self._device_mesh_cpu = device_mesh_cpu
         self._tp_rank = device_mesh_cpu.get_local_rank()
         self._tp_size = device_mesh_cpu.size()