sglang 0.4.2__tar.gz → 0.4.2.post2__tar.gz
This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
- {sglang-0.4.2/sglang.egg-info → sglang-0.4.2.post2}/PKG-INFO +16 -7
- {sglang-0.4.2 → sglang-0.4.2.post2}/README.md +6 -2
- {sglang-0.4.2 → sglang-0.4.2.post2}/pyproject.toml +10 -12
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/constrained/outlines_backend.py +9 -1
- sglang-0.4.2.post2/sglang/srt/custom_op.py +40 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/entrypoints/engine.py +2 -2
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/activation.py +10 -5
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/attention/flashinfer_backend.py +284 -39
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/attention/triton_backend.py +71 -7
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/attention/triton_ops/decode_attention.py +53 -59
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/attention/triton_ops/prefill_attention.py +6 -0
- sglang-0.4.2.post2/sglang/srt/layers/attention/vision.py +407 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/layernorm.py +1 -5
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/ep_moe/layer.py +1 -3
- sglang-0.4.2.post2/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=256,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
- sglang-0.4.2.post2/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang-0.4.2.post2/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=14336,device_name=AMD_Radeon_Graphics.json +200 -0
- sglang-0.4.2.post2/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=AMD_Radeon_Graphics.json +200 -0
- sglang-0.4.2.post2/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=AMD_Radeon_Graphics.json +200 -0
- sglang-0.4.2.post2/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json +178 -0
- sglang-0.4.2.post2/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=AMD_Radeon_Graphics.json +200 -0
- sglang-0.4.2.post2/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=8192,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json +175 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/fused_moe.py +3 -11
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/layer.py +1 -3
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/topk.py +4 -0
- sglang-0.4.2.post2/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
- sglang-0.4.2.post2/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang-0.4.2.post2/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
- sglang-0.4.2.post2/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang-0.4.2.post2/sglang/srt/layers/quantization/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang-0.4.2.post2/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang-0.4.2.post2/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
- sglang-0.4.2.post2/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang-0.4.2.post2/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
- sglang-0.4.2.post2/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang-0.4.2.post2/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
- sglang-0.4.2.post2/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang-0.4.2.post2/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
- sglang-0.4.2.post2/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang-0.4.2.post2/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang-0.4.2.post2/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang-0.4.2.post2/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
- sglang-0.4.2.post2/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang-0.4.2.post2/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
- sglang-0.4.2.post2/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang-0.4.2.post2/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
- sglang-0.4.2.post2/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/fp8.py +7 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/fp8_kernel.py +140 -2
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/rotary_embedding.py +29 -15
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/sampler.py +9 -6
- sglang-0.4.2.post2/sglang/srt/lora/backend/__init__.py +8 -0
- sglang-0.4.2.post2/sglang/srt/lora/backend/base_backend.py +95 -0
- sglang-0.4.2.post2/sglang/srt/lora/backend/flashinfer_backend.py +91 -0
- sglang-0.4.2.post2/sglang/srt/lora/backend/triton_backend.py +61 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/lora/lora.py +127 -112
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/lora/lora_manager.py +50 -18
- sglang-0.4.2.post2/sglang/srt/lora/triton_ops/__init__.py +5 -0
- sglang-0.4.2.post2/sglang/srt/lora/triton_ops/qkv_lora_b.py +182 -0
- sglang-0.4.2.post2/sglang/srt/lora/triton_ops/sgemm_lora_a.py +143 -0
- sglang-0.4.2.post2/sglang/srt/lora/triton_ops/sgemm_lora_b.py +159 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/managers/image_processor.py +77 -38
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/managers/scheduler.py +17 -3
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/mem_cache/base_prefix_cache.py +4 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/mem_cache/chunk_cache.py +3 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/mem_cache/radix_cache.py +30 -1
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/model_executor/cuda_graph_runner.py +77 -80
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/model_executor/forward_batch_info.py +58 -59
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/model_executor/model_runner.py +2 -2
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/minicpmv.py +129 -76
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/mllama.py +16 -56
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/qwen2.py +4 -1
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/qwen2_vl.py +19 -9
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/server_args.py +19 -2
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/speculative/build_eagle_tree.py +4 -2
- sglang-0.4.2.post2/sglang/srt/speculative/eagle_draft_cuda_graph_runner.py +213 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/speculative/eagle_utils.py +361 -372
- sglang-0.4.2.post2/sglang/srt/speculative/eagle_worker.py +315 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/utils.py +7 -2
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/test/runners.py +2 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/utils.py +42 -0
- sglang-0.4.2.post2/sglang/version.py +1 -0
- {sglang-0.4.2 → sglang-0.4.2.post2/sglang.egg-info}/PKG-INFO +16 -7
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang.egg-info/SOURCES.txt +40 -1
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang.egg-info/requires.txt +9 -4
- sglang-0.4.2/sglang/srt/layers/attention/vision.py +0 -204
- sglang-0.4.2/sglang/srt/layers/custom_op_util.py +0 -25
- sglang-0.4.2/sglang/srt/speculative/eagle_worker.py +0 -183
- sglang-0.4.2/sglang/version.py +0 -1
- {sglang-0.4.2 → sglang-0.4.2.post2}/LICENSE +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/setup.cfg +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/__init__.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/api.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/bench_latency.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/bench_offline_throughput.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/bench_one_batch.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/bench_one_batch_server.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/bench_serving.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/check_env.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/global_config.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/lang/__init__.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/lang/backend/__init__.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/lang/backend/anthropic.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/lang/backend/base_backend.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/lang/backend/litellm.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/lang/backend/openai.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/lang/backend/runtime_endpoint.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/lang/backend/vertexai.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/lang/chat_template.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/lang/choices.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/lang/compiler.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/lang/interpreter.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/lang/ir.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/lang/tracer.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/launch_server.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/llama3_eval.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/_custom_ops.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/aio_rwlock.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/configs/__init__.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/configs/chatglm.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/configs/dbrx.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/configs/device_config.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/configs/exaone.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/configs/load_config.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/configs/model_config.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/configs/qwen2vl.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/constrained/base_grammar_backend.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/constrained/outlines_jump_forward.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/constrained/xgrammar_backend.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/conversation.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/distributed/__init__.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/distributed/communication_op.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/distributed/device_communicators/cuda_wrapper.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/distributed/device_communicators/custom_all_reduce.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/distributed/device_communicators/custom_all_reduce_utils.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/distributed/device_communicators/hpu_communicator.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/distributed/device_communicators/pynccl.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/distributed/device_communicators/pynccl_wrapper.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/distributed/device_communicators/shm_broadcast.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/distributed/device_communicators/xpu_communicator.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/distributed/parallel_state.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/distributed/utils.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/entrypoints/http_server.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/function_call_parser.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/hf_transformers_utils.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/attention/__init__.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/attention/double_sparsity_backend.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/attention/torch_native_backend.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/attention/triton_ops/double_sparsity_attention.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/attention/triton_ops/extend_attention.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/dp_attention.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/linear.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/logits_processor.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/ep_moe/__init__.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/ep_moe/kernels.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_native.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/__init__.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=1280,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=2560,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=320,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=14336,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=2048,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=NVIDIA_H200.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/parameter.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/pooler.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/__init__.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/base_config.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/fp8_utils.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/int8_kernel.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/modelopt_quant.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/quantization/w8a8_int8.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/radix_attention.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/torchao_utils.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/vocab_parallel_embedding.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/lora/lora_config.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/managers/cache_controller.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/managers/configure_logging.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/managers/data_parallel_controller.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/managers/detokenizer_manager.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/managers/io_struct.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/managers/schedule_batch.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/managers/schedule_policy.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/managers/session_controller.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/managers/tokenizer_manager.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/managers/tp_worker.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/managers/tp_worker_overlap_thread.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/managers/utils.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/mem_cache/flush_cache.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/mem_cache/memory_pool.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/metrics/collector.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/metrics/func_timer.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/mm_utils.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/model_loader/__init__.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/model_loader/loader.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/model_loader/utils.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/model_loader/weight_utils.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/model_parallel.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/baichuan.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/chatglm.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/commandr.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/dbrx.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/deepseek.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/deepseek_v2.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/exaone.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/gemma.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/gemma2.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/gemma2_reward.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/gpt2.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/gpt_bigcode.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/granite.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/grok.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/internlm2.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/internlm2_reward.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/llama.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/llama_classification.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/llama_eagle.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/llama_embedding.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/llama_reward.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/llava.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/llavavid.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/minicpm.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/minicpm3.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/mistral.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/mixtral.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/mixtral_quant.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/olmo.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/olmo2.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/olmoe.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/phi3_small.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/qwen.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/qwen2_eagle.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/qwen2_moe.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/registry.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/stablelm.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/torch_native_llama.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/xverse.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/xverse_moe.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/models/yivl.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/openai_api/adapter.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/openai_api/protocol.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/sampling/custom_logit_processor.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/sampling/penaltylib/__init__.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/sampling/penaltylib/orchestrator.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/sampling/penaltylib/penalizers/frequency_penalty.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/sampling/penaltylib/penalizers/min_new_tokens.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/sampling/penaltylib/penalizers/presence_penalty.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/sampling/penaltylib/penalizers/repetition_penalty.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/sampling/sampling_batch_info.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/sampling/sampling_params.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/server.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/speculative/spec_info.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/torch_memory_saver_adapter.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/test/few_shot_gsm8k.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/test/few_shot_gsm8k_engine.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/test/run_eval.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/test/simple_eval_common.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/test/simple_eval_gpqa.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/test/simple_eval_humaneval.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/test/simple_eval_math.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/test/simple_eval_mgsm.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/test/simple_eval_mmlu.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/test/srt/sampling/penaltylib/utils.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/test/test_activation.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/test/test_block_fp8.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/test/test_layernorm.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/test/test_programs.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang/test/test_utils.py +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang.egg-info/dependency_links.txt +0 -0
- {sglang-0.4.2 → sglang-0.4.2.post2}/sglang.egg-info/top_level.txt +0 -0
{sglang-0.4.2/sglang.egg-info → sglang-0.4.2.post2}/PKG-INFO:

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: sglang
-Version: 0.4.2
+Version: 0.4.2.post2
 Summary: SGLang is yet another fast serving framework for large language models and vision language models.
 License: Apache License
                                Version 2.0, January 2004
@@ -225,7 +225,6 @@ Requires-Dist: huggingface_hub; extra == "runtime-common"
 Requires-Dist: interegular; extra == "runtime-common"
 Requires-Dist: modelscope; extra == "runtime-common"
 Requires-Dist: orjson; extra == "runtime-common"
-Requires-Dist: outlines<0.1.0,>=0.0.44; extra == "runtime-common"
 Requires-Dist: packaging; extra == "runtime-common"
 Requires-Dist: pillow; extra == "runtime-common"
 Requires-Dist: prometheus-client>=0.20.0; extra == "runtime-common"
@@ -240,21 +239,27 @@ Requires-Dist: xgrammar>=0.1.10; extra == "runtime-common"
 Provides-Extra: srt
 Requires-Dist: sglang[runtime_common]; extra == "srt"
 Requires-Dist: cuda-python; extra == "srt"
-Requires-Dist: sgl-kernel>=0.0.3; extra == "srt"
+Requires-Dist: sgl-kernel>=0.0.3.post1; extra == "srt"
 Requires-Dist: torch; extra == "srt"
 Requires-Dist: vllm==0.6.4.post1; extra == "srt"
-Requires-Dist: …
+Requires-Dist: flashinfer_python>=0.2.0.post2; extra == "srt"
+Requires-Dist: outlines<0.1.0,>=0.0.44; extra == "srt"
 Provides-Extra: srt-hip
 Requires-Dist: sglang[runtime_common]; extra == "srt-hip"
 Requires-Dist: torch; extra == "srt-hip"
-Requires-Dist: vllm==0.6.…
+Requires-Dist: vllm==0.6.7.dev2; extra == "srt-hip"
+Requires-Dist: outlines==0.1.11; extra == "srt-hip"
+Requires-Dist: sgl-kernel>=0.0.3.post1; extra == "srt-hip"
 Provides-Extra: srt-xpu
 Requires-Dist: sglang[runtime_common]; extra == "srt-xpu"
+Requires-Dist: outlines<0.1.0,>=0.0.44; extra == "srt-xpu"
 Provides-Extra: srt-hpu
 Requires-Dist: sglang[runtime_common]; extra == "srt-hpu"
+Requires-Dist: outlines<0.1.0,>=0.0.44; extra == "srt-hpu"
 Provides-Extra: srt-cpu
 Requires-Dist: sglang[runtime_common]; extra == "srt-cpu"
 Requires-Dist: torch; extra == "srt-cpu"
+Requires-Dist: outlines<0.1.0,>=0.0.44; extra == "srt-cpu"
 Provides-Extra: openai
 Requires-Dist: openai>=1.0; extra == "openai"
 Requires-Dist: tiktoken; extra == "openai"
@@ -333,7 +338,7 @@ Requires-Dist: sglang[test]; extra == "dev-cpu"
 | [**Slides**](https://github.com/sgl-project/sgl-learning-materials?tab=readme-ov-file#slides) |
 
 ## News
-- [2025/01] 🔥 SGLang provides day one support for DeepSeek V3/R1 models on NVIDIA and AMD GPUs with …
+- [2025/01] 🔥 SGLang provides day one support for DeepSeek V3/R1 models on NVIDIA and AMD GPUs with DeepSeek-specific optimizations. ([instructions](https://github.com/sgl-project/sglang/tree/main/benchmark/deepseek_v3), [AMD blog](https://www.amd.com/en/developer/resources/technical-articles/amd-instinct-gpus-power-deepseek-v3-revolutionizing-ai-development-with-sglang.html))
 - [2024/12] 🔥 v0.4 Release: Zero-Overhead Batch Scheduler, Cache-Aware Load Balancer, Faster Structured Outputs ([blog](https://lmsys.org/blog/2024-12-04-sglang-v0-4/)).
 - [2024/09] v0.3 Release: 7x Faster DeepSeek MLA, 1.5x Faster torch.compile, Multi-Image/Video LLaVA-OneVision ([blog](https://lmsys.org/blog/2024-09-04-sglang-v0-3/)).
 - [2024/07] v0.2 Release: Faster Llama3 Serving with SGLang Runtime (vs. TensorRT-LLM, vLLM) ([blog](https://lmsys.org/blog/2024-07-25-sglang-llama3/)).
@@ -372,7 +377,11 @@ Learn more in the release blogs: [v0.2 blog](https://lmsys.org/blog/2024-07-25-s
 [Development Roadmap (2024 Q4)](https://github.com/sgl-project/sglang/issues/1487)
 
 ## Adoption and Sponsorship
-The project is supported by (alphabetically): AMD, Baseten, Cursor, DataCrunch, Etched, Hyperbolic, Jam & Tea Studios, LinkedIn, LMSYS …
+The project is supported by (alphabetically): AMD, Atlas Cloud, Baseten, Cursor, DataCrunch, Etched, Hyperbolic, Jam & Tea Studios, LinkedIn, LMSYS CORP, Meituan, Nebius, Novita AI, NVIDIA, RunPod, Stanford, UC Berkeley, UCLA, xAI, 01.AI.
+
+## Contact Us
+
+For enterprises interested in adopting or deploying SGLang at scale, including technical consulting, sponsorship opportunities, or partnership inquiries, please contact us at contact@sglang.ai.
 
 ## Acknowledgment and Citation
 We learned the design and reused code from the following projects: [Guidance](https://github.com/guidance-ai/guidance), [vLLM](https://github.com/vllm-project/vllm), [LightLLM](https://github.com/ModelTC/lightllm), [FlashInfer](https://github.com/flashinfer-ai/flashinfer), [Outlines](https://github.com/outlines-dev/outlines), and [LMQL](https://github.com/eth-sri/lmql). Please cite the paper, [SGLang: Efficient Execution of Structured Language Model Programs](https://arxiv.org/abs/2312.07104), if you find the project useful.
```
{sglang-0.4.2 → sglang-0.4.2.post2}/README.md:

```diff
@@ -19,7 +19,7 @@
 | [**Slides**](https://github.com/sgl-project/sgl-learning-materials?tab=readme-ov-file#slides) |
 
 ## News
-- [2025/01] 🔥 SGLang provides day one support for DeepSeek V3/R1 models on NVIDIA and AMD GPUs with …
+- [2025/01] 🔥 SGLang provides day one support for DeepSeek V3/R1 models on NVIDIA and AMD GPUs with DeepSeek-specific optimizations. ([instructions](https://github.com/sgl-project/sglang/tree/main/benchmark/deepseek_v3), [AMD blog](https://www.amd.com/en/developer/resources/technical-articles/amd-instinct-gpus-power-deepseek-v3-revolutionizing-ai-development-with-sglang.html))
 - [2024/12] 🔥 v0.4 Release: Zero-Overhead Batch Scheduler, Cache-Aware Load Balancer, Faster Structured Outputs ([blog](https://lmsys.org/blog/2024-12-04-sglang-v0-4/)).
 - [2024/09] v0.3 Release: 7x Faster DeepSeek MLA, 1.5x Faster torch.compile, Multi-Image/Video LLaVA-OneVision ([blog](https://lmsys.org/blog/2024-09-04-sglang-v0-3/)).
 - [2024/07] v0.2 Release: Faster Llama3 Serving with SGLang Runtime (vs. TensorRT-LLM, vLLM) ([blog](https://lmsys.org/blog/2024-07-25-sglang-llama3/)).
@@ -58,7 +58,11 @@ Learn more in the release blogs: [v0.2 blog](https://lmsys.org/blog/2024-07-25-s
 [Development Roadmap (2024 Q4)](https://github.com/sgl-project/sglang/issues/1487)
 
 ## Adoption and Sponsorship
-The project is supported by (alphabetically): AMD, Baseten, Cursor, DataCrunch, Etched, Hyperbolic, Jam & Tea Studios, LinkedIn, LMSYS …
+The project is supported by (alphabetically): AMD, Atlas Cloud, Baseten, Cursor, DataCrunch, Etched, Hyperbolic, Jam & Tea Studios, LinkedIn, LMSYS CORP, Meituan, Nebius, Novita AI, NVIDIA, RunPod, Stanford, UC Berkeley, UCLA, xAI, 01.AI.
+
+## Contact Us
+
+For enterprises interested in adopting or deploying SGLang at scale, including technical consulting, sponsorship opportunities, or partnership inquiries, please contact us at contact@sglang.ai.
 
 ## Acknowledgment and Citation
 We learned the design and reused code from the following projects: [Guidance](https://github.com/guidance-ai/guidance), [vLLM](https://github.com/vllm-project/vllm), [LightLLM](https://github.com/ModelTC/lightllm), [FlashInfer](https://github.com/flashinfer-ai/flashinfer), [Outlines](https://github.com/outlines-dev/outlines), and [LMQL](https://github.com/eth-sri/lmql). Please cite the paper, [SGLang: Efficient Execution of Structured Language Model Programs](https://arxiv.org/abs/2312.07104), if you find the project useful.
```
{sglang-0.4.2 → sglang-0.4.2.post2}/pyproject.toml:

```diff
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "sglang"
-version = "0.4.2"
+version = "0.4.2.post2"
 description = "SGLang is yet another fast serving framework for large language models and vision language models."
 readme = "README.md"
 requires-python = ">=3.8"
@@ -19,31 +19,29 @@ dependencies = ["requests", "tqdm", "numpy", "IPython", "setproctitle"]
 runtime_common = [
     "aiohttp", "decord", "fastapi",
     "hf_transfer", "huggingface_hub", "interegular", "modelscope",
-    "orjson", "…
-    "…
-    "…
-    "pyzmq>=25.1.2", "torchao>=0.7.0", "uvicorn", "uvloop",
-    "xgrammar>=0.1.10"
+    "orjson", "packaging", "pillow", "prometheus-client>=0.20.0",
+    "psutil", "pydantic", "python-multipart", "pyzmq>=25.1.2",
+    "torchao>=0.7.0", "uvicorn", "uvloop", "xgrammar>=0.1.10"
 ]
 srt = [
     "sglang[runtime_common]", "cuda-python",
-    "sgl-kernel>=0.0.3", "torch", "vllm==0.6.4.post1",
-    "…
+    "sgl-kernel>=0.0.3.post1", "torch", "vllm==0.6.4.post1",
+    "flashinfer_python>=0.2.0.post2", "outlines>=0.0.44,<0.1.0"
 ]
 
 # HIP (Heterogeneous-computing Interface for Portability) for AMD
 # => base docker rocm/vllm-dev:20241022, not from public vllm whl
-srt_hip = ["sglang[runtime_common]", "torch", "vllm==0.6.…
+srt_hip = ["sglang[runtime_common]", "torch", "vllm==0.6.7.dev2", "outlines==0.1.11", "sgl-kernel>=0.0.3.post1"]
 # xpu is not enabled in public vllm and torch whl,
 # need to follow https://docs.vllm.ai/en/latest/getting_started/xpu-installation.html install vllm
-srt_xpu = ["sglang[runtime_common]"]
+srt_xpu = ["sglang[runtime_common]", "outlines>=0.0.44,<0.1.0"]
 # For Intel Gaudi (device: hpu) follow the installation guide
 # https://docs.vllm.ai/en/latest/getting_started/gaudi-installation.html
-srt_hpu = ["sglang[runtime_common]"]
+srt_hpu = ["sglang[runtime_common]", "outlines>=0.0.44,<0.1.0"]
 # CPU: currently, there are no pre-built vllm wheels for CPU.
 # To install vllm for CPU, please follow the instruction here:
 # https://docs.vllm.ai/en/latest/getting_started/installation/cpu/index.html
-srt_cpu = ["sglang[runtime_common]", "torch"]
+srt_cpu = ["sglang[runtime_common]", "torch", "outlines>=0.0.44,<0.1.0"]
 
 openai = ["openai>=1.0", "tiktoken"]
 anthropic = ["anthropic>=0.20.0"]
```
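Net effect of the dependency shuffle: `outlines` leaves `runtime_common` and is pinned per hardware target, so the ROCm extra can move to `outlines==0.1.11` (which relies on `outlines_core`; see the `outlines_backend.py` change below) while the CUDA, XPU, HPU, and CPU extras keep the older `outlines>=0.0.44,<0.1.0` range. The CUDA extra also picks up `flashinfer_python>=0.2.0.post2` and bumps `sgl-kernel` to `>=0.0.3.post1`, so a plain `pip install "sglang[srt]"` of 0.4.2.post2 pulls the new kernel packages automatically.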
{sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/constrained/outlines_backend.py:

```diff
@@ -20,7 +20,6 @@ from typing import Dict, List, Optional, Tuple, Union
 import interegular
 import torch
 from outlines.fsm.guide import RegexGuide
-from outlines.fsm.json_schema import build_regex_from_schema
 from outlines.models.transformers import TransformerTokenizer
 from pydantic import BaseModel
 
@@ -29,6 +28,15 @@ from sglang.srt.constrained.base_grammar_backend import (
     BaseGrammarObject,
 )
 from sglang.srt.constrained.outlines_jump_forward import OutlinesJumpForwardMap
+from sglang.srt.utils import is_hip
+
+is_hip_ = is_hip()
+
+if is_hip_:
+    from outlines_core.fsm.json_schema import build_regex_from_schema
+else:
+    from outlines.fsm.json_schema import build_regex_from_schema
+
 
 logger = logging.getLogger(__name__)
```
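The conditional import only changes where `build_regex_from_schema` comes from (`outlines_core` is the Rust-backed rewrite used by newer outlines releases); the contract is the same either way: a JSON Schema string goes in, a regular expression comes out, which the backend then compiles into a `RegexGuide` to constrain decoding. A minimal sketch of that contract, with an illustrative schema that is not taken from the source:

```python
import json

# On ROCm (is_hip() is true) the same function is imported from
# outlines_core.fsm.json_schema instead; the call looks identical.
from outlines.fsm.json_schema import build_regex_from_schema

schema = json.dumps(
    {
        "type": "object",
        "properties": {"name": {"type": "string"}, "age": {"type": "integer"}},
        "required": ["name", "age"],
    }
)

# The returned regex matches exactly the JSON documents satisfying the schema.
regex = build_regex_from_schema(schema)
```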
sglang-0.4.2.post2/sglang/srt/custom_op.py (new file):

```diff
@@ -0,0 +1,40 @@
+import torch
+from torch import nn
+
+_is_cuda = torch.cuda.is_available() and torch.version.cuda
+_is_rocm = torch.cuda.is_available() and torch.version.hip
+
+
+class CustomOp(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self._forward_method = self.dispatch_forward()
+
+    def forward(self, *args, **kwargs):
+        return self._forward_method(*args, **kwargs)
+
+    def forward_native(self, *args, **kwargs):
+        raise NotImplementedError
+
+    def forward_cuda(self, *args, **kwargs):
+        raise NotImplementedError
+
+    def forward_hip(self, *args, **kwargs):
+        return self.forward_cuda(*args, **kwargs)
+
+    def forward_xpu(self, *args, **kwargs):
+        return self.forward_native(*args, **kwargs)
+
+    def forward_hpu(self, *args, **kwargs):
+        return self.forward_native(*args, **kwargs)
+
+    def forward_cpu(self, *args, **kwargs):
+        return self.forward_native(*args, **kwargs)
+
+    def dispatch_forward(self):
+        if _is_cuda:
+            return self.forward_cuda
+        elif _is_rocm:
+            return self.forward_hip
+        else:
+            return self.forward_native
```
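This in-tree `CustomOp` replaces the registration-decorator pattern removed from `activation.py` below: the forward implementation is resolved once at construction time, so each call is a single indirect dispatch, and device families inherit sensible fallbacks (HIP reuses the CUDA path; XPU, HPU, and CPU fall back to the pure-PyTorch `forward_native`). A toy subclass showing the pattern; `ScaleShift` is a hypothetical op, not part of the source:

```python
import torch

from sglang.srt.custom_op import CustomOp


class ScaleShift(CustomOp):
    """Hypothetical op (y = x * scale + shift) illustrating the dispatch pattern."""

    def __init__(self, scale: float, shift: float):
        super().__init__()  # dispatch_forward() runs here, once
        self.scale = scale
        self.shift = shift

    def forward_native(self, x: torch.Tensor) -> torch.Tensor:
        return x * self.scale + self.shift

    def forward_cuda(self, x: torch.Tensor) -> torch.Tensor:
        # A real op would call a fused sgl-kernel here; the sketch reuses native.
        return self.forward_native(x)


op = ScaleShift(2.0, 1.0)
y = op(torch.ones(4))  # routes to forward_cuda on CUDA builds, forward_native on CPU-only builds
```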
{sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/entrypoints/engine.py:

```diff
@@ -316,8 +316,8 @@ def _set_envs_and_config(server_args: ServerArgs):
     # Check flashinfer version
     if server_args.attention_backend == "flashinfer":
         assert_pkg_version(
-            "…
-            "0.…
+            "flashinfer_python",
+            "0.2.0.post2",
             "Please uninstall the old version and "
             "reinstall the latest version by following the instructions "
             "at https://docs.flashinfer.ai/installation.html.",
```
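`assert_pkg_version` comes from `sglang.srt.utils` and aborts startup when the installed distribution is too old; the engine now checks for the renamed `flashinfer_python` package at its new minimum version. The helper's implementation is not part of this diff; a rough functional equivalent, sketched with `importlib.metadata`, might look like:

```python
from importlib.metadata import PackageNotFoundError, version

from packaging.version import parse  # "packaging" is already a runtime dependency


def assert_pkg_version(pkg: str, min_version: str, message: str) -> None:
    """Raise if `pkg` is missing or older than `min_version` (sketch, not sglang's code)."""
    try:
        installed = version(pkg)
    except PackageNotFoundError:
        raise RuntimeError(f"{pkg} is not installed. {message}")
    if parse(installed) < parse(min_version):
        raise RuntimeError(
            f"{pkg}=={installed} is below the required {min_version}. {message}"
        )


assert_pkg_version("flashinfer_python", "0.2.0.post2", "Reinstall per docs.flashinfer.ai.")
```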
{sglang-0.4.2 → sglang-0.4.2.post2}/sglang/srt/layers/activation.py:

```diff
@@ -25,21 +25,18 @@ from sglang.srt.utils import is_cuda_available
 if is_cuda_available():
     from sgl_kernel import gelu_and_mul, gelu_tanh_and_mul, silu_and_mul
 
-from …
-
+from sglang.srt.custom_op import CustomOp
 from sglang.srt.distributed import (
     divide,
     get_tensor_model_parallel_rank,
     get_tensor_model_parallel_world_size,
 )
-from sglang.srt.layers.custom_op_util import register_custom_op
 from sglang.srt.layers.quantization.base_config import QuantizationConfig
 from sglang.srt.utils import set_weight_attrs
 
 logger = logging.getLogger(__name__)
 
 
-@register_custom_op("sglang_silu_and_mul")
 class SiluAndMul(CustomOp):
     def forward_native(self, x: torch.Tensor) -> torch.Tensor:
         d = x.shape[-1] // 2
@@ -53,7 +50,6 @@ class SiluAndMul(CustomOp):
         return out
 
 
-@register_custom_op("sglang_gelu_and_mul")
 class GeluAndMul(CustomOp):
     def __init__(self, approximate="tanh"):
         super().__init__()
@@ -76,6 +72,15 @@ class GeluAndMul(CustomOp):
         return out
 
 
+class QuickGELU(CustomOp):
+    def forward_native(self, x: torch.Tensor) -> torch.Tensor:
+        return x * torch.sigmoid(1.702 * x)
+
+    def forward_cuda(self, x: torch.Tensor) -> torch.Tensor:
+        # TODO(zhyncs): Implement the CUDA kernel for QuickGELU in sgl-kernel
+        return self.forward_native(x)
+
+
 class ScaledActivation(nn.Module):
     """An activation function with post-scale parameters.
```
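`QuickGELU`, added above, is the sigmoid approximation `x * sigmoid(1.702 * x)` used by CLIP-style vision encoders, presumably in support of the reworked vision attention stack (`sglang/srt/layers/attention/vision.py`) shipped in this release. It tracks exact GELU closely enough that falling back to `forward_native` on CUDA is safe until a fused kernel lands. A quick illustrative check:

```python
import torch
import torch.nn.functional as F

x = torch.linspace(-4.0, 4.0, steps=1001)
quick = x * torch.sigmoid(1.702 * x)  # QuickGELU.forward_native
exact = F.gelu(x)                     # exact (erf-based) GELU

# The approximation stays within roughly 0.02 of exact GELU on this range.
print(f"max abs error: {(quick - exact).abs().max().item():.4f}")
```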
|