sglang 0.4.4__tar.gz → 0.4.4.post1__tar.gz

This diff shows the content of publicly available package versions as released to a supported public registry. It is provided for informational purposes only and reflects the changes between the two versions as published in that registry.
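The rename is driven by a one-line version bump: as the +1/-1 counts in the listing below suggest, sglang/version.py is replaced wholesale and pyproject.toml updates its version field. A minimal sketch of the presumed new sglang/version.py (the exact content is an assumption inferred from the file counts, not copied from the diff):

```python
# sglang/version.py in 0.4.4.post1 -- presumed single-line content;
# the removed 0.4.4 file carried the same line with "0.4.4".
__version__ = "0.4.4.post1"
```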
Files changed (529)
  1. {sglang-0.4.4/sglang.egg-info → sglang-0.4.4.post1}/PKG-INFO +1 -1
  2. {sglang-0.4.4 → sglang-0.4.4.post1}/pyproject.toml +1 -1
  3. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/function_call_parser.py +33 -2
  4. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/dp_attention.py +30 -2
  5. sglang-0.4.4.post1/sglang/srt/layers/elementwise.py +411 -0
  6. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/logits_processor.py +1 -0
  7. sglang-0.4.4.post1/sglang/srt/layers/moe/router.py +342 -0
  8. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/managers/cache_controller.py +2 -0
  9. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/managers/data_parallel_controller.py +1 -1
  10. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/managers/schedule_batch.py +1 -1
  11. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/managers/scheduler.py +52 -18
  12. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/managers/scheduler_output_processor_mixin.py +4 -1
  13. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/mem_cache/hiradix_cache.py +9 -1
  14. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/mem_cache/memory_pool.py +4 -1
  15. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/model_executor/cuda_graph_runner.py +59 -16
  16. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/model_executor/forward_batch_info.py +13 -4
  17. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/deepseek_v2.py +180 -177
  18. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/grok.py +374 -119
  19. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/openai_api/adapter.py +22 -20
  20. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/server_args.py +5 -5
  21. sglang-0.4.4.post1/sglang/version.py +1 -0
  22. {sglang-0.4.4 → sglang-0.4.4.post1/sglang.egg-info}/PKG-INFO +1 -1
  23. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang.egg-info/SOURCES.txt +2 -0
  24. sglang-0.4.4/sglang/version.py +0 -1
  25. {sglang-0.4.4 → sglang-0.4.4.post1}/LICENSE +0 -0
  26. {sglang-0.4.4 → sglang-0.4.4.post1}/README.md +0 -0
  27. {sglang-0.4.4 → sglang-0.4.4.post1}/setup.cfg +0 -0
  28. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/__init__.py +0 -0
  29. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/api.py +0 -0
  30. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/bench_offline_throughput.py +0 -0
  31. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/bench_one_batch.py +0 -0
  32. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/bench_one_batch_server.py +0 -0
  33. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/bench_serving.py +0 -0
  34. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/check_env.py +0 -0
  35. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/global_config.py +0 -0
  36. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/lang/__init__.py +0 -0
  37. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/lang/backend/__init__.py +0 -0
  38. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/lang/backend/anthropic.py +0 -0
  39. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/lang/backend/base_backend.py +0 -0
  40. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/lang/backend/litellm.py +0 -0
  41. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/lang/backend/openai.py +0 -0
  42. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/lang/backend/runtime_endpoint.py +0 -0
  43. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/lang/backend/vertexai.py +0 -0
  44. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/lang/chat_template.py +0 -0
  45. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/lang/choices.py +0 -0
  46. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/lang/compiler.py +0 -0
  47. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/lang/interpreter.py +0 -0
  48. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/lang/ir.py +0 -0
  49. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/lang/tracer.py +0 -0
  50. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/launch_server.py +0 -0
  51. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/llama3_eval.py +0 -0
  52. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/_custom_ops.py +0 -0
  53. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/aio_rwlock.py +0 -0
  54. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/configs/__init__.py +0 -0
  55. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/configs/chatglm.py +0 -0
  56. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/configs/dbrx.py +0 -0
  57. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/configs/device_config.py +0 -0
  58. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/configs/exaone.py +0 -0
  59. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/configs/janus_pro.py +0 -0
  60. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/configs/load_config.py +0 -0
  61. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/configs/model_config.py +0 -0
  62. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/configs/qwen2_5_vl_config.py +0 -0
  63. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/constrained/base_grammar_backend.py +0 -0
  64. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/constrained/llguidance_backend.py +0 -0
  65. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/constrained/outlines_backend.py +0 -0
  66. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/constrained/outlines_jump_forward.py +0 -0
  67. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/constrained/xgrammar_backend.py +0 -0
  68. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/conversation.py +0 -0
  69. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/custom_op.py +0 -0
  70. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/distributed/__init__.py +0 -0
  71. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/distributed/communication_op.py +0 -0
  72. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/distributed/device_communicators/cuda_wrapper.py +0 -0
  73. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/distributed/device_communicators/custom_all_reduce.py +0 -0
  74. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/distributed/device_communicators/custom_all_reduce_utils.py +0 -0
  75. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/distributed/device_communicators/hpu_communicator.py +0 -0
  76. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/distributed/device_communicators/pynccl.py +0 -0
  77. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/distributed/device_communicators/pynccl_wrapper.py +0 -0
  78. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/distributed/device_communicators/shm_broadcast.py +0 -0
  79. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/distributed/device_communicators/xpu_communicator.py +0 -0
  80. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/distributed/parallel_state.py +0 -0
  81. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/distributed/utils.py +0 -0
  82. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/entrypoints/engine.py +0 -0
  83. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/entrypoints/http_server.py +0 -0
  84. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/entrypoints/verl_engine.py +0 -0
  85. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/hf_transformers_utils.py +0 -0
  86. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/activation.py +0 -0
  87. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/attention/base_attn_backend.py +0 -0
  88. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/attention/double_sparsity_backend.py +0 -0
  89. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/attention/flashinfer_backend.py +0 -0
  90. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/attention/flashinfer_mla_backend.py +0 -0
  91. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/attention/torch_native_backend.py +0 -0
  92. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/attention/triton_backend.py +0 -0
  93. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/attention/triton_ops/decode_attention.py +0 -0
  94. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/attention/triton_ops/double_sparsity_attention.py +0 -0
  95. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/attention/triton_ops/extend_attention.py +0 -0
  96. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/attention/triton_ops/prefill_attention.py +0 -0
  97. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/attention/triton_ops/rocm_mla_decode_rope.py +0 -0
  98. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/attention/utils.py +0 -0
  99. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/attention/vision.py +0 -0
  100. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/layernorm.py +0 -0
  101. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/linear.py +0 -0
  102. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/ep_moe/__init__.py +0 -0
  103. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/ep_moe/kernels.py +0 -0
  104. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/ep_moe/layer.py +0 -0
  105. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_native.py +0 -0
  106. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/__init__.py +0 -0
  107. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  108. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  109. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  110. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  111. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  112. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +0 -0
  113. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  114. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  115. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  116. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  117. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  118. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +0 -0
  119. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  120. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  121. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  122. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  123. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  124. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  125. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  126. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  127. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  128. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +0 -0
  129. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  130. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  131. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  132. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  133. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  134. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  135. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +0 -0
  136. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  137. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
  138. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  139. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +0 -0
  140. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  141. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +0 -0
  142. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  143. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  144. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  145. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  146. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=256,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  147. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  148. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  149. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  150. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
  151. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  152. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
  153. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  154. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  155. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  156. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=1280,device_name=NVIDIA_H200.json +0 -0
  157. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  158. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  159. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=2560,device_name=NVIDIA_H200.json +0 -0
  160. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  161. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  162. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  163. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=320,device_name=NVIDIA_H200.json +0 -0
  164. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  165. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
  166. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +0 -0
  167. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  168. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  169. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  170. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_H200.json +0 -0
  171. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +0 -0
  172. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +0 -0
  173. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=14336,device_name=AMD_Radeon_Graphics.json +0 -0
  174. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  175. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  176. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=14336,device_name=NVIDIA_H200.json +0 -0
  177. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +0 -0
  178. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +0 -0
  179. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=AMD_Radeon_Graphics.json +0 -0
  180. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +0 -0
  181. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  182. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  183. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  184. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=NVIDIA_H200.json +0 -0
  185. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  186. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  187. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  188. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  189. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=2048,device_name=NVIDIA_H200.json +0 -0
  190. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +0 -0
  191. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +0 -0
  192. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=AMD_Radeon_Graphics.json +0 -0
  193. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +0 -0
  194. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  195. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +0 -0
  196. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  197. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  198. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  199. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_H200.json +0 -0
  200. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +0 -0
  201. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +0 -0
  202. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +0 -0
  203. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json +0 -0
  204. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  205. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  206. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  207. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  208. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=NVIDIA_H200.json +0 -0
  209. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +0 -0
  210. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +0 -0
  211. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=AMD_Radeon_Graphics.json +0 -0
  212. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  213. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  214. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  215. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  216. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=NVIDIA_H200.json +0 -0
  217. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +0 -0
  218. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +0 -0
  219. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=8192,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json +0 -0
  220. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  221. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  222. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/fused_moe.py +0 -0
  223. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/fused_moe_triton/layer.py +0 -0
  224. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/moe/topk.py +0 -0
  225. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/parameter.py +0 -0
  226. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/pooler.py +0 -0
  227. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/__init__.py +0 -0
  228. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/base_config.py +0 -0
  229. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/blockwise_int8.py +0 -0
  230. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  231. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  232. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  233. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  234. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  235. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  236. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  237. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  238. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  239. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  240. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  241. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  242. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  243. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  244. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  245. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  246. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  247. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  248. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  249. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  250. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  251. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  252. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  253. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  254. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  255. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  256. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=24576,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  257. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=24576,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  258. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=24576,K=1536,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  259. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  260. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  261. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  262. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  263. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  264. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  265. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  266. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  267. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  268. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  269. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  270. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  271. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  272. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  273. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  274. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  275. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  276. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  277. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  278. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  279. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  280. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  281. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  282. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  283. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  284. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  285. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  286. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  287. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  288. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  289. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  290. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  291. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  292. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  293. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  294. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  295. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  296. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  297. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  298. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  299. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  300. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  301. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  302. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  303. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  304. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  305. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  306. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  307. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  308. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  309. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  310. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  311. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  312. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  313. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  314. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  315. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  316. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  317. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  318. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  319. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  320. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  321. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  322. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  323. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  324. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  325. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  326. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  327. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  328. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  329. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  330. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  331. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  332. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  333. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  334. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  335. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  336. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  337. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  338. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  339. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  340. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  341. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  342. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  343. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  344. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  345. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  346. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  347. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  348. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  349. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  350. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  351. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  352. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  353. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  354. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  355. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  356. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  357. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  358. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  359. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  360. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  361. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  362. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  363. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  364. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  365. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  366. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  367. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  368. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  369. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  370. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  371. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  372. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  373. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  374. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  375. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  376. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  377. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  378. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  379. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  380. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  381. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  382. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/fp8.py +0 -0
  383. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/fp8_kernel.py +0 -0
  384. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/fp8_utils.py +0 -0
  385. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/gptq.py +0 -0
  386. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/int8_kernel.py +0 -0
  387. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/int8_utils.py +0 -0
  388. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/modelopt_quant.py +0 -0
  389. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/w8a8_fp8.py +0 -0
  390. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/quantization/w8a8_int8.py +0 -0
  391. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/radix_attention.py +0 -0
  392. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/rotary_embedding.py +0 -0
  393. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/sampler.py +0 -0
  394. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/torchao_utils.py +0 -0
  395. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/layers/vocab_parallel_embedding.py +0 -0
  396. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/lora/backend/__init__.py +0 -0
  397. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/lora/backend/base_backend.py +0 -0
  398. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/lora/backend/flashinfer_backend.py +0 -0
  399. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/lora/backend/triton_backend.py +0 -0
  400. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/lora/layers.py +0 -0
  401. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/lora/lora.py +0 -0
  402. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/lora/lora_config.py +0 -0
  403. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/lora/lora_manager.py +0 -0
  404. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/lora/mem_pool.py +0 -0
  405. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/lora/triton_ops/__init__.py +0 -0
  406. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/lora/triton_ops/gate_up_lora_b.py +0 -0
  407. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/lora/triton_ops/qkv_lora_b.py +0 -0
  408. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/lora/triton_ops/sgemm_lora_a.py +0 -0
  409. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/lora/triton_ops/sgemm_lora_b.py +0 -0
  410. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/lora/utils.py +0 -0
  411. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/managers/configure_logging.py +0 -0
  412. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/managers/detokenizer_manager.py +0 -0
  413. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/managers/image_processor.py +0 -0
  414. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/managers/image_processors/base_image_processor.py +0 -0
  415. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/managers/image_processors/janus_pro.py +0 -0
  416. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/managers/image_processors/llava.py +0 -0
  417. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/managers/image_processors/minicpmv.py +0 -0
  418. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/managers/image_processors/mlama.py +0 -0
  419. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/managers/image_processors/qwen_vl.py +0 -0
  420. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/managers/io_struct.py +0 -0
  421. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/managers/multi_modality_padding.py +0 -0
  422. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/managers/schedule_policy.py +0 -0
  423. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/managers/session_controller.py +0 -0
  424. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/managers/tokenizer_manager.py +0 -0
  425. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/managers/tp_worker.py +0 -0
  426. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/managers/tp_worker_overlap_thread.py +0 -0
  427. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/managers/utils.py +0 -0
  428. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/mem_cache/base_prefix_cache.py +0 -0
  429. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/mem_cache/chunk_cache.py +0 -0
  430. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/mem_cache/flush_cache.py +0 -0
  431. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/mem_cache/paged_allocator.py +0 -0
  432. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/mem_cache/radix_cache.py +0 -0
  433. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/metrics/collector.py +0 -0
  434. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/metrics/func_timer.py +0 -0
  435. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/mm_utils.py +0 -0
  436. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/model_executor/model_runner.py +0 -0
  437. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/model_loader/__init__.py +0 -0
  438. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/model_loader/loader.py +0 -0
  439. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/model_loader/utils.py +0 -0
  440. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/model_loader/weight_utils.py +0 -0
  441. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/model_parallel.py +0 -0
  442. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/baichuan.py +0 -0
  443. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/chatglm.py +0 -0
  444. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/commandr.py +0 -0
  445. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/dbrx.py +0 -0
  446. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/deepseek.py +0 -0
  447. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/deepseek_janus_pro.py +0 -0
  448. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/deepseek_nextn.py +0 -0
  449. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/exaone.py +0 -0
  450. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/gemma.py +0 -0
  451. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/gemma2.py +0 -0
  452. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/gemma2_reward.py +0 -0
  453. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/gpt2.py +0 -0
  454. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/gpt_bigcode.py +0 -0
  455. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/granite.py +0 -0
  456. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/internlm2.py +0 -0
  457. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/internlm2_reward.py +0 -0
  458. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/llama.py +0 -0
  459. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/llama_classification.py +0 -0
  460. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/llama_eagle.py +0 -0
  461. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/llama_embedding.py +0 -0
  462. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/llama_reward.py +0 -0
  463. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/llava.py +0 -0
  464. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/llavavid.py +0 -0
  465. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/minicpm.py +0 -0
  466. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/minicpm3.py +0 -0
  467. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/minicpmv.py +0 -0
  468. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/mistral.py +0 -0
  469. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/mixtral.py +0 -0
  470. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/mixtral_quant.py +0 -0
  471. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/mllama.py +0 -0
  472. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/olmo.py +0 -0
  473. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/olmo2.py +0 -0
  474. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/olmoe.py +0 -0
  475. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/phi3_small.py +0 -0
  476. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/qwen.py +0 -0
  477. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/qwen2.py +0 -0
  478. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/qwen2_5_vl.py +0 -0
  479. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/qwen2_eagle.py +0 -0
  480. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/qwen2_moe.py +0 -0
  481. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/qwen2_rm.py +0 -0
  482. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/qwen2_vl.py +0 -0
  483. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/registry.py +0 -0
  484. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/stablelm.py +0 -0
  485. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/torch_native_llama.py +0 -0
  486. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/xverse.py +0 -0
  487. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/xverse_moe.py +0 -0
  488. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/models/yivl.py +0 -0
  489. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/openai_api/protocol.py +0 -0
  490. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/reasoning_parser.py +0 -0
  491. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/sampling/custom_logit_processor.py +0 -0
  492. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/sampling/penaltylib/__init__.py +0 -0
  493. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/sampling/penaltylib/frequency_penalty.py +0 -0
  494. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/sampling/penaltylib/min_new_tokens.py +0 -0
  495. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/sampling/penaltylib/orchestrator.py +0 -0
  496. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/sampling/penaltylib/presence_penalty.py +0 -0
  497. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/sampling/sampling_batch_info.py +0 -0
  498. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/sampling/sampling_params.py +0 -0
  499. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/server.py +0 -0
  500. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/speculative/build_eagle_tree.py +0 -0
  501. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/speculative/eagle_draft_cuda_graph_runner.py +0 -0
  502. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/speculative/eagle_utils.py +0 -0
  503. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/speculative/eagle_worker.py +0 -0
  504. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/speculative/spec_info.py +0 -0
  505. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/torch_memory_saver_adapter.py +0 -0
  506. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/utils.py +0 -0
  507. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/srt/warmup.py +0 -0
  508. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/test/few_shot_gsm8k.py +0 -0
  509. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/test/few_shot_gsm8k_engine.py +0 -0
  510. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/test/run_eval.py +0 -0
  511. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/test/runners.py +0 -0
  512. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/test/send_one.py +0 -0
  513. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/test/simple_eval_common.py +0 -0
  514. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/test/simple_eval_gpqa.py +0 -0
  515. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/test/simple_eval_humaneval.py +0 -0
  516. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/test/simple_eval_math.py +0 -0
  517. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/test/simple_eval_mgsm.py +0 -0
  518. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/test/simple_eval_mmlu.py +0 -0
  519. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/test/test_activation.py +0 -0
  520. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/test/test_block_fp8.py +0 -0
  521. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/test/test_block_fp8_ep.py +0 -0
  522. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/test/test_custom_ops.py +0 -0
  523. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/test/test_layernorm.py +0 -0
  524. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/test/test_programs.py +0 -0
  525. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/test/test_utils.py +0 -0
  526. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang/utils.py +0 -0
  527. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang.egg-info/dependency_links.txt +0 -0
  528. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang.egg-info/requires.txt +0 -0
  529. {sglang-0.4.4 → sglang-0.4.4.post1}/sglang.egg-info/top_level.txt +0 -0
--- sglang-0.4.4/sglang.egg-info/PKG-INFO
+++ sglang-0.4.4.post1/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: sglang
-Version: 0.4.4
+Version: 0.4.4.post1
 Summary: SGLang is yet another fast serving framework for large language models and vision language models.
 License: Apache License
         Version 2.0, January 2004
--- sglang-0.4.4/pyproject.toml
+++ sglang-0.4.4.post1/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "sglang"
-version = "0.4.4"
+version = "0.4.4.post1"
 description = "SGLang is yet another fast serving framework for large language models and vision language models."
 readme = "README.md"
 requires-python = ">=3.8"
--- sglang-0.4.4/sglang/srt/function_call_parser.py
+++ sglang-0.4.4.post1/sglang/srt/function_call_parser.py
@@ -318,6 +318,10 @@ class Qwen25Detector(BaseFormatDetector):
         self.bot_token = "<tool_call>"
         self.eot_token = "</tool_call>"
 
+    def has_tool_call(self, text: str) -> bool:
+        """Check if the text contains a Qwen 2.5 format tool call."""
+        return self.bot_token in text
+
     def detect_and_parse(self, text: str, tools: List[Function]) -> List[ToolCallItem]:
         """
         One-time parsing: Detects and parses tool calls in the provided text.
@@ -352,6 +356,10 @@ class MistralDetector(BaseFormatDetector):
         self.bot_token = "[TOOL_CALLS] ["
         self.tool_call_regex = re.compile(r"\[{.*}\]", re.DOTALL)
 
+    def has_tool_call(self, text: str) -> bool:
+        """Check if the text contains a Mistral format tool call."""
+        return self.bot_token in text
+
     def _clean_text(self, text: str) -> str:
         """
         clean text to only leave ''[TOOL_CALLS] [{"name": xxx, "arguments": {xxx}}]'
@@ -397,12 +405,21 @@ class Llama32Detector(BaseFormatDetector):
         super().__init__()
         self.bot_token = "<|python_tag|>"
 
+    def has_tool_call(self, text: str) -> bool:
+        """Check if the text contains a Llama 3.2 format tool call."""
+        # depending on the prompt format the Llama model may or may not
+        # prefix the output with the <|python_tag|> token
+        return "<|python_tag|>" in text or text.startswith("{")
+
     def detect_and_parse(self, text: str, tools: List[Function]) -> List[ToolCallItem]:
         """Parse function calls from text, handling multiple JSON objects."""
-        if "<|python_tag|>" not in text:
+        if "<|python_tag|>" not in text and not text.startswith("{"):
             return []
 
-        _, action_text = text.split("<|python_tag|>")
+        if "<|python_tag|>" in text:
+            _, action_text = text.split("<|python_tag|>")
+        else:
+            action_text = text
 
         # Split by semicolon and process each part
         json_parts = [part.strip() for part in action_text.split(";") if part.strip()]
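Note: the Llama 3.2 change above means a bare JSON object now counts as a tool call even when the model omits the <|python_tag|> marker. A minimal sketch of the two accepted shapes, using only the detector methods shown in this diff (the tool-call payloads are hypothetical):

    from sglang.srt.function_call_parser import Llama32Detector

    detector = Llama32Detector()
    # Both forms are now detected:
    assert detector.has_tool_call('<|python_tag|>{"name": "get_weather", "parameters": {}}')
    assert detector.has_tool_call('{"name": "get_weather", "parameters": {}}')
    # Plain prose without a leading "{" is still ignored:
    assert not detector.has_tool_call("The weather is sunny today.")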
@@ -501,6 +518,20 @@ class FunctionCallParser:
         self.multi_format_parser = MultiFormatParser(detectors)
         self.tools = tools
 
+    def has_tool_call(self, text: str) -> bool:
+        """
+        Check if the given text contains a tool call in the format supported by this parser.
+        This delegates to the detector's implementation.
+
+        :param text: The text to check for tool calls
+        :return: True if the text contains a tool call, False otherwise
+        """
+        # Check all detectors in the multi_format_parser
+        for detector in self.multi_format_parser.detectors:
+            if detector.has_tool_call(text):
+                return True
+        return False
+
     def parse_non_stream(self, full_text: str):
         """
         Non-streaming call: one-time parsing
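Note: has_tool_call gives callers (for example the OpenAI-compatible adapter, also touched in this release) a cheap pre-check before running full parsing. A hedged usage sketch; the constructor arguments below are assumptions about FunctionCallParser.__init__, which is defined outside this hunk:

    from sglang.srt.function_call_parser import FunctionCallParser

    # Assumed constructor shape: a list of tool definitions plus the name of
    # a registered detector format (e.g. "llama3", "qwen25", "mistral").
    parser = FunctionCallParser(tools=my_tools, tool_call_parser="llama3")

    text = '{"name": "get_weather", "parameters": {"city": "Paris"}}'
    if parser.has_tool_call(text):
        result = parser.parse_non_stream(text)  # one-time parse of the full output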
--- sglang-0.4.4/sglang/srt/layers/dp_attention.py
+++ sglang-0.4.4.post1/sglang/srt/layers/dp_attention.py
@@ -1,6 +1,8 @@
 from __future__ import annotations
 
 import functools
+import logging
+from contextlib import contextmanager
 from typing import TYPE_CHECKING, Union
 
 import torch
@@ -14,6 +16,8 @@ from sglang.srt.distributed import (
     tensor_model_parallel_all_reduce,
 )
 
+logger = logging.getLogger(__name__)
+
 if TYPE_CHECKING:
     from sglang.srt.model_executor.forward_batch_info import ForwardBatch
 
@@ -86,6 +90,27 @@ def get_attention_dp_size():
     return _DP_SIZE
 
 
+@contextmanager
+def disable_dp_size():
+    """Patch the tp group temporarily until this function ends.
+
+    This method is for draft workers of speculative decoding to run draft model
+    with different tp degree from that of target model workers.
+
+    Args:
+        tp_group (GroupCoordinator): the tp group coordinator
+    """
+    global _DP_SIZE
+    assert _DP_SIZE is not None, "dp attention not initialized!"
+
+    old_dp_size = _DP_SIZE
+    _DP_SIZE = 1
+    try:
+        yield
+    finally:
+        _DP_SIZE = old_dp_size
+
+
 def get_dp_local_info(forward_batch: ForwardBatch):
     dp_rank = get_attention_dp_rank()
 
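Note: because disable_dp_size is a context manager with a try/finally, _DP_SIZE is restored even if the wrapped forward pass raises. A hedged usage sketch (the draft-model call is a hypothetical placeholder):

    from sglang.srt.layers.dp_attention import disable_dp_size, get_attention_dp_size

    with disable_dp_size():
        # Inside the block, DP attention is effectively off for the draft worker.
        assert get_attention_dp_size() == 1
        draft_logits = run_draft_model(batch)  # hypothetical helper
    # On exit (normal or via exception), the original _DP_SIZE is back in place.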
@@ -159,7 +184,8 @@ def dp_gather(
         layer_id != "embedding" or get_attention_tp_rank() == 0
     ):
         assert (
-            global_tokens.storage().data_ptr() != local_tokens.storage().data_ptr()
+            global_tokens.untyped_storage().data_ptr()
+            != local_tokens.untyped_storage().data_ptr()
         ), "aliasing between global_tokens and local_tokens not allowed"
         memcpy_triton(
             global_tokens, local_tokens, 0, local_start_pos, local_num_tokens, False
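Note: Tensor.storage() is deprecated in recent PyTorch releases in favor of Tensor.untyped_storage(); both expose data_ptr(), so the aliasing assertion keeps the same behavior. What it guards against, in miniature:

    import torch

    a = torch.zeros(8)
    b = a.view(2, 4)       # a view shares the same underlying storage
    c = torch.zeros(2, 4)  # a fresh allocation does not

    assert a.untyped_storage().data_ptr() == b.untyped_storage().data_ptr()
    assert a.untyped_storage().data_ptr() != c.untyped_storage().data_ptr()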
@@ -174,8 +200,9 @@ def dp_gather(
         torch.ops.sglang.inplace_all_reduce(
             global_tokens, group_name=get_tp_group().unique_name
         )
+
     else:
-        global_tokens = tensor_model_parallel_all_reduce(global_tokens)
+        global_tokens[:] = tensor_model_parallel_all_reduce(global_tokens)
 
 
 def dp_scatter(
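Note: the global_tokens[:] = ... fix matters because tensor_model_parallel_all_reduce may return a new tensor. Plain assignment would only rebind the local name, leaving the caller's buffer (possibly captured in a CUDA graph) stale; slice assignment writes through to the existing storage. In miniature:

    import torch

    def rebind(buf: torch.Tensor) -> None:
        buf = buf + 1      # rebinds the local name; caller sees no change

    def write_through(buf: torch.Tensor) -> None:
        buf[:] = buf + 1   # writes into the caller's storage

    x = torch.zeros(3)
    rebind(x)
    assert x.sum().item() == 0
    write_through(x)
    assert x.sum().item() == 3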
@@ -186,6 +213,7 @@ def dp_scatter(
     # local_num_tokens is not necessarily the same as local_tokens.shape[0],
     # since local_tokens may be padded for cuda graph
     local_start_pos, local_num_tokens = get_dp_local_info(forward_batch)
+
     local_tokens.fill_(0)
     assert local_tokens.is_contiguous()
     assert global_tokens.is_contiguous()
--- /dev/null
+++ sglang-0.4.4.post1/sglang/srt/layers/elementwise.py
@@ -0,0 +1,411 @@
+from typing import Tuple
+
+import torch
+import triton
+import triton.language as tl
+
+fused_softcap_autotune = triton.autotune(
+    configs=[
+        triton.Config(kwargs={"BLOCK_SIZE": 128}, num_warps=4),
+        triton.Config(kwargs={"BLOCK_SIZE": 128}, num_warps=8),
+        triton.Config(kwargs={"BLOCK_SIZE": 128}, num_warps=16),
+        triton.Config(kwargs={"BLOCK_SIZE": 256}, num_warps=4),
+        triton.Config(kwargs={"BLOCK_SIZE": 256}, num_warps=8),
+        triton.Config(kwargs={"BLOCK_SIZE": 512}, num_warps=4),
+        triton.Config(kwargs={"BLOCK_SIZE": 512}, num_warps=8),
+        triton.Config(kwargs={"BLOCK_SIZE": 512}, num_warps=16),
+        triton.Config(kwargs={"BLOCK_SIZE": 1024}, num_warps=4),
+        triton.Config(kwargs={"BLOCK_SIZE": 1024}, num_warps=8),
+        triton.Config(kwargs={"BLOCK_SIZE": 1024}, num_warps=16),
+        triton.Config(kwargs={"BLOCK_SIZE": 1024}, num_warps=32),
+        triton.Config(kwargs={"BLOCK_SIZE": 2048}, num_warps=32),
+        triton.Config(kwargs={"BLOCK_SIZE": 4096}, num_warps=32),
+        triton.Config(kwargs={"BLOCK_SIZE": 8192}, num_warps=32),
+        triton.Config(kwargs={"BLOCK_SIZE": 16384}, num_warps=32),
+        triton.Config(kwargs={"BLOCK_SIZE": 32768}, num_warps=32),
+    ],
+    key=["n_ele"],
+)
+
+
+@triton.jit
+def fused_softcap_kernel(
+    output_ptr,
+    input_ptr,
+    n_ele,
+    softcap_const: tl.constexpr,
+    BLOCK_SIZE: tl.constexpr,
+):
+    pid = tl.program_id(axis=0)
+    block_start = pid * BLOCK_SIZE
+    offsets = block_start + tl.arange(0, BLOCK_SIZE)
+    mask = offsets < n_ele
+    x = tl.load(input_ptr + offsets, mask=mask)
+    fx = x.to(tl.float32)
+    fxs = fx / softcap_const
+    exped = tl.exp(2 * fxs)
+    top = exped - 1
+    bottom = exped + 1
+    output = top / bottom * softcap_const
+    tl.store(output_ptr + offsets, output, mask=mask)
+
+
+fused_softcap_kernel_autotuned = fused_softcap_autotune(fused_softcap_kernel)
+
+
+def fused_softcap(x, softcap_const, autotune=False):
+    output = torch.empty_like(x, dtype=torch.float32)
+    n_elements = output.numel()
+    if autotune:
+        grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
+        fused_softcap_kernel_autotuned[grid](output, x, n_elements, softcap_const)
+    else:
+        fused_softcap_kernel[(triton.cdiv(n_elements, 128),)](
+            output, x, n_elements, softcap_const, BLOCK_SIZE=128, num_warps=8
+        )
+    return output
+
+
+# cast to float + softcap
+class Softcap:
+    def __init__(self, softcap_const: float):
+        self.softcap_const = softcap_const
+
+    def __call__(self, *args, **kwargs):
+        return self.forward(*args, **kwargs)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        if x.is_cuda:
+            return self.forward_cuda(x)
+        else:
+            return self.forward_native(x)
+
+    def forward_native(self, x: torch.Tensor) -> torch.Tensor:
+        return torch.tanh(x.float() / self.softcap_const) * self.softcap_const
+
+    def forward_cuda(self, x: torch.Tensor, autotune=False) -> torch.Tensor:
+        return fused_softcap(x, self.softcap_const, autotune=autotune)
+
+
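Note: the kernel computes tanh through the identity tanh(y) = (exp(2y) - 1) / (exp(2y) + 1), so the Triton path should agree with Softcap.forward_native up to floating-point error. A hedged sanity check, assuming a CUDA device:

    import torch
    from sglang.srt.layers.elementwise import Softcap, fused_softcap

    cap = Softcap(softcap_const=30.0)
    x = torch.randn(4, 1024, device="cuda", dtype=torch.bfloat16)

    ref = cap.forward_native(x)   # tanh(x / c) * c, computed in float32
    out = fused_softcap(x, 30.0)  # Triton kernel, float32 output
    torch.testing.assert_close(out, ref, rtol=1e-3, atol=1e-3)

(The diff of elementwise.py continues below.)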
+rmsnorm_autotune = triton.autotune(
+    configs=[
+        triton.Config(kwargs={"BLOCK_SIZE": 1024}, num_warps=4, num_stages=1),
+        triton.Config(kwargs={"BLOCK_SIZE": 1024}, num_warps=8, num_stages=1),
+        triton.Config(kwargs={"BLOCK_SIZE": 1024}, num_warps=16, num_stages=1),
+        triton.Config(kwargs={"BLOCK_SIZE": 1024}, num_warps=4),
+        triton.Config(kwargs={"BLOCK_SIZE": 1024}, num_warps=8),
+        triton.Config(kwargs={"BLOCK_SIZE": 1024}, num_warps=16),
+        triton.Config(kwargs={"BLOCK_SIZE": 1024}, num_warps=4, num_stages=4),
+        triton.Config(kwargs={"BLOCK_SIZE": 1024}, num_warps=8, num_stages=4),
+        triton.Config(kwargs={"BLOCK_SIZE": 1024}, num_warps=16, num_stages=4),
+        triton.Config(kwargs={"BLOCK_SIZE": 1024}, num_warps=8, num_stages=8),
+        triton.Config(kwargs={"BLOCK_SIZE": 1024}, num_warps=16, num_stages=8),
+        triton.Config(kwargs={"BLOCK_SIZE": 2048}, num_warps=8),
+        triton.Config(kwargs={"BLOCK_SIZE": 2048}, num_warps=16),
+        triton.Config(kwargs={"BLOCK_SIZE": 2048}, num_warps=8, num_stages=4),
+        triton.Config(kwargs={"BLOCK_SIZE": 2048}, num_warps=16, num_stages=4),
+        triton.Config(kwargs={"BLOCK_SIZE": 4096}, num_warps=8),
+        triton.Config(kwargs={"BLOCK_SIZE": 4096}, num_warps=16),
+        triton.Config(kwargs={"BLOCK_SIZE": 8192}, num_warps=8),
+        triton.Config(kwargs={"BLOCK_SIZE": 8192}, num_warps=16),
+        triton.Config(kwargs={"BLOCK_SIZE": 8192}, num_warps=32),
+        triton.Config(kwargs={"BLOCK_SIZE": 8192}, num_warps=8, num_stages=1),
+        triton.Config(kwargs={"BLOCK_SIZE": 8192}, num_warps=16, num_stages=1),
+        triton.Config(kwargs={"BLOCK_SIZE": 8192}, num_warps=32, num_stages=1),
+        triton.Config(kwargs={"BLOCK_SIZE": 8192}, num_warps=8, num_stages=4),
+        triton.Config(kwargs={"BLOCK_SIZE": 8192}, num_warps=16, num_stages=4),
+        triton.Config(kwargs={"BLOCK_SIZE": 8192}, num_warps=32, num_stages=4),
+        triton.Config(kwargs={"BLOCK_SIZE": 16384}, num_warps=8),
+        triton.Config(kwargs={"BLOCK_SIZE": 16384}, num_warps=16),
+        triton.Config(kwargs={"BLOCK_SIZE": 16384}, num_warps=32),
+        triton.Config(kwargs={"BLOCK_SIZE": 16384}, num_warps=8, num_stages=1),
+        triton.Config(kwargs={"BLOCK_SIZE": 16384}, num_warps=16, num_stages=1),
+        triton.Config(kwargs={"BLOCK_SIZE": 16384}, num_warps=32, num_stages=1),
+        triton.Config(kwargs={"BLOCK_SIZE": 16384}, num_warps=8, num_stages=4),
+        triton.Config(kwargs={"BLOCK_SIZE": 16384}, num_warps=16, num_stages=4),
+        triton.Config(kwargs={"BLOCK_SIZE": 16384}, num_warps=32, num_stages=4),
+    ],
+    key=["hidden_dim"],
+)
+
+
+@triton.jit
+def fused_dual_residual_rmsnorm_kernel(
+    output_ptr,
+    mid_ptr,
+    activ_ptr,
+    residual_ptr,
+    weight1_ptr,
+    weight2_ptr,
+    eps: tl.constexpr,
+    hidden_dim: tl.constexpr,
+    BLOCK_SIZE: tl.constexpr,
+):
+    pid = tl.program_id(axis=0)
+    input_start = pid * hidden_dim
+
+    offsets = tl.arange(0, BLOCK_SIZE)
+    mask = offsets < hidden_dim
+
+    a_ = tl.load(activ_ptr + input_start + offsets, mask=mask, other=0.0)
+    a = a_.to(tl.float32)
+    rms = tl.sqrt(tl.sum(a * a, axis=0) / hidden_dim + eps)
+
+    r = tl.load(residual_ptr + input_start + offsets, mask=mask, other=0.0)
+    w1_ = tl.load(weight1_ptr + offsets, mask=mask, other=0.0)
+    w1 = w1_.to(tl.float32)
+
+    a2r = r + (a / rms * w1).to(r.dtype)
+    tl.store(
+        mid_ptr + input_start + offsets,
+        a2r,
+        mask=mask,
+    )
+
+    a2r = a2r.to(tl.float32)
+    rms2 = tl.sqrt(tl.sum(a2r * a2r, axis=0) / hidden_dim + eps)
+
+    w2_ = tl.load(weight2_ptr + offsets, mask=mask, other=0.0)
+    w2 = w2_.to(tl.float32)
+
+    tl.store(
+        output_ptr + input_start + offsets,
+        a2r / rms2 * w2,  # implicitly casts to output dtype here
+        mask=mask,
+    )
+
+
+fused_dual_residual_rmsnorm_kernel_autotune = rmsnorm_autotune(
+    fused_dual_residual_rmsnorm_kernel
+)
+
+
+def fused_dual_residual_rmsnorm(x, residual, weight1, weight2, eps, autotune=False):
+    assert len(x.shape) == 2
+    assert x.shape == residual.shape and x.dtype == residual.dtype
+    output, mid = torch.empty_like(x), torch.empty_like(x)
+    bs, hidden_dim = x.shape
+    if autotune:
+        fused_dual_residual_rmsnorm_kernel_autotune[(bs,)](
+            output, mid, x, residual, weight1, weight2, eps=eps, hidden_dim=hidden_dim
+        )
+    else:
+        config = {
+            "BLOCK_SIZE": triton.next_power_of_2(hidden_dim),
+            "num_warps": max(
+                min(triton.next_power_of_2(triton.cdiv(hidden_dim, 256)), 32), 4
+            ),
+        }
+
+        fused_dual_residual_rmsnorm_kernel[(bs,)](
+            output,
+            mid,
+            x,
+            residual,
+            weight1,
+            weight2,
+            eps=eps,
+            hidden_dim=hidden_dim,
+            **config,
+        )
+
+    return output, mid
+
+
+@triton.jit
+def fused_rmsnorm_kernel(
+    output_ptr,
+    activ_ptr,
+    weight_ptr,
+    eps: tl.constexpr,
+    hidden_dim: tl.constexpr,
+    BLOCK_SIZE: tl.constexpr,
+):
+    pid = tl.program_id(axis=0)
+    input_start = pid * hidden_dim
+
+    offsets = tl.arange(0, BLOCK_SIZE)
+    mask = offsets < hidden_dim
+
+    a_ = tl.load(activ_ptr + input_start + offsets, mask=mask, other=0.0)
+    a = a_.to(tl.float32)
+    rms = tl.sqrt(tl.sum(a * a, axis=0) / hidden_dim + eps)
+
+    w1_ = tl.load(weight_ptr + offsets, mask=mask, other=0.0)
+    w1 = w1_.to(tl.float32)
+
+    a_rms = a / rms * w1
+
+    tl.store(
+        output_ptr + input_start + offsets,
+        a_rms,  # implicitly casts to output dtype here
+        mask=mask,
+    )
+
+
+def fused_rmsnorm(x, weight, eps, autotune=False, inplace=False):
+    assert len(x.shape) == 2
+    if inplace:
+        output = x
+    else:
+        output = torch.empty_like(x)
+    bs, hidden_dim = x.shape
+    config = {
+        "BLOCK_SIZE": triton.next_power_of_2(hidden_dim),
+        "num_warps": max(
+            min(triton.next_power_of_2(triton.cdiv(hidden_dim, 256)), 32), 4
+        ),
+    }
+
+    fused_rmsnorm_kernel[(bs,)](
+        output, x, weight, eps=eps, hidden_dim=hidden_dim, **config
+    )
+    return output
+
+
+class FusedDualResidualRMSNorm:
+    """
+    Fused implementation of
+        y = RMSNorm2(RMSNorm1(x) + residual))
+    """
+
+    def __init__(self, rmsnorm1, rmsnorm2) -> None:  # the one after rmsnorm1
+        self.rmsnorm1 = rmsnorm1
+        self.rmsnorm2 = rmsnorm2
+        self.variance_epsilon = self.rmsnorm1.variance_epsilon
+        assert self.rmsnorm1.variance_epsilon == self.rmsnorm2.variance_epsilon
+        assert self.rmsnorm1.weight.shape == self.rmsnorm2.weight.shape
+
+    def __call__(self, *args, **kwargs):
+        return self.forward(*args, **kwargs)
+
+    def forward(
+        self, x: torch.Tensor, residual: torch.Tensor
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        if x.is_cuda:
+            return self.forward_cuda(x, residual)
+        else:
+            return self.forward_flashinfer(x, residual)
+
+    def forward_cuda(
+        self, x: torch.Tensor, residual: torch.Tensor, autotune=False
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        return fused_dual_residual_rmsnorm(
+            x,
+            residual,
+            self.rmsnorm1.weight,
+            self.rmsnorm2.weight,
+            self.variance_epsilon,
+            autotune=autotune,
+        )
+
+    def forward_flashinfer(
+        self,
+        x: torch.Tensor,
+        residual: torch.Tensor,
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        normed1 = self.rmsnorm1(x)
+        residual = normed1 + residual
+        return self.rmsnorm2(residual), residual
+
+    def forward_native(
+        self,
+        x: torch.Tensor,
+        residual: torch.Tensor,
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        normed1 = self.rmsnorm1.forward_native(x)
+        residual = normed1 + residual
+        return self.rmsnorm2.forward_native(residual), residual
+
+
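Note: the dual-residual kernel fuses mid = residual + RMSNorm1(x) and out = RMSNorm2(mid) into a single launch per row, returning (out, mid). A hedged pure-PyTorch reference for spot-checking the fused path, assuming a CUDA device (tolerances are loose to absorb the bf16 round trip):

    import torch
    from sglang.srt.layers.elementwise import fused_dual_residual_rmsnorm

    def rmsnorm_ref(x, w, eps):
        # Plain float32 RMSNorm mirroring the kernel's math.
        xf = x.float()
        rms = torch.sqrt(xf.pow(2).mean(-1, keepdim=True) + eps)
        return (xf / rms * w.float()).to(x.dtype)

    bs, d, eps = 4, 4096, 1e-6
    x = torch.randn(bs, d, device="cuda", dtype=torch.bfloat16)
    res = torch.randn_like(x)
    w1 = torch.randn(d, device="cuda", dtype=torch.bfloat16)
    w2 = torch.randn_like(w1)

    out, mid = fused_dual_residual_rmsnorm(x, res, w1, w2, eps)
    mid_ref = res + rmsnorm_ref(x, w1, eps)
    torch.testing.assert_close(mid, mid_ref, rtol=2e-2, atol=2e-2)
    torch.testing.assert_close(out, rmsnorm_ref(mid_ref, w2, eps), rtol=2e-2, atol=2e-2)

(The diff of elementwise.py continues below.)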
+# gelu on first half of vector
+@triton.jit
+def gelu_and_mul_kernel(
+    out_hidden_states_ptr,  # (bs, hidden_dim)
+    out_scales_ptr,  # (bs,)
+    hidden_states_ptr,  # (bs, hidden_dim * 2)
+    quant_max: tl.constexpr,
+    static_scale: tl.constexpr,
+    hidden_dim: tl.constexpr,  # the output hidden_dim
+    BLOCK_SIZE: tl.constexpr,
+):
+    pid = tl.program_id(axis=0)
+
+    input_start = pid * hidden_dim * 2
+    output_start = pid * hidden_dim
+
+    input1_offs = tl.arange(0, BLOCK_SIZE)
+    mask = tl.arange(0, BLOCK_SIZE) < hidden_dim  # shared for input1, input3, output
+    input3_offs = hidden_dim + tl.arange(0, BLOCK_SIZE)
+    output_offs = tl.arange(0, BLOCK_SIZE)
+
+    x1 = tl.load(
+        hidden_states_ptr + input_start + input1_offs, mask=mask, other=0.0
+    ).to(tl.float32)
+    x3 = tl.load(
+        hidden_states_ptr + input_start + input3_offs, mask=mask, other=0.0
+    ).to(tl.float32)
+
+    # gelu
+    # cast down before mul to better match training?
+    gelu_x1 = 0.5 * (1.0 + tl.erf(x1 * 0.7071067811865475)) * x1
+    out = x3 * gelu_x1.to(hidden_states_ptr.dtype.element_ty)
+
+    if quant_max is not None:
+        raise NotImplementedError()
+
+    tl.store(out_hidden_states_ptr + output_start + output_offs, out, mask=mask)
+
+
+def gelu_and_mul_triton(
+    hidden_states,
+    scales=None,
+    quantize=None,  # dtype to quantize to
+    out=None,
+):
+    bs, in_hidden_dim = hidden_states.shape
+    hidden_dim = in_hidden_dim // 2
+
+    if out is None:
+        out_hidden_states = torch.empty(
+            (bs, hidden_dim),
+            dtype=quantize or hidden_states.dtype,
+            device=hidden_states.device,
+        )
+    else:
+        assert out.shape == (bs, hidden_dim)
+        assert out.dtype == (quantize or hidden_states.dtype)
+        out_hidden_states = out
+    out_scales = None
+    static_scale = False
+    if quantize is not None:
+        if scales is None:
+            out_scales = torch.empty(
+                (bs,), dtype=torch.float32, device=hidden_states.device
+            )
+        else:
+            out_scales = scales
+            static_scale = True
+
+    config = {
+        # 8 ele per thread (not tuned)
+        "num_warps": max(
+            min(triton.next_power_of_2(triton.cdiv(hidden_dim, 8 * 32)), 32), 4
+        ),
+    }
+
+    gelu_and_mul_kernel[(bs,)](
+        out_hidden_states,
+        out_scales,
+        hidden_states,
+        quant_max=torch.finfo(quantize).max if quantize is not None else None,
+        static_scale=static_scale,
+        hidden_dim=hidden_dim,
+        BLOCK_SIZE=triton.next_power_of_2(hidden_dim),
+        **config,
+    )
+
+    if quantize is not None:
+        return out_hidden_states, out_scales
+    else:
+        return out_hidden_states, None
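Note: gelu_and_mul_triton implements the GeGLU pattern: the input packs the gate and up projections side by side as (bs, 2 * hidden_dim), and the result is gelu(x[:, :d]) * x[:, d:] with exact erf-based GELU. The quantization path is stubbed out for now (NotImplementedError when quant_max is set). A hedged reference check, assuming a CUDA device and the default quantize=None:

    import torch
    import torch.nn.functional as F
    from sglang.srt.layers.elementwise import gelu_and_mul_triton

    bs, d = 4, 1024
    h = torch.randn(bs, 2 * d, device="cuda", dtype=torch.bfloat16)

    out, scales = gelu_and_mul_triton(h)  # scales is None without quantization
    ref = F.gelu(h[:, :d].float()) * h[:, d:].float()
    torch.testing.assert_close(out.float(), ref, rtol=2e-2, atol=2e-2)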
--- sglang-0.4.4/sglang/srt/layers/logits_processor.py
+++ sglang-0.4.4.post1/sglang/srt/layers/logits_processor.py
@@ -23,6 +23,7 @@ import triton.language as tl
 from torch import nn
 
 from sglang.srt.distributed import (
+    get_tensor_model_parallel_rank,
     get_tensor_model_parallel_world_size,
     tensor_model_parallel_all_gather,
 )