sglang 0.4.4.post3.tar.gz → 0.4.5.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
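For readers who want to reproduce a file-level listing like the one below, here is a minimal, illustrative Python sketch (standard library only) that compares the file inventories of two sdist tarballs. The local tarball filenames are assumptions matching this page's title, and the sketch only classifies files as added, removed, or changed-by-size; the real +N/-M counts shown below come from a content-level diff (e.g. difflib), which is omitted here for brevity.

    import tarfile

    def members(path):
        """Map each regular file's path (minus the top-level directory) to its size."""
        out = {}
        with tarfile.open(path, "r:gz") as tf:
            for m in tf.getmembers():
                if m.isfile():
                    # Strip the leading "sglang-<version>/" component so the
                    # two releases' paths line up.
                    rel = m.name.split("/", 1)[1] if "/" in m.name else m.name
                    out[rel] = m.size
        return out

    old = members("sglang-0.4.4.post3.tar.gz")  # assumed local filenames
    new = members("sglang-0.4.5.tar.gz")

    added = sorted(set(new) - set(old))
    removed = sorted(set(old) - set(new))
    changed = sorted(p for p in set(old) & set(new) if old[p] != new[p])

    print(f"Files changed ({len(added) + len(removed) + len(changed)})")
    for p in added:
        print("A", p)
    for p in removed:
        print("D", p)
    for p in changed:
        print("M", p)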
Files changed (597)
  1. {sglang-0.4.4.post3/sglang.egg-info → sglang-0.4.5}/PKG-INFO +4 -3
  2. {sglang-0.4.4.post3 → sglang-0.4.5}/pyproject.toml +4 -3
  3. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/bench_serving.py +49 -7
  4. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/lang/chat_template.py +24 -0
  5. sglang-0.4.5/sglang/srt/_custom_ops.py +117 -0
  6. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/configs/model_config.py +5 -0
  7. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/constrained/base_grammar_backend.py +5 -1
  8. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/conversation.py +29 -4
  9. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/custom_op.py +5 -0
  10. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/distributed/device_communicators/custom_all_reduce.py +27 -79
  11. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/distributed/device_communicators/custom_all_reduce_utils.py +2 -2
  12. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/entrypoints/engine.py +0 -5
  13. sglang-0.4.5/sglang/srt/layers/attention/flashattention_backend.py +1029 -0
  14. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/attention/flashinfer_backend.py +5 -7
  15. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/attention/flashinfer_mla_backend.py +1 -3
  16. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/attention/flashmla_backend.py +1 -1
  17. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/ep_moe/kernels.py +142 -0
  18. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/ep_moe/layer.py +79 -80
  19. sglang-0.4.5/sglang/srt/layers/moe/ep_moe/token_dispatcher.py +599 -0
  20. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_native.py +5 -0
  21. sglang-0.4.5/sglang/srt/layers/moe/fused_moe_triton/configs/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  22. sglang-0.4.5/sglang/srt/layers/moe/fused_moe_triton/configs/E=144,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  23. sglang-0.4.5/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  24. sglang-0.4.5/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=1024,device_name=NVIDIA_H200.json +146 -0
  25. sglang-0.4.5/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  26. sglang-0.4.5/sglang/srt/layers/moe/fused_moe_triton/configs/E=20,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  27. sglang-0.4.5/sglang/srt/layers/moe/fused_moe_triton/configs/E=24,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json +146 -0
  28. sglang-0.4.5/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=128,device_name=NVIDIA_H20,block_shape=[128, 128].json +146 -0
  29. sglang-0.4.5/sglang/srt/layers/moe/fused_moe_triton/configs/E=257,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
  30. sglang-0.4.5/sglang/srt/layers/moe/fused_moe_triton/configs/E=264,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
  31. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/fused_moe.py +416 -50
  32. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/layer.py +7 -0
  33. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/topk.py +49 -3
  34. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/__init__.py +5 -1
  35. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/blockwise_int8.py +2 -0
  36. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/compressed_tensors/compressed_tensors.py +2 -1
  37. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/compressed_tensors/compressed_tensors_moe.py +34 -10
  38. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/fp8.py +3 -1
  39. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/fp8_utils.py +1 -4
  40. sglang-0.4.5/sglang/srt/layers/quantization/moe_wna16.py +503 -0
  41. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/utils.py +1 -1
  42. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/w8a8_int8.py +2 -0
  43. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/radix_attention.py +2 -0
  44. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/rotary_embedding.py +63 -12
  45. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/managers/cache_controller.py +34 -11
  46. sglang-0.4.5/sglang/srt/managers/mm_utils.py +419 -0
  47. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/managers/multimodal_processor.py +0 -2
  48. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/managers/multimodal_processors/base_processor.py +45 -77
  49. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/managers/multimodal_processors/clip.py +7 -26
  50. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/managers/multimodal_processors/deepseek_vl_v2.py +17 -58
  51. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/managers/multimodal_processors/gemma3.py +12 -27
  52. sglang-0.4.5/sglang/srt/managers/multimodal_processors/janus_pro.py +58 -0
  53. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/managers/multimodal_processors/llava.py +34 -14
  54. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/managers/multimodal_processors/minicpm.py +35 -38
  55. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/managers/multimodal_processors/mlama.py +10 -23
  56. sglang-0.4.5/sglang/srt/managers/multimodal_processors/mllama4.py +161 -0
  57. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/managers/multimodal_processors/qwen_vl.py +22 -45
  58. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/managers/schedule_batch.py +185 -128
  59. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/managers/scheduler.py +4 -4
  60. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/managers/tokenizer_manager.py +1 -1
  61. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/managers/utils.py +1 -6
  62. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/mem_cache/hiradix_cache.py +62 -52
  63. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/mem_cache/memory_pool.py +72 -6
  64. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/mem_cache/paged_allocator.py +39 -0
  65. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/metrics/collector.py +23 -53
  66. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/model_executor/cuda_graph_runner.py +8 -6
  67. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/model_executor/forward_batch_info.py +10 -10
  68. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/model_executor/model_runner.py +60 -57
  69. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/model_loader/loader.py +8 -0
  70. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/clip.py +12 -7
  71. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/deepseek_janus_pro.py +10 -15
  72. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/deepseek_v2.py +212 -121
  73. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/deepseek_vl2.py +105 -104
  74. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/gemma3_mm.py +14 -80
  75. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/llama.py +16 -5
  76. sglang-0.4.5/sglang/srt/models/llama4.py +420 -0
  77. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/llava.py +31 -19
  78. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/llavavid.py +16 -7
  79. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/minicpmo.py +63 -147
  80. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/minicpmv.py +17 -27
  81. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/mllama.py +29 -14
  82. sglang-0.4.5/sglang/srt/models/mllama4.py +154 -0
  83. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/qwen2.py +9 -6
  84. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/qwen2_5_vl.py +21 -31
  85. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/qwen2_vl.py +20 -21
  86. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/openai_api/adapter.py +18 -6
  87. sglang-0.4.5/sglang/srt/platforms/interface.py +371 -0
  88. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/server_args.py +99 -14
  89. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/speculative/eagle_draft_cuda_graph_runner.py +5 -5
  90. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/speculative/eagle_utils.py +140 -28
  91. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/speculative/eagle_worker.py +93 -24
  92. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/utils.py +104 -51
  93. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/test/test_custom_ops.py +55 -0
  94. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/test/test_utils.py +13 -26
  95. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/utils.py +2 -2
  96. sglang-0.4.5/sglang/version.py +1 -0
  97. {sglang-0.4.4.post3 → sglang-0.4.5/sglang.egg-info}/PKG-INFO +4 -3
  98. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang.egg-info/SOURCES.txt +15 -0
  99. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang.egg-info/requires.txt +3 -2
  100. sglang-0.4.4.post3/sglang/srt/_custom_ops.py +0 -150
  101. sglang-0.4.4.post3/sglang/srt/layers/attention/flashattention_backend.py +0 -434
  102. sglang-0.4.4.post3/sglang/srt/layers/moe/ep_moe/token_dispatcher.py +0 -416
  103. sglang-0.4.4.post3/sglang/srt/managers/mm_utils.py +0 -373
  104. sglang-0.4.4.post3/sglang/srt/managers/multimodal_processors/janus_pro.py +0 -84
  105. sglang-0.4.4.post3/sglang/version.py +0 -1
  106. {sglang-0.4.4.post3 → sglang-0.4.5}/LICENSE +0 -0
  107. {sglang-0.4.4.post3 → sglang-0.4.5}/README.md +0 -0
  108. {sglang-0.4.4.post3 → sglang-0.4.5}/setup.cfg +0 -0
  109. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/__init__.py +0 -0
  110. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/api.py +0 -0
  111. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/bench_offline_throughput.py +0 -0
  112. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/bench_one_batch.py +0 -0
  113. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/bench_one_batch_server.py +0 -0
  114. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/check_env.py +0 -0
  115. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/global_config.py +0 -0
  116. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/lang/__init__.py +0 -0
  117. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/lang/backend/__init__.py +0 -0
  118. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/lang/backend/anthropic.py +0 -0
  119. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/lang/backend/base_backend.py +0 -0
  120. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/lang/backend/litellm.py +0 -0
  121. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/lang/backend/openai.py +0 -0
  122. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/lang/backend/runtime_endpoint.py +0 -0
  123. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/lang/backend/vertexai.py +0 -0
  124. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/lang/choices.py +0 -0
  125. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/lang/compiler.py +0 -0
  126. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/lang/interpreter.py +0 -0
  127. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/lang/ir.py +0 -0
  128. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/lang/tracer.py +0 -0
  129. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/launch_server.py +0 -0
  130. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/llama3_eval.py +0 -0
  131. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/aio_rwlock.py +0 -0
  132. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/code_completion_parser.py +0 -0
  133. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/configs/__init__.py +0 -0
  134. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/configs/chatglm.py +0 -0
  135. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/configs/dbrx.py +0 -0
  136. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/configs/deepseekvl2.py +0 -0
  137. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/configs/device_config.py +0 -0
  138. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/configs/exaone.py +0 -0
  139. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/configs/janus_pro.py +0 -0
  140. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/configs/load_config.py +0 -0
  141. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/configs/utils.py +0 -0
  142. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/connector/__init__.py +0 -0
  143. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/connector/base_connector.py +0 -0
  144. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/connector/redis.py +0 -0
  145. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/connector/s3.py +0 -0
  146. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/connector/serde/__init__.py +0 -0
  147. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/connector/serde/safe_serde.py +0 -0
  148. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/connector/serde/serde.py +0 -0
  149. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/connector/utils.py +0 -0
  150. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/constrained/llguidance_backend.py +0 -0
  151. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/constrained/outlines_backend.py +0 -0
  152. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/constrained/outlines_jump_forward.py +0 -0
  153. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/constrained/xgrammar_backend.py +0 -0
  154. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/disaggregation/conn.py +0 -0
  155. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/disaggregation/decode.py +0 -0
  156. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/disaggregation/mini_lb.py +0 -0
  157. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/disaggregation/prefill.py +0 -0
  158. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/disaggregation/utils.py +0 -0
  159. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/distributed/__init__.py +0 -0
  160. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/distributed/communication_op.py +0 -0
  161. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/distributed/device_communicators/cuda_wrapper.py +0 -0
  162. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/distributed/device_communicators/hpu_communicator.py +0 -0
  163. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/distributed/device_communicators/pynccl.py +0 -0
  164. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/distributed/device_communicators/pynccl_wrapper.py +0 -0
  165. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/distributed/device_communicators/shm_broadcast.py +0 -0
  166. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/distributed/device_communicators/xpu_communicator.py +0 -0
  167. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/distributed/parallel_state.py +0 -0
  168. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/distributed/utils.py +0 -0
  169. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/entrypoints/http_server.py +0 -0
  170. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/entrypoints/verl_engine.py +0 -0
  171. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/function_call_parser.py +0 -0
  172. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/hf_transformers_utils.py +0 -0
  173. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/activation.py +0 -0
  174. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/attention/base_attn_backend.py +0 -0
  175. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/attention/double_sparsity_backend.py +0 -0
  176. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/attention/torch_native_backend.py +0 -0
  177. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/attention/triton_backend.py +0 -0
  178. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/attention/triton_ops/decode_attention.py +0 -0
  179. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/attention/triton_ops/double_sparsity_attention.py +0 -0
  180. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/attention/triton_ops/extend_attention.py +0 -0
  181. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/attention/triton_ops/prefill_attention.py +0 -0
  182. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/attention/triton_ops/rocm_mla_decode_rope.py +0 -0
  183. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/attention/utils.py +0 -0
  184. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/attention/vision.py +0 -0
  185. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/dp_attention.py +0 -0
  186. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/elementwise.py +0 -0
  187. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/layernorm.py +0 -0
  188. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/linear.py +0 -0
  189. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/logits_processor.py +0 -0
  190. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/ep_moe/__init__.py +0 -0
  191. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/__init__.py +0 -0
  192. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  193. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  194. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  195. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  196. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  197. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +0 -0
  198. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  199. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  200. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  201. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  202. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  203. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +0 -0
  204. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  205. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  206. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  207. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  208. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  209. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  210. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  211. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  212. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  213. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +0 -0
  214. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  215. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  216. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  217. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  218. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  219. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  220. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +0 -0
  221. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  222. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
  223. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  224. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +0 -0
  225. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  226. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +0 -0
  227. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  228. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  229. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  230. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  231. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=256,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  232. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  233. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  234. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  235. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
  236. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=64,device_name=NVIDIA_L20,dtype=int8_w8a8.json +0 -0
  237. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=64,device_name=NVIDIA_L40S,dtype=int8_w8a8.json +0 -0
  238. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  239. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  240. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
  241. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  242. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  243. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  244. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=1280,device_name=NVIDIA_H200.json +0 -0
  245. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  246. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  247. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=2560,device_name=NVIDIA_H200.json +0 -0
  248. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  249. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  250. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  251. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=320,device_name=NVIDIA_H200.json +0 -0
  252. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  253. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  254. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
  255. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +0 -0
  256. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  257. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  258. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  259. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=64,N=640,device_name=NVIDIA_H200.json +0 -0
  260. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +0 -0
  261. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +0 -0
  262. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=14336,device_name=AMD_Radeon_Graphics.json +0 -0
  263. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  264. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  265. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=14336,device_name=NVIDIA_H200.json +0 -0
  266. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +0 -0
  267. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +0 -0
  268. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=AMD_Radeon_Graphics.json +0 -0
  269. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +0 -0
  270. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  271. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  272. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  273. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=1792,device_name=NVIDIA_H200.json +0 -0
  274. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  275. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  276. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  277. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  278. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=2048,device_name=NVIDIA_H200.json +0 -0
  279. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +0 -0
  280. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +0 -0
  281. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=AMD_Radeon_Graphics.json +0 -0
  282. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +0 -0
  283. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  284. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +0 -0
  285. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  286. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  287. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  288. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_H200.json +0 -0
  289. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=3584,device_name=NVIDIA_L40S.json +0 -0
  290. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +0 -0
  291. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +0 -0
  292. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json +0 -0
  293. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  294. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  295. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  296. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  297. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=4096,device_name=NVIDIA_H200.json +0 -0
  298. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +0 -0
  299. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +0 -0
  300. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=AMD_Radeon_Graphics.json +0 -0
  301. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  302. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  303. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  304. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  305. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=7168,device_name=NVIDIA_H200.json +0 -0
  306. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +0 -0
  307. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +0 -0
  308. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=8192,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json +0 -0
  309. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  310. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/fused_moe_triton/configs/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  311. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/moe/router.py +0 -0
  312. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/parameter.py +0 -0
  313. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/pooler.py +0 -0
  314. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/awq.py +0 -0
  315. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/base_config.py +0 -0
  316. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/compressed_tensors/__init__.py +0 -0
  317. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/compressed_tensors/schemes/__init__.py +0 -0
  318. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +0 -0
  319. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +0 -0
  320. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/compressed_tensors/utils.py +0 -0
  321. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  322. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  323. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  324. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  325. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  326. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  327. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  328. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  329. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  330. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  331. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  332. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  333. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  334. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  335. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  336. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  337. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  338. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  339. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  340. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  341. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  342. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  343. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  344. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  345. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  346. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  347. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=24576,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  348. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=24576,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  349. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=24576,K=1536,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  350. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  351. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  352. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  353. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  354. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  355. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  356. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  357. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  358. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  359. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  360. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  361. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  362. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  363. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  364. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  365. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  366. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  367. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  368. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  369. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  370. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  371. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  372. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  373. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  374. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  375. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  376. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  377. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  378. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  379. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  380. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  381. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  382. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  383. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  384. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  385. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  386. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  387. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  388. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  389. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  390. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  391. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  392. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  393. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  394. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  395. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  396. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  397. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  398. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  399. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  400. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  401. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  402. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  403. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  404. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  405. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  406. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  407. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  408. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  409. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  410. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  411. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  412. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  413. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  414. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  415. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  416. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  417. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  418. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  419. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  420. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  421. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  422. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  423. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  424. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  425. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  426. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  427. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  428. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  429. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  430. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  431. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  432. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  433. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  434. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  435. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  436. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  437. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  438. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  439. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  440. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  441. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  442. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  443. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  444. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  445. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  446. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  447. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  448. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  449. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  450. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  451. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  452. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  453. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  454. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  455. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  456. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  457. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  458. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  459. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  460. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  461. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  462. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  463. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  464. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  465. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  466. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  467. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  468. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  469. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  470. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  471. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  472. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  473. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/fp8_kernel.py +0 -0
  474. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/gptq.py +0 -0
  475. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/int8_kernel.py +0 -0
  476. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/int8_utils.py +0 -0
  477. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/kv_cache.py +0 -0
  478. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/modelopt_quant.py +0 -0
  479. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/quantization/w8a8_fp8.py +0 -0
  480. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/sampler.py +0 -0
  481. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/torchao_utils.py +0 -0
  482. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/layers/vocab_parallel_embedding.py +0 -0
  483. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/lora/backend/__init__.py +0 -0
  484. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/lora/backend/base_backend.py +0 -0
  485. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/lora/backend/flashinfer_backend.py +0 -0
  486. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/lora/backend/triton_backend.py +0 -0
  487. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/lora/layers.py +0 -0
  488. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/lora/lora.py +0 -0
  489. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/lora/lora_config.py +0 -0
  490. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/lora/lora_manager.py +0 -0
  491. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/lora/mem_pool.py +0 -0
  492. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/lora/triton_ops/__init__.py +0 -0
  493. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/lora/triton_ops/gate_up_lora_b.py +0 -0
  494. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/lora/triton_ops/qkv_lora_b.py +0 -0
  495. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/lora/triton_ops/sgemm_lora_a.py +0 -0
  496. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/lora/triton_ops/sgemm_lora_b.py +0 -0
  497. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/lora/utils.py +0 -0
  498. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/managers/configure_logging.py +0 -0
  499. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/managers/data_parallel_controller.py +0 -0
  500. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/managers/detokenizer_manager.py +0 -0
  501. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/managers/expert_distribution.py +0 -0
  502. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/managers/io_struct.py +0 -0
  503. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/managers/schedule_policy.py +0 -0
  504. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/managers/scheduler_output_processor_mixin.py +0 -0
  505. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/managers/session_controller.py +0 -0
  506. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/managers/tp_worker.py +0 -0
  507. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/managers/tp_worker_overlap_thread.py +0 -0
  508. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/mem_cache/base_prefix_cache.py +0 -0
  509. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/mem_cache/chunk_cache.py +0 -0
  510. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/mem_cache/flush_cache.py +0 -0
  511. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/mem_cache/radix_cache.py +0 -0
  512. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/metrics/func_timer.py +0 -0
  513. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/mm_utils.py +0 -0
  514. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/model_loader/__init__.py +0 -0
  515. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/model_loader/utils.py +0 -0
  516. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/model_loader/weight_utils.py +0 -0
  517. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/model_parallel.py +0 -0
  518. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/baichuan.py +0 -0
  519. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/chatglm.py +0 -0
  520. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/commandr.py +0 -0
  521. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/dbrx.py +0 -0
  522. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/deepseek.py +0 -0
  523. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/deepseek_nextn.py +0 -0
  524. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/exaone.py +0 -0
  525. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/gemma.py +0 -0
  526. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/gemma2.py +0 -0
  527. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/gemma2_reward.py +0 -0
  528. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/gemma3_causal.py +0 -0
  529. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/gpt2.py +0 -0
  530. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/gpt_bigcode.py +0 -0
  531. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/granite.py +0 -0
  532. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/grok.py +0 -0
  533. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/internlm2.py +0 -0
  534. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/internlm2_reward.py +0 -0
  535. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/llama_classification.py +0 -0
  536. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/llama_eagle.py +0 -0
  537. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/llama_eagle3.py +0 -0
  538. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/llama_embedding.py +0 -0
  539. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/llama_reward.py +0 -0
  540. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/minicpm.py +0 -0
  541. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/minicpm3.py +0 -0
  542. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/mistral.py +0 -0
  543. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/mixtral.py +0 -0
  544. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/mixtral_quant.py +0 -0
  545. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/olmo.py +0 -0
  546. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/olmo2.py +0 -0
  547. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/olmoe.py +0 -0
  548. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/phi3_small.py +0 -0
  549. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/qwen.py +0 -0
  550. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/qwen2_classification.py +0 -0
  551. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/qwen2_eagle.py +0 -0
  552. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/qwen2_moe.py +0 -0
  553. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/qwen2_rm.py +0 -0
  554. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/registry.py +0 -0
  555. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/stablelm.py +0 -0
  556. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/torch_native_llama.py +0 -0
  557. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/xverse.py +0 -0
  558. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/xverse_moe.py +0 -0
  559. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/models/yivl.py +0 -0
  560. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/openai_api/protocol.py +0 -0
  561. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/patch_torch.py +0 -0
  562. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/reasoning_parser.py +0 -0
  563. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/sampling/custom_logit_processor.py +0 -0
  564. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/sampling/penaltylib/__init__.py +0 -0
  565. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/sampling/penaltylib/frequency_penalty.py +0 -0
  566. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/sampling/penaltylib/min_new_tokens.py +0 -0
  567. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/sampling/penaltylib/orchestrator.py +0 -0
  568. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/sampling/penaltylib/presence_penalty.py +0 -0
  569. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/sampling/sampling_batch_info.py +0 -0
  570. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/sampling/sampling_params.py +0 -0
  571. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/server.py +0 -0
  572. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/speculative/build_eagle_tree.py +0 -0
  573. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/speculative/spec_info.py +0 -0
  574. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/torch_memory_saver_adapter.py +0 -0
  575. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/srt/warmup.py +0 -0
  576. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/test/__init__.py +0 -0
  577. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/test/attention/__init__.py +0 -0
  578. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/test/attention/test_flashattn_backend.py +0 -0
  579. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/test/few_shot_gsm8k.py +0 -0
  580. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/test/few_shot_gsm8k_engine.py +0 -0
  581. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/test/run_eval.py +0 -0
  582. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/test/runners.py +0 -0
  583. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/test/send_one.py +0 -0
  584. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/test/simple_eval_common.py +0 -0
  585. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/test/simple_eval_gpqa.py +0 -0
  586. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/test/simple_eval_humaneval.py +0 -0
  587. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/test/simple_eval_math.py +0 -0
  588. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/test/simple_eval_mgsm.py +0 -0
  589. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/test/simple_eval_mmlu.py +0 -0
  590. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/test/test_activation.py +0 -0
  591. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/test/test_block_fp8.py +0 -0
  592. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/test/test_block_fp8_ep.py +0 -0
  593. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/test/test_dynamic_grad_mode.py +0 -0
  594. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/test/test_layernorm.py +0 -0
  595. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang/test/test_programs.py +0 -0
  596. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang.egg-info/dependency_links.txt +0 -0
  597. {sglang-0.4.4.post3 → sglang-0.4.5}/sglang.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: sglang
- Version: 0.4.4.post3
+ Version: 0.4.5
  Summary: SGLang is yet another fast serving framework for large language models and vision language models.
  License: Apache License
  Version 2.0, January 2004
@@ -234,18 +234,19 @@ Requires-Dist: pillow; extra == "runtime-common"
  Requires-Dist: prometheus-client>=0.20.0; extra == "runtime-common"
  Requires-Dist: psutil; extra == "runtime-common"
  Requires-Dist: pydantic; extra == "runtime-common"
+ Requires-Dist: pynvml; extra == "runtime-common"
  Requires-Dist: python-multipart; extra == "runtime-common"
  Requires-Dist: pyzmq>=25.1.2; extra == "runtime-common"
  Requires-Dist: soundfile==0.13.1; extra == "runtime-common"
  Requires-Dist: torchao>=0.7.0; extra == "runtime-common"
- Requires-Dist: transformers==4.50.0; extra == "runtime-common"
+ Requires-Dist: transformers==4.51.0; extra == "runtime-common"
  Requires-Dist: uvicorn; extra == "runtime-common"
  Requires-Dist: uvloop; extra == "runtime-common"
  Requires-Dist: compressed-tensors; extra == "runtime-common"
  Requires-Dist: xgrammar==0.1.17; extra == "runtime-common"
  Provides-Extra: srt
  Requires-Dist: sglang[runtime_common]; extra == "srt"
- Requires-Dist: sgl-kernel==0.0.5.post4; extra == "srt"
+ Requires-Dist: sgl-kernel==0.0.8; extra == "srt"
  Requires-Dist: flashinfer_python==0.2.3; extra == "srt"
  Requires-Dist: torch==2.5.1; extra == "srt"
  Requires-Dist: cuda-python; extra == "srt"
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "sglang"
- version = "0.4.4.post3"
+ version = "0.4.5"
  description = "SGLang is yet another fast serving framework for large language models and vision language models."
  readme = "README.md"
  requires-python = ">=3.8"
@@ -33,11 +33,12 @@ runtime_common = [
      "prometheus-client>=0.20.0",
      "psutil",
      "pydantic",
+     "pynvml",
      "python-multipart",
      "pyzmq>=25.1.2",
      "soundfile==0.13.1",
      "torchao>=0.7.0",
-     "transformers==4.50.0",
+     "transformers==4.51.0",
      "uvicorn",
      "uvloop",
      "compressed-tensors",
@@ -46,7 +47,7 @@ runtime_common = [

  srt = [
      "sglang[runtime_common]",
-     "sgl-kernel==0.0.5.post4",
+     "sgl-kernel==0.0.8",
      "flashinfer_python==0.2.3",
      "torch==2.5.1",
      "cuda-python",
@@ -44,6 +44,12 @@ ASSISTANT_SUFFIX = "Assistant:"
  global args


+ # don't want to import sglang package here
+ def _get_bool_env_var(name: str, default: str = "false") -> bool:
+     value = os.getenv(name, default)
+     return value.lower() in ("true", "1")
+
+
  @dataclass
  class RequestFuncInput:
      prompt: str
@@ -969,6 +975,7 @@ async def benchmark(
      extra_request_body: Dict[str, Any],
      profile: bool,
      pd_seperated: bool = False,
+     flush_cache: bool = False,
  ):
      if backend in ASYNC_REQUEST_FUNCS:
          request_func = ASYNC_REQUEST_FUNCS[backend]
@@ -986,13 +993,16 @@ async def benchmark(
          return await request_func(request_func_input=request_func_input, pbar=pbar)

      # Warmup
-     print("Starting initial single prompt test run...")
+     print(f"Starting warmup with {args.warmup_requests} sequences...")
+
+     # Use the first request for all warmup iterations
      test_prompt, test_prompt_len, test_output_len = input_requests[0]
      if lora_names != None and len(lora_names) != 0:
          lora_name = lora_names[0]
      else:
          lora_name = None

+     # Create the test input once
      test_input = RequestFuncInput(
          model=model_id,
          prompt=test_prompt,
@@ -1002,17 +1012,29 @@ async def benchmark(
          lora_name=lora_name,
          extra_request_body=extra_request_body,
      )
-     test_output = await request_func(request_func_input=test_input)
-     if not test_output.success:
+
+     # Run warmup requests
+     warmup_tasks = []
+     for _ in range(args.warmup_requests):
+         warmup_tasks.append(
+             asyncio.create_task(request_func(request_func_input=test_input))
+         )
+
+     warmup_outputs = await asyncio.gather(*warmup_tasks)
+
+     # Check if at least one warmup request succeeded
+     if not any(output.success for output in warmup_outputs):
          raise ValueError(
-             "Initial test run failed - Please make sure benchmark arguments "
-             f"are correctly specified. Error: {test_output.error}"
+             "Warmup failed - Please make sure benchmark arguments "
+             f"are correctly specified. Error: {warmup_outputs[0].error}"
          )
      else:
-         print("Initial test run completed. Starting main benchmark run...")
+         print(
+             f"Warmup completed with {args.warmup_requests} sequences. Starting main benchmark run..."
+         )

      # Flush cache
-     if "sglang" in backend:
+     if ("sglang" in backend and _get_bool_env_var("SGLANG_IS_IN_CI")) or flush_cache:
          requests.post(base_url + "/flush_cache", headers=get_auth_headers())

      time.sleep(1.0)
@@ -1246,6 +1268,10 @@ def run_benchmark(args_: argparse.Namespace):
      if not hasattr(args, "max_concurrency"):
          args.max_concurrency = None

+     # Set default value for warmup_requests if not present
+     if not hasattr(args, "warmup_requests"):
+         args.warmup_requests = 1
+
      print(f"benchmark_args={args}")

      # Set global environments
@@ -1347,6 +1373,10 @@ def run_benchmark(args_: argparse.Namespace):
      tokenizer = get_tokenizer(tokenizer_id)
      input_requests = get_dataset(args, tokenizer)

+     # compatible with SimpleNamespace
+     if not hasattr(args, "flush_cache"):
+         args.flush_cache = False
+
      return asyncio.run(
          benchmark(
              backend=backend,
@@ -1362,6 +1392,7 @@ def run_benchmark(args_: argparse.Namespace):
              extra_request_body=extra_request_body,
              profile=args.profile,
              pd_seperated=args.pd_seperated,
+             flush_cache=args.flush_cache,
          )
      )

@@ -1543,6 +1574,17 @@ if __name__ == "__main__":
          action="store_true",
          help="Benchmark PD disaggregation server",
      )
+     parser.add_argument(
+         "--flush-cache",
+         action="store_true",
+         help="Flush the cache before running the benchmark",
+     )
+     parser.add_argument(
+         "--warmup-requests",
+         type=int,
+         default=1,
+         help="Number of warmup requests to run before the benchmark",
+     )

      group = parser.add_argument_group("generated-shared-prefix dataset arguments")
      group.add_argument(
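Note: the bench_serving.py hunks above add a --warmup-requests flag (default 1, fanned out as concurrent tasks before timing starts) and a --flush-cache flag, and the automatic cache flush for sglang backends now only fires inside CI. A minimal sketch of the new flush condition; should_flush is a hypothetical helper name used here only for illustration:

    import os

    def _get_bool_env_var(name: str, default: str = "false") -> bool:
        value = os.getenv(name, default)
        return value.lower() in ("true", "1")

    def should_flush(backend: str, flush_cache: bool) -> bool:
        # Mirrors the condition added above: flush when explicitly requested,
        # or automatically for sglang backends when SGLANG_IS_IN_CI is set.
        return ("sglang" in backend and _get_bool_env_var("SGLANG_IS_IN_CI")) or flush_cache

    assert should_flush("sglang-oai", True)    # --flush-cache always flushes
    assert not should_flush("vllm", False)     # other backends stay untouched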
@@ -294,6 +294,30 @@ register_chat_template(
      )
  )

+ # Reference: https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct/blob/main/chat_template.json
+ register_chat_template(
+     ChatTemplate(
+         name="llama-4",
+         default_system_prompt=None,
+         role_prefix_and_suffix={
+             "system": (
+                 "<|header_start|>system<|header_end|>\n\n",
+                 "<|eot|>",
+             ),
+             "user": (
+                 "<|header_start|>user<|header_end|>\n\n",
+                 "<|eot|>",
+             ),
+             "assistant": (
+                 "<|header_start|>assistant<|header_end|>\n\n",
+                 "<|eot|>",
+             ),
+         },
+         stop_str=("<|eot|>",),
+         image_token="<|image|>",
+     )
+ )
+
  # Reference: https://modelscope.cn/models/01ai/Yi-1.5-34B-Chat/file/view/master?fileName=tokenizer_config.json&status=1
  register_chat_template(
      ChatTemplate(
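Note: the "llama-4" template above swaps Llama 3's <|start_header_id|>/<|eot_id|> markers for <|header_start|>/<|eot|>. Assembled by hand from the registered prefixes and suffixes, a single user turn awaiting an assistant reply renders as follows (the message text is illustrative):

    prompt = (
        "<|header_start|>user<|header_end|>\n\n"
        "What is the capital of France?<|eot|>"
        "<|header_start|>assistant<|header_end|>\n\n"
    )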
@@ -0,0 +1,117 @@
+ # Adapted from https://github.com/vllm-project/vllm/blob/v0.6.4.post1/vllm/_custom_ops.py
+ import logging
+ import os
+ from typing import List, Tuple
+
+ import torch
+ import torch.library
+
+ from sglang.srt.utils import get_bool_env_var, is_hip, is_hpu
+
+ logger = logging.getLogger(__name__)
+ use_vllm_custom_allreduce = get_bool_env_var(
+     "USE_VLLM_CUSTOM_ALLREDUCE", default="false"
+ )
+
+ if not is_hpu():
+     # ROCm does not use vllm custom allreduce
+     if use_vllm_custom_allreduce and not is_hip():
+         try:
+             import vllm._C
+         except ImportError as e:
+             logger.warning("Failed to import from vllm._C with %r", e)
+     else:
+         try:
+             import sgl_kernel
+         except ImportError as e:
+             logger.warning("Failed to import from custom_ar with %r", e)
+
+
+ if not is_hip():
+     if use_vllm_custom_allreduce:
+         custom_op = torch.ops._C_custom_ar
+     else:
+         custom_op = sgl_kernel.allreduce
+
+     # custom allreduce
+     def init_custom_ar(
+         ipc_tensors: List[torch.Tensor],
+         rank_data: torch.Tensor,
+         rank: int,
+         full_nvlink: bool,
+     ) -> int:
+         return custom_op.init_custom_ar(ipc_tensors, rank_data, rank, full_nvlink)
+
+     def all_reduce(
+         fa: int,
+         inp: torch.Tensor,
+         out: torch.Tensor,
+         reg_buffer: int,
+         reg_buffer_sz_bytes: int,
+     ) -> None:
+         custom_op.all_reduce(fa, inp, out, reg_buffer, reg_buffer_sz_bytes)
+
+     def dispose(fa: int) -> None:
+         custom_op.dispose(fa)
+
+     def meta_size() -> int:
+         return custom_op.meta_size()
+
+     def register_buffer(fa: int, ipc_tensors: List[int]) -> None:
+         return custom_op.register_buffer(fa, ipc_tensors)
+
+     def get_graph_buffer_ipc_meta(fa: int) -> Tuple[List[int], List[int]]:
+         return custom_op.get_graph_buffer_ipc_meta(fa)
+
+     def register_graph_buffers(
+         fa: int, handles: List[List[int]], offsets: List[List[int]]
+     ) -> None:
+         custom_op.register_graph_buffers(fa, handles, offsets)
+
+ else:
+     # ROCM custom allreduce
+
+     def init_custom_ar(
+         meta: torch.Tensor,
+         rank_data: torch.Tensor,
+         handles: List[str],
+         offsets: List[int],
+         rank: int,
+         full_nvlink: bool,
+     ) -> int:
+         return sgl_kernel.allreduce.init_custom_ar(
+             meta, rank_data, handles, offsets, rank, full_nvlink
+         )
+
+     def all_reduce_reg(fa: int, inp: torch.Tensor, out: torch.Tensor) -> None:
+         sgl_kernel.allreduce.all_reduce_reg(fa, inp, out)
+
+     def all_reduce_unreg(
+         fa: int, inp: torch.Tensor, reg_buffer: torch.Tensor, out: torch.Tensor
+     ) -> None:
+         sgl_kernel.allreduce.all_reduce_unreg(fa, inp, reg_buffer, out)
+
+     def dispose(fa: int) -> None:
+         sgl_kernel.allreduce.dispose(fa)
+
+     def meta_size() -> int:
+         return sgl_kernel.allreduce.meta_size()
+
+     def register_buffer(
+         fa: int, t: torch.Tensor, handles: List[str], offsets: List[int]
+     ) -> None:
+         return sgl_kernel.allreduce.register_buffer(fa, t, handles, offsets)
+
+     def get_graph_buffer_ipc_meta(fa: int) -> Tuple[torch.Tensor, List[int]]:
+         return sgl_kernel.allreduce.get_graph_buffer_ipc_meta(fa)
+
+     def register_graph_buffers(
+         fa: int, handles: List[str], offsets: List[List[int]]
+     ) -> None:
+         sgl_kernel.allreduce.register_graph_buffers(fa, handles, offsets)
+
+     def allocate_meta_buffer(size: int) -> torch.Tensor:
+         return sgl_kernel.allreduce.allocate_meta_buffer(size)
+
+     def get_meta_buffer_ipc_handle(inp: torch.Tensor) -> torch.Tensor:
+         return sgl_kernel.allreduce.get_meta_buffer_ipc_handle(inp)
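Note: the new sglang/srt/_custom_ops.py picks its all-reduce kernel namespace at import time: sgl_kernel.allreduce by default, or torch.ops._C_custom_ar when USE_VLLM_CUSTOM_ALLREDUCE is set; on ROCm it always binds the sgl_kernel path. A sketch of opting back into the vLLM kernels, assuming vllm._C is installed:

    import os

    # The flag is read at import time, so it must be set before the
    # module is first imported anywhere in the process.
    os.environ["USE_VLLM_CUSTOM_ALLREDUCE"] = "true"

    from sglang.srt import _custom_ops as ops  # now dispatches to torch.ops._C_custom_ar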
@@ -65,6 +65,9 @@ class ModelConfig:
              **kwargs,
          )
          self.hf_text_config = get_hf_text_config(self.hf_config)
+         self.attention_chunk_size = getattr(
+             self.hf_text_config, "attention_chunk_size", None
+         )

          # Check model type
          self.is_generation = is_generation_model(
@@ -258,6 +261,7 @@ class ModelConfig:
              "experts_int8",
              "w8a8_int8",
              "w8a8_fp8",
+             "moe_wna16",
          ]
          compatible_quantization_methods = {
              "w8a8_int8": ["compressed-tensors", "compressed_tensors"],
@@ -466,6 +470,7 @@ multimodal_model_archs = [
      "Gemma3ForConditionalGeneration",
      "Grok1VForCausalLM",
      "Grok1AForCausalLM",
+     # TODO: add multimodal support for "Llama4ForConditionalGeneration",
      "LlavaLlamaForCausalLM",
      "LlavaMistralForCausalLM",
      "LlavaQwenForCausalLM",
@@ -169,7 +169,9 @@ class BaseGrammarBackend(ABC):
          self.cache.clear()


- def create_grammar_backend(server_args: ServerArgs, tokenizer, vocab_size):
+ def create_grammar_backend(
+     server_args: ServerArgs, tokenizer, vocab_size: int
+ ) -> Optional[BaseGrammarBackend]:
      if server_args.grammar_backend == "outlines":
          from sglang.srt.constrained.outlines_backend import OutlinesGrammarBackend

@@ -188,6 +190,8 @@ def create_grammar_backend(server_args: ServerArgs, tokenizer, vocab_size):
              tokenizer=tokenizer,
              whitespace_pattern=server_args.constrained_json_whitespace_pattern,
          )
+     elif server_args.grammar_backend == "none":
+         return None
      else:
          raise ValueError(f"Invalid grammar backend: {server_args.grammar_backend}")

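Note: create_grammar_backend now carries an Optional return type because the new "none" backend legitimately produces no backend at all, disabling constrained decoding, so callers must handle None. A sketch under the assumptions that ServerArgs accepts these keyword fields and that the tokenizer is never consulted on the "none" path (the model path and vocab size are illustrative):

    from sglang.srt.constrained.base_grammar_backend import create_grammar_backend
    from sglang.srt.server_args import ServerArgs

    server_args = ServerArgs(
        model_path="meta-llama/Llama-3.1-8B-Instruct",  # illustrative
        grammar_backend="none",
    )
    backend = create_grammar_backend(server_args, tokenizer=None, vocab_size=128256)
    assert backend is None  # constrained decoding is disabled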
@@ -33,6 +33,7 @@ class SeparatorStyle(IntEnum):
      ADD_NEW_LINE_SINGLE = auto()
      LLAMA2 = auto()
      LLAMA3 = auto()
+     LLAMA4 = auto()
      CHATGLM = auto()
      CHATML = auto()
      CHATINTERN = auto()
@@ -156,19 +157,30 @@ class Conversation:
                  else:
                      ret += role + ":"
              return ret
+         elif self.sep_style == SeparatorStyle.LLAMA4:
+             # begin_of_text is added by default
+             if self.system_message:
+                 ret = system_prompt
+             else:
+                 ret = ""
+             for i, (role, message) in enumerate(self.messages):
+                 if message:
+                     ret += f"<|header_start|>{role}<|header_end|>\n\n"
+                     ret += f"{message.strip()}<|eot|>"
+                 else:
+                     ret += f"<|header_start|>{role}<|header_end|>\n\n"
+             return ret
          elif self.sep_style == SeparatorStyle.LLAMA3:
-             ret = "<|begin_of_text|>"
              if self.system_message:
-                 ret += system_prompt
+                 ret = system_prompt
              else:
-                 ret += ""
+                 ret = ""
              for i, (role, message) in enumerate(self.messages):
                  if message:
                      ret += f"<|start_header_id|>{role}<|end_header_id|>\n\n"
                      ret += f"{message.strip()}<|eot_id|>"
                  else:
                      ret += f"<|start_header_id|>{role}<|end_header_id|>\n\n"
-             # print(ret)
              return ret
          elif self.sep_style == SeparatorStyle.LLAMA2:
              seps = [self.sep, self.sep2]
@@ -561,6 +573,19 @@ register_conv_template(
      )
  )

+ # reference: https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct/blob/main/chat_template.json
+ register_conv_template(
+     Conversation(
+         name="llama-4",
+         system_template="<|header_start|>system<|header_end|>\n\n{system_message}<|eot|>",
+         roles=("user", "assistant"),
+         sep_style=SeparatorStyle.LLAMA4,
+         sep="",
+         stop_str=["<|end_of_text|>", "<|eot|>", "<|eom|>"],
+         image_token="<|image|>",
+     )
+ )
+
  register_conv_template(
      Conversation(
          name="chatml",
@@ -50,6 +50,7 @@ if _is_cuda:
      def scaled_fp8_quant(
          input: torch.Tensor,
          scale: Optional[torch.Tensor] = None,
+         num_token_padding: Optional[int] = None,
          use_per_token_if_dynamic: bool = False,
      ) -> tuple[torch.Tensor, torch.Tensor]:
          """
@@ -59,6 +60,8 @@ if _is_cuda:
              input (torch.Tensor): Input tensor to be quantized
              scale (Optional[torch.Tensor]): Pre-computed scaling factor for static quantization.
                  If None, scales will be computed dynamically.
+             num_token_padding (Optional[int]): If specified, pad the first dimension
+                 of the output to at least this value.
              use_per_token_if_dynamic (bool): When using dynamic scaling (scale=None),
                  determines the quantization granularity:
                  - True: compute scale per token
@@ -75,6 +78,8 @@ if _is_cuda:
          assert input.ndim == 2, f"Expected 2D input tensor, got {input.ndim}D"
          shape = input.shape
          out_dtype = torch.float8_e4m3fnuz if _is_hip else torch.float8_e4m3fn
+         if num_token_padding:
+             shape = (max(num_token_padding, input.shape[0]), shape[1])
          output = torch.empty(shape, device=input.device, dtype=out_dtype)

          if scale is None:
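Note: num_token_padding pads the first dimension of the quantized output to at least the requested row count, which helps when downstream kernels expect a fixed batch size; rows beyond the input come from torch.empty and are uninitialized. A sketch of the shape arithmetic the new lines implement; padded_shape is a hypothetical helper used here only to make the rule explicit:

    def padded_shape(num_tokens: int, hidden: int, num_token_padding: int | None) -> tuple[int, int]:
        # Mirrors: shape = (max(num_token_padding, input.shape[0]), shape[1])
        rows = max(num_token_padding, num_tokens) if num_token_padding else num_tokens
        return (rows, hidden)

    assert padded_shape(3, 4096, 16) == (16, 4096)   # padded up to 16 rows
    assert padded_shape(32, 4096, 16) == (32, 4096)  # already large enough
    assert padded_shape(3, 4096, None) == (3, 4096)  # unchanged without padding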
@@ -18,7 +18,7 @@ from sglang.srt.distributed.device_communicators.custom_all_reduce_utils import
      gpu_p2p_access_check,
  )
  from sglang.srt.distributed.parallel_state import in_the_same_node_as
- from sglang.srt.utils import cuda_device_count_stateless, is_cuda, is_hip
+ from sglang.srt.utils import is_cuda, is_hip

  logger = logging.getLogger(__name__)

@@ -217,7 +217,7 @@ class CustomAllreduce:
          if cuda_visible_devices:
              device_ids = list(map(int, cuda_visible_devices.split(",")))
          else:
-             device_ids = list(range(cuda_device_count_stateless()))
+             device_ids = list(range(torch.cuda.device_count()))

          physical_device_id = device_ids[device.index]
          tensor = torch.tensor([physical_device_id], dtype=torch.int, device="cpu")
@@ -257,7 +257,7 @@ class CustomAllreduce:
          self.world_size = world_size
          self.full_nvlink = full_nvlink

-         if ops.use_vllm_custom_allreduce and not _is_hip:
+         if not _is_hip:
              # Buffers memory are owned by this Python class and passed to C++.
              # Meta data composes of two parts: meta data for synchronization and a
              # temporary buffer for storing intermediate allreduce results.
@@ -280,56 +280,24 @@ class CustomAllreduce:
              )
              ops.register_buffer(self._ptr, self.buffer_ptrs)
          else:
-             if _is_hip:
-                 # meta data buffers need to be "uncached" for signal on MI200
-                 self.meta = ops.allocate_meta_buffer(ops.meta_size() + max_size)
-                 self.buffer = torch.empty(
-                     max_size, dtype=torch.uint8, device=self.device
-                 )
-                 handle = ops.get_meta_buffer_ipc_handle(self.meta)
-                 shard_data = (
-                     bytes(handle),  # ipc handle to base ptr
-                     0,  # offset of base ptr
-                 )
-                 handles, offsets = self._gather_ipc_meta(shard_data)
-                 self.rank_data = torch.empty(
-                     8 * 1024 * 1024, dtype=torch.uint8, device=self.device
-                 )
-                 self._ptr = ops.init_custom_ar(
-                     self.meta, self.rank_data, handles, offsets, rank, self.full_nvlink
-                 )
-                 self.register_buffer(self.buffer)
-                 self.MSCCL = os.getenv("RCCL_MSCCL_ENABLE", "1") == "1"
-             else:
-                 # From TensorRT-LLM getMaxRequiredWorkspaceSize
-                 self.max_required_workspace_size = [16 * 1024 * 1024, 8 * 1024 * 1024]
-
-                 # sizeof(uint32_t) * (MAX_ALL_REDUCE_BLOCKS + 2) * MAX_RANKS_PER_NODE;
-                 self.barrier_max_size = 8 * (36 + 2) * 8
-
-                 self.buffer_ptrs = self.create_shared_buffer(max_size, group=group)
-                 self.tmp_result_buffer_ptrs = self.create_shared_buffer(
-                     max_size, group=group
-                 )
-                 self.rank_data_base = torch.empty(
-                     8 * 1024 * 1024, dtype=torch.uint8, device=self.device
-                 )
-                 self.barrier_in_ptrs = self.create_shared_buffer(
-                     self.barrier_max_size, group=group
-                 )
-                 self.barrier_out_ptrs = self.create_shared_buffer(
-                     self.barrier_max_size, group=group
-                 )
+             # meta data buffers need to be "uncached" for signal on MI200
+             self.meta = ops.allocate_meta_buffer(ops.meta_size() + max_size)
+             self.buffer = torch.empty(max_size, dtype=torch.uint8, device=self.device)
+             handle = ops.get_meta_buffer_ipc_handle(self.meta)
+             shard_data = (
+                 bytes(handle),  # ipc handle to base ptr
+                 0,  # offset of base ptr
+             )
+             handles, offsets = self._gather_ipc_meta(shard_data)
+             self.rank_data = torch.empty(
+                 8 * 1024 * 1024, dtype=torch.uint8, device=self.device
+             )
+             self._ptr = ops.init_custom_ar(
+                 self.meta, self.rank_data, handles, offsets, rank, self.full_nvlink
+             )
+             self.register_buffer(self.buffer)
+             self.MSCCL = os.getenv("RCCL_MSCCL_ENABLE", "1") == "1"

-             self._ptr = ops.init_custom_ar(
-                 rank,
-                 world_size,
-                 self.rank_data_base,
-                 self.buffer_ptrs,
-                 self.tmp_result_buffer_ptrs,
-                 self.barrier_in_ptrs,
-                 self.barrier_out_ptrs,
-             )
          self.disabled = False

      @staticmethod
@@ -455,7 +423,7 @@ class CustomAllreduce:
              return False
          # for 4 or more non NVLink-capable GPUs, custom allreduce provides
          # little performance improvement over NCCL.
-         if ops.use_vllm_custom_allreduce and not _is_hip:
+         if not _is_hip:
              if self.world_size == 2 or self.full_nvlink:
                  return inp_size < self.max_size
              return False
@@ -471,18 +439,6 @@ class CustomAllreduce:
                  return inp_size < self.max_size
              return False

-         if self.world_size == 2:
-             return (
-                 inp_size < self.max_size
-                 and inp_size < self.max_required_workspace_size[0]
-             )
-
-         if self.full_nvlink:
-             return (
-                 inp_size < self.max_size
-                 and inp_size < self.max_required_workspace_size[1]
-             )
-
          return False

      # all reduce, assuming inp tensor is IPC registered with register_buffer,
@@ -515,15 +471,12 @@ class CustomAllreduce:
          """
          if out is None:
              out = torch.empty_like(inp)
-         if ops.use_vllm_custom_allreduce:
-             if registered:
-                 ops.all_reduce(self._ptr, inp, out, 0, 0)
-             else:
-                 ops.all_reduce(
-                     self._ptr, inp, out, self.buffer_ptrs[self.rank], self.max_size
-                 )
+         if registered:
+             ops.all_reduce(self._ptr, inp, out, 0, 0)
          else:
-             ops.all_reduce(self._ptr, inp, out)
+             ops.all_reduce(
+                 self._ptr, inp, out, self.buffer_ptrs[self.rank], self.max_size
+             )
          return out

      def custom_all_reduce(self, input: torch.Tensor) -> Optional[torch.Tensor]:
@@ -554,14 +507,9 @@ class CustomAllreduce:
      def close(self):
          if not self.disabled and self._ptr:
              ops.dispose(self._ptr)
-             if ops.use_vllm_custom_allreduce:
+             if _is_cuda:
                  self.free_shared_buffer(self.meta_ptrs)
                  self.free_shared_buffer(self.buffer_ptrs)
-             elif _is_cuda:
-                 self.free_shared_buffer(self.buffer_ptrs)
-                 self.free_shared_buffer(self.tmp_result_buffer_ptrs)
-                 self.free_shared_buffer(self.barrier_in_ptrs)
-                 self.free_shared_buffer(self.barrier_out_ptrs)
              self._ptr = 0

      def __del__(self):
@@ -11,11 +11,11 @@ import tempfile
  from itertools import product
  from typing import Dict, List, Optional, Sequence

+ import torch
  import torch.distributed as dist
  import torch.multiprocessing as mp

  from sglang.srt.distributed.device_communicators.cuda_wrapper import CudaRTLibrary
- from sglang.srt.utils import cuda_device_count_stateless

  logger = logging.getLogger(__name__)

@@ -218,7 +218,7 @@ def gpu_p2p_access_check(src: int, tgt: int) -> bool:

      is_distributed = dist.is_initialized()

-     num_dev = cuda_device_count_stateless()
+     num_dev = torch.cuda.device_count()
      cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", None)
      if cuda_visible_devices is None:
          cuda_visible_devices = ",".join(str(i) for i in range(num_dev))
@@ -151,10 +151,6 @@ class Engine:
          The arguments of this function is the same as `sglang/srt/managers/io_struct.py::GenerateReqInput`.
          Please refer to `GenerateReqInput` for the documentation.
          """
-         modalities_list = []
-         if image_data is not None:
-             modalities_list.append("image")
-
          obj = GenerateReqInput(
              text=prompt,
              input_ids=input_ids,
@@ -165,7 +161,6 @@ class Engine:
              top_logprobs_num=top_logprobs_num,
              token_ids_logprob=token_ids_logprob,
              lora_path=lora_path,
-             modalities=modalities_list,
              custom_logit_processor=custom_logit_processor,
              return_hidden_states=return_hidden_states,
              stream=stream,