sglang 0.4.9.post5__tar.gz → 0.4.9.post6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (788)
  1. {sglang-0.4.9.post5/sglang.egg-info → sglang-0.4.9.post6}/PKG-INFO +2 -1
  2. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/pyproject.toml +2 -1
  3. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/configs/model_config.py +3 -0
  4. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/entrypoints/http_server.py +13 -1
  5. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/entrypoints/openai/protocol.py +3 -1
  6. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/entrypoints/openai/serving_base.py +5 -2
  7. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/ep_moe/layer.py +152 -37
  8. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/ep_moe/token_dispatcher.py +83 -118
  9. sglang-0.4.9.post6/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=160,N=192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  10. sglang-0.4.9.post6/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=160,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  11. sglang-0.4.9.post6/sglang/srt/layers/moe/token_dispatcher/base_dispatcher.py +48 -0
  12. sglang-0.4.9.post6/sglang/srt/layers/moe/token_dispatcher/standard.py +19 -0
  13. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/topk.py +6 -2
  14. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/modelopt_quant.py +2 -0
  15. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/managers/data_parallel_controller.py +4 -0
  16. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/managers/io_struct.py +12 -0
  17. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/managers/scheduler.py +29 -0
  18. sglang-0.4.9.post6/sglang/srt/managers/scheduler_input_blocker.py +106 -0
  19. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/managers/tokenizer_manager.py +43 -9
  20. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/managers/tp_worker.py +5 -0
  21. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/model_executor/model_runner.py +15 -13
  22. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/deepseek_v2.py +13 -56
  23. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/qwen3_moe.py +12 -69
  24. sglang-0.4.9.post6/sglang/srt/poll_based_barrier.py +31 -0
  25. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/server_args.py +8 -0
  26. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/two_batch_overlap.py +8 -3
  27. sglang-0.4.9.post6/sglang/test/attention/__init__.py +0 -0
  28. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/test_utils.py +53 -0
  29. sglang-0.4.9.post6/sglang/version.py +1 -0
  30. {sglang-0.4.9.post5 → sglang-0.4.9.post6/sglang.egg-info}/PKG-INFO +2 -1
  31. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang.egg-info/SOURCES.txt +7 -0
  32. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang.egg-info/requires.txt +1 -0
  33. sglang-0.4.9.post5/sglang/version.py +0 -1
  34. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/LICENSE +0 -0
  35. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/README.md +0 -0
  36. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/setup.cfg +0 -0
  37. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/__init__.py +0 -0
  38. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/api.py +0 -0
  39. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/bench_offline_throughput.py +0 -0
  40. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/bench_one_batch.py +0 -0
  41. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/bench_one_batch_server.py +0 -0
  42. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/bench_serving.py +0 -0
  43. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/check_env.py +0 -0
  44. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/compile_deep_gemm.py +0 -0
  45. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/eval/llama3_eval.py +0 -0
  46. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/eval/loogle_eval.py +0 -0
  47. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/global_config.py +0 -0
  48. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/lang/backend/__init__.py +0 -0
  49. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/lang/backend/anthropic.py +0 -0
  50. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/lang/backend/base_backend.py +0 -0
  51. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/lang/backend/litellm.py +0 -0
  52. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/lang/backend/openai.py +0 -0
  53. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/lang/backend/runtime_endpoint.py +0 -0
  54. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/lang/backend/vertexai.py +0 -0
  55. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/lang/chat_template.py +0 -0
  56. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/lang/choices.py +0 -0
  57. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/lang/compiler.py +0 -0
  58. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/lang/interpreter.py +0 -0
  59. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/lang/ir.py +0 -0
  60. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/lang/tracer.py +0 -0
  61. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/launch_server.py +0 -0
  62. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/profiler.py +0 -0
  63. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/_custom_ops.py +0 -0
  64. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/aio_rwlock.py +0 -0
  65. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/code_completion_parser.py +0 -0
  66. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/configs/__init__.py +0 -0
  67. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/configs/chatglm.py +0 -0
  68. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/configs/dbrx.py +0 -0
  69. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/configs/deepseekvl2.py +0 -0
  70. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/configs/device_config.py +0 -0
  71. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/configs/exaone.py +0 -0
  72. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/configs/internvl.py +0 -0
  73. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/configs/janus_pro.py +0 -0
  74. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/configs/kimi_vl.py +0 -0
  75. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/configs/kimi_vl_moonvit.py +0 -0
  76. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/configs/load_config.py +0 -0
  77. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/configs/update_config.py +0 -0
  78. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/configs/utils.py +0 -0
  79. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/connector/__init__.py +0 -0
  80. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/connector/base_connector.py +0 -0
  81. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/connector/redis.py +0 -0
  82. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/connector/s3.py +0 -0
  83. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/connector/serde/__init__.py +0 -0
  84. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/connector/serde/safe_serde.py +0 -0
  85. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/connector/serde/serde.py +0 -0
  86. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/connector/utils.py +0 -0
  87. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/constants.py +0 -0
  88. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/constrained/base_grammar_backend.py +0 -0
  89. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/constrained/llguidance_backend.py +0 -0
  90. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/constrained/outlines_backend.py +0 -0
  91. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/constrained/outlines_jump_forward.py +0 -0
  92. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/constrained/reasoner_grammar_backend.py +0 -0
  93. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/constrained/triton_ops/bitmask_ops.py +0 -0
  94. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/constrained/xgrammar_backend.py +0 -0
  95. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/conversation.py +0 -0
  96. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/custom_op.py +0 -0
  97. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/debug_utils/__init__.py +0 -0
  98. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/debug_utils/dump_comparator.py +0 -0
  99. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/debug_utils/dumper.py +0 -0
  100. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/debug_utils/text_comparator.py +0 -0
  101. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/disaggregation/ascend/__init__.py +0 -0
  102. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/disaggregation/ascend/conn.py +0 -0
  103. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/disaggregation/ascend/transfer_engine.py +0 -0
  104. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/disaggregation/base/__init__.py +0 -0
  105. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/disaggregation/base/conn.py +0 -0
  106. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/disaggregation/common/__init__.py +0 -0
  107. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/disaggregation/common/conn.py +0 -0
  108. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/disaggregation/common/utils.py +0 -0
  109. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/disaggregation/decode.py +0 -0
  110. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/disaggregation/decode_schedule_batch_mixin.py +0 -0
  111. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/disaggregation/fake/__init__.py +0 -0
  112. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/disaggregation/fake/conn.py +0 -0
  113. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/disaggregation/kv_events.py +0 -0
  114. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/disaggregation/launch_lb.py +0 -0
  115. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/disaggregation/mini_lb.py +0 -0
  116. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/disaggregation/mooncake/__init__.py +0 -0
  117. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/disaggregation/mooncake/conn.py +0 -0
  118. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/disaggregation/mooncake/transfer_engine.py +0 -0
  119. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/disaggregation/nixl/__init__.py +0 -0
  120. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/disaggregation/nixl/conn.py +0 -0
  121. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/disaggregation/prefill.py +0 -0
  122. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/disaggregation/utils.py +0 -0
  123. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/distributed/__init__.py +0 -0
  124. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/distributed/communication_op.py +0 -0
  125. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/distributed/device_communicators/cuda_wrapper.py +0 -0
  126. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/distributed/device_communicators/custom_all_reduce.py +0 -0
  127. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/distributed/device_communicators/custom_all_reduce_utils.py +0 -0
  128. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/distributed/device_communicators/hpu_communicator.py +0 -0
  129. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/distributed/device_communicators/npu_communicator.py +0 -0
  130. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/distributed/device_communicators/pymscclpp.py +0 -0
  131. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/distributed/device_communicators/pynccl.py +0 -0
  132. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/distributed/device_communicators/pynccl_wrapper.py +0 -0
  133. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/distributed/device_communicators/quick_all_reduce.py +0 -0
  134. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/distributed/device_communicators/shm_broadcast.py +0 -0
  135. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/distributed/device_communicators/xpu_communicator.py +0 -0
  136. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/distributed/parallel_state.py +0 -0
  137. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/distributed/utils.py +0 -0
  138. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/entrypoints/EngineBase.py +0 -0
  139. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/entrypoints/engine.py +0 -0
  140. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/entrypoints/http_server_engine.py +0 -0
  141. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/entrypoints/openai/__init__.py +0 -0
  142. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/entrypoints/openai/serving_chat.py +0 -0
  143. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/entrypoints/openai/serving_completions.py +0 -0
  144. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/entrypoints/openai/serving_embedding.py +0 -0
  145. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/entrypoints/openai/serving_rerank.py +0 -0
  146. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/entrypoints/openai/serving_score.py +0 -0
  147. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/entrypoints/openai/usage_processor.py +0 -0
  148. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/entrypoints/openai/utils.py +0 -0
  149. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/eplb/__init__.py +0 -0
  150. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/eplb/eplb_algorithms/__init__.py +0 -0
  151. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/eplb/eplb_algorithms/deepseek.py +0 -0
  152. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/eplb/eplb_algorithms/deepseek_vec.py +0 -0
  153. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/eplb/eplb_manager.py +0 -0
  154. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/eplb/eplb_simulator/__init__.py +0 -0
  155. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/eplb/eplb_simulator/reader.py +0 -0
  156. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/eplb/expert_distribution.py +0 -0
  157. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/eplb/expert_location.py +0 -0
  158. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/eplb/expert_location_dispatch.py +0 -0
  159. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/eplb/expert_location_updater.py +0 -0
  160. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/function_call/base_format_detector.py +0 -0
  161. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/function_call/core_types.py +0 -0
  162. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/function_call/deepseekv3_detector.py +0 -0
  163. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/function_call/ebnf_composer.py +0 -0
  164. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/function_call/function_call_parser.py +0 -0
  165. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/function_call/glm4_moe_detector.py +0 -0
  166. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/function_call/kimik2_detector.py +0 -0
  167. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/function_call/llama32_detector.py +0 -0
  168. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/function_call/mistral_detector.py +0 -0
  169. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/function_call/pythonic_detector.py +0 -0
  170. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/function_call/qwen25_detector.py +0 -0
  171. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/function_call/qwen3_coder_detector.py +0 -0
  172. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/function_call/utils.py +0 -0
  173. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/hf_transformers_utils.py +0 -0
  174. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/jinja_template_utils.py +0 -0
  175. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/activation.py +0 -0
  176. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/amx_utils.py +0 -0
  177. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/attention/aiter_backend.py +0 -0
  178. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/attention/ascend_backend.py +0 -0
  179. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/attention/base_attn_backend.py +0 -0
  180. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/attention/cutlass_mla_backend.py +0 -0
  181. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/attention/double_sparsity_backend.py +0 -0
  182. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/attention/flashattention_backend.py +0 -0
  183. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/attention/flashinfer_backend.py +0 -0
  184. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/attention/flashinfer_mla_backend.py +0 -0
  185. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/attention/flashmla_backend.py +0 -0
  186. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/attention/hybrid_attn_backend.py +0 -0
  187. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/attention/intel_amx_backend.py +0 -0
  188. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/attention/merge_state.py +0 -0
  189. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/attention/tbo_backend.py +0 -0
  190. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/attention/torch_native_backend.py +0 -0
  191. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/attention/triton_backend.py +0 -0
  192. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/attention/triton_ops/decode_attention.py +0 -0
  193. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/attention/triton_ops/double_sparsity_attention.py +0 -0
  194. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/attention/triton_ops/extend_attention.py +0 -0
  195. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/attention/triton_ops/merge_state.py +0 -0
  196. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/attention/triton_ops/prefill_attention.py +0 -0
  197. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/attention/triton_ops/rocm_mla_decode_rope.py +0 -0
  198. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/attention/utils.py +0 -0
  199. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/attention/vision.py +0 -0
  200. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/communicator.py +0 -0
  201. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/dp_attention.py +0 -0
  202. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/elementwise.py +0 -0
  203. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/flashinfer_comm_fusion.py +0 -0
  204. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/layernorm.py +0 -0
  205. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/linear.py +0 -0
  206. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/logits_processor.py +0 -0
  207. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/cutlass_moe.py +0 -0
  208. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/cutlass_moe_params.py +0 -0
  209. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/cutlass_w4a8_moe.py +0 -0
  210. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/ep_moe/__init__.py +0 -0
  211. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/ep_moe/kernels.py +0 -0
  212. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_native.py +0 -0
  213. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/__init__.py +0 -0
  214. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  215. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  216. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  217. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  218. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  219. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +0 -0
  220. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=3072,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  221. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  222. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  223. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  224. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=1,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  225. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=144,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  226. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  227. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=1024,device_name=NVIDIA_H200.json +0 -0
  228. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=1344,device_name=NVIDIA_A100-SXM4-40GB.json +0 -0
  229. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=1344,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  230. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=1344,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  231. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  232. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=14336,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  233. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  234. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  235. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  236. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=2688,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  237. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=2688,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  238. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=3072,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  239. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=3072,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +0 -0
  240. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=3200,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  241. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  242. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  243. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=6400,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  244. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a16.json +0 -0
  245. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  246. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=int8_w8a16.json +0 -0
  247. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=16,N=800,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  248. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=160,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
  249. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=20,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  250. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=24,N=1024,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  251. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  252. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8.json +0 -0
  253. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  254. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +0 -0
  255. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  256. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=128,device_name=NVIDIA_H20,block_shape=[128, 128].json +0 -0
  257. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  258. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  259. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  260. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  261. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=256,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  262. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  263. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  264. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  265. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
  266. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=64,device_name=NVIDIA_L20,dtype=int8_w8a8.json +0 -0
  267. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=256,N=64,device_name=NVIDIA_L40S,dtype=int8_w8a8.json +0 -0
  268. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  269. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=1280,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  270. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=1280,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
  271. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  272. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  273. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  274. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=1280,device_name=NVIDIA_H200.json +0 -0
  275. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=2560,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  276. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=2560,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  277. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=2560,device_name=NVIDIA_H200.json +0 -0
  278. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  279. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=320,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  280. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=320,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  281. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=320,device_name=NVIDIA_H200.json +0 -0
  282. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  283. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=640,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  284. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=640,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
  285. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=640,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +0 -0
  286. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  287. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=640,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  288. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=640,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  289. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=64,N=640,device_name=NVIDIA_H200.json +0 -0
  290. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=14336,device_name=AMD_Instinct_MI300X.json +0 -0
  291. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=14336,device_name=AMD_Instinct_MI325X.json +0 -0
  292. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=14336,device_name=AMD_Radeon_Graphics.json +0 -0
  293. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=14336,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  294. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=14336,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  295. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=14336,device_name=NVIDIA_H200.json +0 -0
  296. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=1792,device_name=AMD_Instinct_MI300X.json +0 -0
  297. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=1792,device_name=AMD_Instinct_MI325X.json +0 -0
  298. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=1792,device_name=AMD_Radeon_Graphics.json +0 -0
  299. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=1792,device_name=NVIDIA_A100-SXM4-40GB.json +0 -0
  300. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=1792,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  301. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=1792,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  302. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=1792,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  303. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=1792,device_name=NVIDIA_H200.json +0 -0
  304. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=2048,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  305. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  306. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  307. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  308. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=2048,device_name=NVIDIA_H200.json +0 -0
  309. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=AMD_Instinct_MI300X.json +0 -0
  310. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=AMD_Instinct_MI325X.json +0 -0
  311. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=AMD_Radeon_Graphics.json +0 -0
  312. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=NVIDIA_A100-SXM4-40GB.json +0 -0
  313. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  314. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=NVIDIA_GeForce_RTX_4090,dtype=fp8_w8a8.json +0 -0
  315. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  316. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  317. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  318. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=NVIDIA_H200.json +0 -0
  319. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=3584,device_name=NVIDIA_L40S.json +0 -0
  320. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=4096,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +0 -0
  321. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=4096,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +0 -0
  322. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=4096,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json +0 -0
  323. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=4096,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  324. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  325. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  326. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  327. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=4096,device_name=NVIDIA_H200.json +0 -0
  328. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=7168,device_name=AMD_Instinct_MI300X.json +0 -0
  329. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=7168,device_name=AMD_Instinct_MI325X.json +0 -0
  330. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=7168,device_name=AMD_Radeon_Graphics.json +0 -0
  331. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=7168,device_name=NVIDIA_A100-SXM4-80GB.json +0 -0
  332. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  333. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  334. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  335. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=7168,device_name=NVIDIA_H200.json +0 -0
  336. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=8192,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8.json +0 -0
  337. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=8192,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8.json +0 -0
  338. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=8192,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8.json +0 -0
  339. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=8192,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8.json +0 -0
  340. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_1_0/E=8,N=8192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +0 -0
  341. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=192,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
  342. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=192,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  343. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=192,device_name=NVIDIA_H20.json +0 -0
  344. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=192,device_name=NVIDIA_H200.json +0 -0
  345. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=384,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  346. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  347. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=384,device_name=NVIDIA_H20.json +0 -0
  348. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  349. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=384,device_name=NVIDIA_H200.json +0 -0
  350. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=512,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  351. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=768,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
  352. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=768,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  353. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=768,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  354. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=768,device_name=NVIDIA_H20.json +0 -0
  355. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=768,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  356. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=768,device_name=NVIDIA_H200.json +0 -0
  357. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=128,N=96,device_name=NVIDIA_H20.json +0 -0
  358. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=160,N=320,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +0 -0
  359. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  360. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  361. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  362. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  363. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=257,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  364. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=264,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +0 -0
  365. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=264,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  366. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=264,N=256,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  367. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=264,N=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  368. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=272,N=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8.json +0 -0
  369. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=272,N=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  370. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=272,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  371. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=272,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
  372. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=288,N=64,device_name=NVIDIA_A800-SXM4-80GB.json +0 -0
  373. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_2_0/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  374. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=128,N=384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  375. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=160,N=320,device_name=NVIDIA_H20-3e.json +0 -0
  376. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=257,N=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  377. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=384,N=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  378. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=384,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  379. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=384,N=256,device_name=NVIDIA_H20-3e,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  380. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=385,N=128,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  381. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=385,N=128,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  382. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=8,N=7168,device_name=NVIDIA_H100_80GB_HBM3.json +0 -0
  383. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/fused_moe.py +0 -0
  384. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/layer.py +0 -0
  385. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/fused_moe_triton/triton_kernels_moe.py +0 -0
  386. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/moe/router.py +0 -0
  387. {sglang-0.4.9.post5/sglang/srt/layers/quantization/compressed_tensors → sglang-0.4.9.post6/sglang/srt/layers/moe/token_dispatcher}/__init__.py +0 -0
  388. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/multimodal.py +0 -0
  389. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/parameter.py +0 -0
  390. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/pooler.py +0 -0
  391. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/__init__.py +0 -0
  392. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/awq.py +0 -0
  393. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/awq_triton.py +0 -0
  394. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/base_config.py +0 -0
  395. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/blockwise_int8.py +0 -0
  396. {sglang-0.4.9.post5/sglang/test → sglang-0.4.9.post6/sglang/srt/layers/quantization/compressed_tensors}/__init__.py +0 -0
  397. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/compressed_tensors/compressed_tensors.py +0 -0
  398. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/compressed_tensors/compressed_tensors_moe.py +0 -0
  399. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/compressed_tensors/schemes/__init__.py +0 -0
  400. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_scheme.py +0 -0
  401. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +0 -0
  402. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py +0 -0
  403. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/compressed_tensors/utils.py +0 -0
  404. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  405. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  406. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  407. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  408. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=1536,K=1536,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  409. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  410. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  411. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  412. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  413. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  414. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  415. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  416. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  417. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  418. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  419. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  420. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  421. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  422. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  423. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=2048,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  424. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  425. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  426. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  427. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  428. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  429. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=2304,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  430. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=24576,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  431. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=24576,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  432. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=24576,K=1536,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  433. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  434. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  435. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  436. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  437. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  438. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  439. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  440. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=24576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  441. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  442. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  443. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  444. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  445. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=256,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  446. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  447. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  448. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  449. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  450. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  451. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  452. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=3072,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  453. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=3072,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  454. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=3072,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  455. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=3072,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  456. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  457. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  458. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  459. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  460. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  461. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  462. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  463. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  464. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  465. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  466. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=32768,K=512,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  467. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=36864,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  468. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=36864,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  469. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  470. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  471. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  472. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  473. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  474. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  475. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  476. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  477. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  478. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  479. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  480. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  481. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  482. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  483. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  484. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  485. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  486. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  487. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  488. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  489. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  490. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  491. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  492. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  493. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  494. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  495. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  496. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  497. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  498. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  499. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  500. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  501. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  502. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  503. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  504. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  505. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=1024,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  506. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  507. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  508. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  509. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  510. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  511. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=1152,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  512. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  513. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  514. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  515. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  516. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=128,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  517. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  518. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  519. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  520. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  521. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  522. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  523. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  524. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  525. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  526. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  527. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=16384,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  528. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_A100-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  529. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_A800-SXM4-80GB,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  530. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  531. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  532. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  533. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  534. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  535. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=18432,device_name=NVIDIA_L20Y,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  536. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  537. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  538. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  539. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  540. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  541. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  542. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  543. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  544. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  545. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  546. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  547. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=NVIDIA_H100_80GB_HBM3,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  548. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  549. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  550. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  551. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=AMD_Instinct_MI325X,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  552. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=AMD_Radeon_Graphics,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  553. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=NVIDIA_B200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  554. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=NVIDIA_H20,dtype=int8_w8a8,block_shape=[128, 128].json +0 -0
  555. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=NVIDIA_H200,dtype=fp8_w8a8,block_shape=[128, 128].json +0 -0
  556. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/deep_gemm_wrapper/__init__.py +0 -0
  557. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/deep_gemm_wrapper/compile_utils.py +0 -0
  558. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/deep_gemm_wrapper/configurer.py +0 -0
  559. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/deep_gemm_wrapper/entrypoint.py +0 -0
  560. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/fp8.py +0 -0
  561. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/fp8_kernel.py +0 -0
  562. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/fp8_utils.py +0 -0
  563. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/gptq.py +0 -0
  564. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/int8_kernel.py +0 -0
  565. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/int8_utils.py +0 -0
  566. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/kv_cache.py +0 -0
  567. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/marlin_utils.py +0 -0
  568. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/moe_wna16.py +0 -0
  569. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/petit.py +0 -0
  570. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/petit_utils.py +0 -0
  571. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/qoq.py +0 -0
  572. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/scalar_type.py +0 -0
  573. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/unquant.py +0 -0
  574. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/utils.py +0 -0
  575. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/w4afp8.py +0 -0
  576. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/w8a8_fp8.py +0 -0
  577. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/quantization/w8a8_int8.py +0 -0
  578. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/radix_attention.py +0 -0
  579. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/rotary_embedding.py +0 -0
  580. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/sampler.py +0 -0
  581. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/torchao_utils.py +0 -0
  582. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/utils.py +0 -0
  583. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/layers/vocab_parallel_embedding.py +0 -0
  584. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/lora/backend/base_backend.py +0 -0
  585. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/lora/backend/flashinfer_backend.py +0 -0
  586. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/lora/backend/triton_backend.py +0 -0
  587. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/lora/layers.py +0 -0
  588. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/lora/lora.py +0 -0
  589. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/lora/lora_config.py +0 -0
  590. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/lora/lora_manager.py +0 -0
  591. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/lora/lora_registry.py +0 -0
  592. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/lora/mem_pool.py +0 -0
  593. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/lora/triton_ops/__init__.py +0 -0
  594. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/lora/triton_ops/gate_up_lora_b.py +0 -0
  595. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/lora/triton_ops/qkv_lora_b.py +0 -0
  596. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/lora/triton_ops/sgemm_lora_a.py +0 -0
  597. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/lora/triton_ops/sgemm_lora_b.py +0 -0
  598. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/lora/utils.py +0 -0
  599. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/managers/cache_controller.py +0 -0
  600. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/managers/configure_logging.py +0 -0
  601. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/managers/detokenizer_manager.py +0 -0
  602. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/managers/mm_utils.py +0 -0
  603. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/managers/multimodal_processor.py +0 -0
  604. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/managers/schedule_batch.py +0 -0
  605. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/managers/schedule_policy.py +0 -0
  606. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/managers/scheduler_output_processor_mixin.py +0 -0
  607. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/managers/session_controller.py +0 -0
  608. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/managers/template_manager.py +0 -0
  609. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/managers/tp_worker_overlap_thread.py +0 -0
  610. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/managers/utils.py +0 -0
  611. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/mem_cache/allocator.py +0 -0
  612. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/mem_cache/base_prefix_cache.py +0 -0
  613. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/mem_cache/chunk_cache.py +0 -0
  614. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/mem_cache/flush_cache.py +0 -0
  615. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/mem_cache/hicache_storage.py +0 -0
  616. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/mem_cache/hiradix_cache.py +0 -0
  617. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/mem_cache/memory_pool.py +0 -0
  618. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/mem_cache/memory_pool_host.py +0 -0
  619. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/mem_cache/multimodal_cache.py +0 -0
  620. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/mem_cache/radix_cache.py +0 -0
  621. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/mem_cache/swa_radix_cache.py +0 -0
  622. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/metrics/collector.py +0 -0
  623. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/metrics/func_timer.py +0 -0
  624. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/model_executor/cuda_graph_runner.py +0 -0
  625. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/model_executor/forward_batch_info.py +0 -0
  626. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/model_loader/__init__.py +0 -0
  627. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/model_loader/loader.py +0 -0
  628. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/model_loader/utils.py +0 -0
  629. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/model_loader/weight_utils.py +0 -0
  630. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/model_parallel.py +0 -0
  631. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/baichuan.py +0 -0
  632. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/bert.py +0 -0
  633. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/chatglm.py +0 -0
  634. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/clip.py +0 -0
  635. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/commandr.py +0 -0
  636. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/dbrx.py +0 -0
  637. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/deepseek.py +0 -0
  638. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/deepseek_janus_pro.py +0 -0
  639. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/deepseek_nextn.py +0 -0
  640. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/deepseek_vl2.py +0 -0
  641. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/exaone.py +0 -0
  642. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/gemma.py +0 -0
  643. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/gemma2.py +0 -0
  644. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/gemma2_reward.py +0 -0
  645. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/gemma3_causal.py +0 -0
  646. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/gemma3_mm.py +0 -0
  647. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/gemma3n_audio.py +0 -0
  648. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/gemma3n_causal.py +0 -0
  649. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/gemma3n_mm.py +0 -0
  650. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/glm4.py +0 -0
  651. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/glm4_moe.py +0 -0
  652. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/glm4_moe_nextn.py +0 -0
  653. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/gpt2.py +0 -0
  654. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/gpt_bigcode.py +0 -0
  655. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/granite.py +0 -0
  656. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/granitemoe.py +0 -0
  657. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/grok.py +0 -0
  658. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/hunyuan.py +0 -0
  659. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/idefics2.py +0 -0
  660. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/internlm2.py +0 -0
  661. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/internlm2_reward.py +0 -0
  662. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/interns1.py +0 -0
  663. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/internvl.py +0 -0
  664. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/kimi_vl.py +0 -0
  665. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/kimi_vl_moonvit.py +0 -0
  666. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/llama.py +0 -0
  667. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/llama4.py +0 -0
  668. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/llama_classification.py +0 -0
  669. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/llama_eagle.py +0 -0
  670. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/llama_eagle3.py +0 -0
  671. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/llama_embedding.py +0 -0
  672. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/llama_reward.py +0 -0
  673. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/llava.py +0 -0
  674. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/llavavid.py +0 -0
  675. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/mimo.py +0 -0
  676. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/mimo_mtp.py +0 -0
  677. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/minicpm.py +0 -0
  678. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/minicpm3.py +0 -0
  679. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/minicpmo.py +0 -0
  680. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/minicpmv.py +0 -0
  681. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/mistral.py +0 -0
  682. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/mixtral.py +0 -0
  683. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/mixtral_quant.py +0 -0
  684. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/mllama.py +0 -0
  685. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/mllama4.py +0 -0
  686. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/olmo.py +0 -0
  687. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/olmo2.py +0 -0
  688. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/olmoe.py +0 -0
  689. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/persimmon.py +0 -0
  690. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/phi.py +0 -0
  691. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/phi3_small.py +0 -0
  692. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/phi4mm.py +0 -0
  693. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/phi4mm_audio.py +0 -0
  694. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/phi4mm_utils.py +0 -0
  695. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/phimoe.py +0 -0
  696. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/pixtral.py +0 -0
  697. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/qwen.py +0 -0
  698. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/qwen2.py +0 -0
  699. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/qwen2_5_vl.py +0 -0
  700. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/qwen2_audio.py +0 -0
  701. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/qwen2_classification.py +0 -0
  702. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/qwen2_eagle.py +0 -0
  703. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/qwen2_moe.py +0 -0
  704. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/qwen2_rm.py +0 -0
  705. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/qwen2_vl.py +0 -0
  706. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/qwen3.py +0 -0
  707. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/registry.py +0 -0
  708. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/roberta.py +0 -0
  709. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/siglip.py +0 -0
  710. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/stablelm.py +0 -0
  711. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/torch_native_llama.py +0 -0
  712. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/transformers.py +0 -0
  713. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/vila.py +0 -0
  714. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/xverse.py +0 -0
  715. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/xverse_moe.py +0 -0
  716. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/models/yivl.py +0 -0
  717. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/multimodal/mm_utils.py +0 -0
  718. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/multimodal/processors/base_processor.py +0 -0
  719. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/multimodal/processors/clip.py +0 -0
  720. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/multimodal/processors/deepseek_vl_v2.py +0 -0
  721. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/multimodal/processors/gemma3.py +0 -0
  722. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/multimodal/processors/gemma3n.py +0 -0
  723. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/multimodal/processors/internvl.py +0 -0
  724. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/multimodal/processors/janus_pro.py +0 -0
  725. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/multimodal/processors/kimi_vl.py +0 -0
  726. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/multimodal/processors/llava.py +0 -0
  727. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/multimodal/processors/minicpm.py +0 -0
  728. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/multimodal/processors/mlama.py +0 -0
  729. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/multimodal/processors/mllama4.py +0 -0
  730. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/multimodal/processors/phi4mm.py +0 -0
  731. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/multimodal/processors/pixtral.py +0 -0
  732. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/multimodal/processors/qwen_audio.py +0 -0
  733. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/multimodal/processors/qwen_vl.py +0 -0
  734. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/multimodal/processors/vila.py +0 -0
  735. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/operations.py +0 -0
  736. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/operations_strategy.py +0 -0
  737. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/patch_torch.py +0 -0
  738. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/reasoning_parser.py +0 -0
  739. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/sampling/custom_logit_processor.py +0 -0
  740. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/sampling/penaltylib/__init__.py +0 -0
  741. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/sampling/penaltylib/frequency_penalty.py +0 -0
  742. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/sampling/penaltylib/min_new_tokens.py +0 -0
  743. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/sampling/penaltylib/orchestrator.py +0 -0
  744. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/sampling/penaltylib/presence_penalty.py +0 -0
  745. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/sampling/sampling_batch_info.py +0 -0
  746. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/sampling/sampling_params.py +0 -0
  747. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/speculative/build_eagle_tree.py +0 -0
  748. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/speculative/eagle_draft_cuda_graph_runner.py +0 -0
  749. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/speculative/eagle_draft_extend_cuda_graph_runner.py +0 -0
  750. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/speculative/eagle_utils.py +0 -0
  751. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/speculative/eagle_worker.py +0 -0
  752. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/speculative/spec_info.py +0 -0
  753. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/torch_memory_saver_adapter.py +0 -0
  754. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/utils.py +0 -0
  755. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/warmup.py +0 -0
  756. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/srt/weight_sync/utils.py +0 -0
  757. {sglang-0.4.9.post5/sglang/test/attention → sglang-0.4.9.post6/sglang/test}/__init__.py +0 -0
  758. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/attention/test_flashattn_backend.py +0 -0
  759. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/attention/test_flashattn_mla_backend.py +0 -0
  760. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/attention/test_prefix_chunk_info.py +0 -0
  761. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/few_shot_gsm8k.py +0 -0
  762. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/few_shot_gsm8k_engine.py +0 -0
  763. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/run_eval.py +0 -0
  764. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/runners.py +0 -0
  765. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/send_one.py +0 -0
  766. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/simple_eval_common.py +0 -0
  767. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/simple_eval_gpqa.py +0 -0
  768. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/simple_eval_humaneval.py +0 -0
  769. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/simple_eval_math.py +0 -0
  770. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/simple_eval_mgsm.py +0 -0
  771. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/simple_eval_mmlu.py +0 -0
  772. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/test_activation.py +0 -0
  773. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/test_block_fp8.py +0 -0
  774. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/test_block_fp8_deep_gemm_blackwell.py +0 -0
  775. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/test_block_fp8_ep.py +0 -0
  776. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/test_custom_ops.py +0 -0
  777. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/test_cutlass_moe.py +0 -0
  778. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/test_cutlass_w4a8_moe.py +0 -0
  779. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/test_deepep_utils.py +0 -0
  780. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/test_dynamic_grad_mode.py +0 -0
  781. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/test_fp4_moe.py +0 -0
  782. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/test_layernorm.py +0 -0
  783. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/test_marlin_moe.py +0 -0
  784. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/test_marlin_utils.py +0 -0
  785. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/test/test_programs.py +0 -0
  786. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang/utils.py +0 -0
  787. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang.egg-info/dependency_links.txt +0 -0
  788. {sglang-0.4.9.post5 → sglang-0.4.9.post6}/sglang.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: sglang
3
- Version: 0.4.9.post5
3
+ Version: 0.4.9.post6
4
4
  Summary: SGLang is yet another fast serving framework for large language models and vision language models.
5
5
  License: Apache License
6
6
  Version 2.0, January 2004
@@ -269,6 +269,7 @@ Requires-Dist: torchvision==0.22.1; extra == "blackwell"
269
269
  Requires-Dist: cuda-python; extra == "blackwell"
270
270
  Requires-Dist: einops; extra == "blackwell"
271
271
  Requires-Dist: flashinfer_python==0.2.9rc2; extra == "blackwell"
272
+ Requires-Dist: tiktoken; extra == "blackwell"
272
273
  Provides-Extra: srt-hip
273
274
  Requires-Dist: sglang[runtime_common]; extra == "srt-hip"
274
275
  Requires-Dist: torch; extra == "srt-hip"
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "sglang"
7
- version = "0.4.9.post5"
7
+ version = "0.4.9.post6"
8
8
  description = "SGLang is yet another fast serving framework for large language models and vision language models."
9
9
  readme = "README.md"
10
10
  requires-python = ">=3.8"
@@ -72,6 +72,7 @@ blackwell = [
72
72
  "cuda-python",
73
73
  "einops",
74
74
  "flashinfer_python==0.2.9rc2",
75
+ "tiktoken",
75
76
  ]
76
77
 
77
78
  # HIP (Heterogeneous-computing Interface for Portability) for AMD
@@ -261,6 +261,9 @@ class ModelConfig:
261
261
  self.num_key_value_heads = self.num_attention_heads
262
262
  self.hidden_size = self.hf_text_config.hidden_size
263
263
  self.num_hidden_layers = self.hf_text_config.num_hidden_layers
264
+ self.num_nextn_predict_layers = getattr(
265
+ self.hf_text_config, "num_nextn_predict_layers", None
266
+ )
264
267
  self.vocab_size = self.hf_text_config.vocab_size
265
268
 
266
269
  # Verify quantization
@@ -38,7 +38,7 @@ import orjson
38
38
  import requests
39
39
  import uvicorn
40
40
  import uvloop
41
- from fastapi import Depends, FastAPI, Request, UploadFile
41
+ from fastapi import Depends, FastAPI, HTTPException, Request, UploadFile
42
42
  from fastapi.exceptions import RequestValidationError
43
43
  from fastapi.middleware.cors import CORSMiddleware
44
44
  from fastapi.responses import ORJSONResponse, Response, StreamingResponse
@@ -174,6 +174,18 @@ app.add_middleware(
174
174
  )
175
175
 
176
176
 
177
+ @app.exception_handler(HTTPException)
178
+ async def validation_exception_handler(request: Request, exc: HTTPException):
179
+ """Enrich HTTP exception with status code and other details"""
180
+ error = ErrorResponse(
181
+ object="error",
182
+ message=exc.detail,
183
+ type=str(exc.status_code),
184
+ code=exc.status_code,
185
+ )
186
+ return ORJSONResponse(content=error.model_dump(), status_code=exc.status_code)
187
+
188
+
177
189
  # Custom exception handlers to change validation error status codes
178
190
  @app.exception_handler(RequestValidationError)
179
191
  async def validation_exception_handler(request: Request, exc: RequestValidationError):
@@ -317,7 +317,9 @@ class ToolCall(BaseModel):
317
317
 
318
318
  class ChatCompletionMessageGenericParam(BaseModel):
319
319
  role: Literal["system", "assistant", "tool"]
320
- content: Union[str, List[ChatCompletionMessageContentTextPart], None]
320
+ content: Union[str, List[ChatCompletionMessageContentTextPart], None] = Field(
321
+ default=None
322
+ )
321
323
  tool_call_id: Optional[str] = None
322
324
  name: Optional[str] = None
323
325
  reasoning_content: Optional[str] = None
@@ -4,7 +4,7 @@ import uuid
4
4
  from abc import ABC, abstractmethod
5
5
  from typing import Any, Optional, Union
6
6
 
7
- from fastapi import Request
7
+ from fastapi import HTTPException, Request
8
8
  from fastapi.responses import ORJSONResponse, StreamingResponse
9
9
 
10
10
  from sglang.srt.entrypoints.openai.protocol import ErrorResponse, OpenAIServingRequest
@@ -45,7 +45,10 @@ class OpenAIServingBase(ABC):
45
45
  return await self._handle_non_streaming_request(
46
46
  adapted_request, processed_request, raw_request
47
47
  )
48
-
48
+ except HTTPException as e:
49
+ return self.create_error_response(
50
+ message=e.detail, err_type=str(e.status_code), status_code=e.status_code
51
+ )
49
52
  except Exception as e:
50
53
  logger.exception(f"Error in request: {e}")
51
54
  return self.create_error_response(
@@ -1,5 +1,7 @@
1
+ from __future__ import annotations
2
+
1
3
  import logging
2
- from typing import List, Optional, Tuple
4
+ from typing import TYPE_CHECKING, List, Optional, Tuple
3
5
 
4
6
  import torch
5
7
 
@@ -50,6 +52,13 @@ from sglang.srt.utils import (
50
52
  next_power_of_2,
51
53
  )
52
54
 
55
+ if TYPE_CHECKING:
56
+ from sglang.srt.layers.moe.ep_moe.token_dispatcher import (
57
+ DeepEPLLOutput,
58
+ DeepEPNormalOutput,
59
+ DispatchOutput,
60
+ )
61
+
53
62
  _is_hip = is_hip()
54
63
  _is_npu = is_npu()
55
64
  _is_fp8_fnuz = is_fp8_fnuz()
@@ -791,11 +800,24 @@ class DeepEPMoE(EPMoE):
791
800
  routed_scaling_factor=routed_scaling_factor,
792
801
  )
793
802
  self.deepep_mode = deepep_mode
794
- if deep_gemm_wrapper.ENABLE_JIT_DEEPGEMM:
795
- assert self.use_fp8_w8a8, (
796
- "DeepGEMM requires an fp8_w8a8 model; "
797
- "alternatively, you can disable DeepGEMM by turning off the ENABLE_JIT_DEEPGEMM environment variable."
798
- )
803
+
804
+ # TODO: move to the beginning of the file
805
+ from sglang.srt.distributed.parallel_state import get_tp_group
806
+ from sglang.srt.managers.schedule_batch import global_server_args_dict
807
+ from sglang.srt.two_batch_overlap import MaybeTboDeepEPDispatcher
808
+
809
+ self.deepep_dispatcher = MaybeTboDeepEPDispatcher(
810
+ group=get_tp_group().device_group,
811
+ router_topk=self.top_k,
812
+ permute_fusion=True,
813
+ num_experts=self.num_experts,
814
+ num_local_experts=self.num_local_experts,
815
+ hidden_size=hidden_size,
816
+ params_dtype=params_dtype,
817
+ deepep_mode=deepep_mode,
818
+ async_finish=True, # TODO
819
+ return_recv_hook=True,
820
+ )
799
821
 
800
822
  if self.deepep_mode.enable_low_latency():
801
823
  assert (
@@ -837,37 +859,128 @@ class DeepEPMoE(EPMoE):
837
859
  hidden_states: torch.Tensor,
838
860
  topk_idx: torch.Tensor,
839
861
  topk_weights: torch.Tensor,
840
- reorder_topk_ids: torch.Tensor,
841
- seg_indptr: torch.Tensor,
842
- masked_m: torch.Tensor,
843
- expected_m: int,
844
- num_recv_tokens_per_expert: List[int],
845
862
  forward_batch: ForwardBatch,
846
863
  ):
864
+ dispatch_output = self.dispatch(
865
+ hidden_states, topk_idx, topk_weights, forward_batch
866
+ )
867
+ hidden_states = self.moe_impl(dispatch_output)
868
+ hidden_states = self.combine(
869
+ hidden_states,
870
+ dispatch_output.topk_idx,
871
+ dispatch_output.topk_weights,
872
+ forward_batch,
873
+ )
874
+ return hidden_states
875
+
876
+ def dispatch(
877
+ self,
878
+ hidden_states: torch.Tensor,
879
+ topk_idx: torch.Tensor,
880
+ topk_weights: torch.Tensor,
881
+ forward_batch: ForwardBatch,
882
+ ):
883
+ return self.deepep_dispatcher.dispatch(
884
+ hidden_states=hidden_states,
885
+ topk_idx=topk_idx,
886
+ topk_weights=topk_weights,
887
+ forward_batch=forward_batch,
888
+ )
889
+
890
+ def moe_impl(self, dispatch_output: DispatchOutput):
847
891
  if _use_aiter:
848
892
  # in forward_aiter, we skip token permutation and unpermutation, which have been fused inside aiter kernel
849
- return self.forward_aiter(hidden_states, topk_idx, topk_weights)
850
- resolved_deepep_mode = self.deepep_mode.resolve(
851
- forward_batch.is_extend_in_batch
852
- )
853
- if resolved_deepep_mode == DeepEPMode.normal:
854
- if deep_gemm_wrapper.ENABLE_JIT_DEEPGEMM:
855
- return self.forward_deepgemm_contiguous(
856
- hidden_states, topk_idx, topk_weights, num_recv_tokens_per_expert
857
- )
893
+ return self.forward_aiter(dispatch_output)
894
+ if dispatch_output.format.is_deepep_normal():
895
+ if deep_gemm_wrapper.ENABLE_JIT_DEEPGEMM and self.use_fp8_w8a8:
896
+ return self.forward_deepgemm_contiguous(dispatch_output)
858
897
  else:
859
- return self.forward_normal(hidden_states, reorder_topk_ids, seg_indptr)
860
- elif resolved_deepep_mode == DeepEPMode.low_latency:
861
- return self.forward_deepgemm_masked(hidden_states, masked_m, expected_m)
898
+ return self.forward_normal(dispatch_output)
899
+ elif dispatch_output.format.is_deepep_ll():
900
+ return self.forward_deepgemm_masked(dispatch_output)
862
901
  else:
863
902
  raise ValueError(f"Invalid deepep_mode: {self.deepep_mode}")
864
903
 
865
- def forward_normal(
904
+ def combine(
866
905
  self,
867
906
  hidden_states: torch.Tensor,
868
- reorder_topk_ids: torch.Tensor,
869
- seg_indptr: torch.Tensor,
907
+ topk_idx: torch.Tensor,
908
+ topk_weights: torch.Tensor,
909
+ forward_batch: ForwardBatch,
870
910
  ):
911
+ return self.deepep_dispatcher.combine(
912
+ hidden_states=hidden_states,
913
+ topk_idx=topk_idx,
914
+ topk_weights=topk_weights,
915
+ forward_batch=forward_batch,
916
+ )
917
+
918
+ def _prepare_for_normal(
919
+ self,
920
+ hidden_states: torch.Tensor,
921
+ topk_idx: torch.Tensor,
922
+ ):
923
+ from sglang.srt.layers.moe.ep_moe.kernels import (
924
+ deepep_permute_triton_kernel,
925
+ deepep_run_moe_deep_preprocess,
926
+ )
927
+
928
+ if hidden_states.shape[0] == 0:
929
+ reorder_topk_ids = torch.empty(
930
+ (0,), device=hidden_states.device, dtype=torch.int64
931
+ )
932
+ seg_indptr = torch.zeros(
933
+ (self.num_experts + 1,),
934
+ device=hidden_states.device,
935
+ dtype=torch.int64,
936
+ )
937
+ return reorder_topk_ids, seg_indptr, hidden_states
938
+ else:
939
+ if _use_aiter:
940
+ # skip permutation here as aiter fused_moe has fused inside
941
+ reorder_topk_ids = torch.empty(
942
+ (0,), device=hidden_states.device, dtype=torch.int64
943
+ )
944
+ seg_indptr = torch.zeros(
945
+ (self.num_experts + 1,),
946
+ device=hidden_states.device,
947
+ dtype=torch.int64,
948
+ )
949
+ return reorder_topk_ids, seg_indptr, hidden_states
950
+
951
+ reorder_topk_ids, self.src2dst, seg_indptr = deepep_run_moe_deep_preprocess(
952
+ topk_idx, self.num_experts
953
+ )
954
+ num_total_tokens = reorder_topk_ids.numel()
955
+ gateup_input = torch.empty(
956
+ (int(num_total_tokens), hidden_states.shape[1]),
957
+ device=hidden_states.device,
958
+ dtype=hidden_states.dtype,
959
+ )
960
+ # PreReorder
961
+ deepep_permute_triton_kernel[(hidden_states.shape[0],)](
962
+ hidden_states,
963
+ gateup_input,
964
+ self.src2dst,
965
+ topk_idx,
966
+ None,
967
+ self.router_topk,
968
+ hidden_states.shape[1],
969
+ BLOCK_SIZE=512,
970
+ )
971
+ return reorder_topk_ids, seg_indptr, gateup_input
972
+
973
+ def forward_normal(
974
+ self,
975
+ dispatch_output: DeepEPNormalOutput,
976
+ ):
977
+ hidden_states, topk_idx = (
978
+ dispatch_output.hidden_states,
979
+ dispatch_output.topk_idx,
980
+ )
981
+ reorder_topk_ids, seg_indptr, hidden_states = self._prepare_for_normal(
982
+ hidden_states, topk_idx
983
+ )
871
984
  hidden_states_dtype = hidden_states.dtype
872
985
  hidden_states_device = hidden_states.device
873
986
 
@@ -983,10 +1096,13 @@ class DeepEPMoE(EPMoE):
983
1096
 
984
1097
  def forward_aiter(
985
1098
  self,
986
- hidden_states: torch.Tensor,
987
- topk_idx: torch.Tensor,
988
- topk_weights: torch.Tensor,
1099
+ dispatch_output: DeepEPNormalOutput,
989
1100
  ):
1101
+ hidden_states, topk_idx, topk_weights = (
1102
+ dispatch_output.hidden_states,
1103
+ dispatch_output.topk_idx,
1104
+ dispatch_output.topk_weights,
1105
+ )
990
1106
  if hidden_states.shape[0] == 0:
991
1107
  return hidden_states
992
1108
  # in original deepep, idx == -1 meaning invalid and will not be processed.
@@ -1014,11 +1130,11 @@ class DeepEPMoE(EPMoE):
1014
1130
 
1015
1131
  def forward_deepgemm_contiguous(
1016
1132
  self,
1017
- hidden_states_fp8: Tuple[torch.Tensor, torch.Tensor],
1018
- topk_idx,
1019
- topk_weights,
1020
- num_recv_tokens_per_expert: List[int],
1133
+ dispatch_output: DeepEPNormalOutput,
1021
1134
  ):
1135
+ hidden_states_fp8, topk_idx, topk_weights, num_recv_tokens_per_expert = (
1136
+ dispatch_output
1137
+ )
1022
1138
  hidden_states_fp8, hidden_states_scale = hidden_states_fp8
1023
1139
  assert self.quant_method is not None
1024
1140
  assert self.activation == "silu"
@@ -1138,10 +1254,9 @@ class DeepEPMoE(EPMoE):
1138
1254
 
1139
1255
  def forward_deepgemm_masked(
1140
1256
  self,
1141
- hidden_states_fp8: Tuple[torch.Tensor, torch.Tensor],
1142
- masked_m: torch.Tensor,
1143
- expected_m: int,
1257
+ dispatch_output: DeepEPLLOutput,
1144
1258
  ):
1259
+ hidden_states_fp8, _, _, masked_m, expected_m = dispatch_output
1145
1260
  assert self.quant_method is not None
1146
1261
  assert self.activation == "silu"
1147
1262
 
@@ -1268,7 +1383,7 @@ class FlashInferEPMoE(EPMoE):
1268
1383
  topk_group=self.topk_group,
1269
1384
  intermediate_size=self.w2_weight.shape[2],
1270
1385
  local_expert_offset=self.start_expert_id,
1271
- local_num_experts=self.num_experts_per_partition,
1386
+ local_num_experts=self.num_local_experts,
1272
1387
  routed_scaling_factor=self.routed_scaling_factor,
1273
1388
  tile_tokens_dim=_get_tile_tokens_dim(
1274
1389
  hidden_states.shape[0], self.top_k, self.num_experts
@@ -1,7 +1,27 @@
1
+ # TODO(ch-wan): this file will be moved to sglang/srt/layers/moe/token_dispatcher/deepep.py
2
+
3
+ from __future__ import annotations
4
+
1
5
  import logging
2
6
  from dataclasses import dataclass
7
+ from typing import (
8
+ TYPE_CHECKING,
9
+ List,
10
+ NamedTuple,
11
+ Optional,
12
+ Protocol,
13
+ Tuple,
14
+ Union,
15
+ runtime_checkable,
16
+ )
3
17
 
4
18
  from sglang.srt.eplb.expert_distribution import get_global_expert_distribution_recorder
19
+ from sglang.srt.layers.moe.token_dispatcher.base_dispatcher import (
20
+ BaseDispatcher,
21
+ BaseDispatcherConfig,
22
+ DispatchOutput,
23
+ DispatchOutputFormat,
24
+ )
5
25
  from sglang.srt.layers.quantization import deep_gemm_wrapper
6
26
  from sglang.srt.managers.schedule_batch import global_server_args_dict
7
27
  from sglang.srt.utils import (
@@ -24,7 +44,6 @@ except ImportError:
24
44
  use_deepep = False
25
45
 
26
46
  from enum import Enum, IntEnum, auto
27
- from typing import Optional, Tuple, Union
28
47
 
29
48
  import torch
30
49
  import torch.distributed as dist
@@ -41,6 +60,37 @@ _use_aiter = get_bool_env_var("SGLANG_USE_AITER") and is_hip()
41
60
  logger = logging.getLogger(__name__)
42
61
 
43
62
 
63
+ class DeepEPNormalOutput(NamedTuple):
64
+ """DeepEP normal dispatch output."""
65
+
66
+ hidden_states: torch.Tensor | Tuple[torch.Tensor, torch.Tensor]
67
+ topk_idx: torch.Tensor
68
+ topk_weights: torch.Tensor
69
+ num_recv_tokens_per_expert: List[int]
70
+
71
+ @property
72
+ def format(self) -> DispatchOutputFormat:
73
+ return DispatchOutputFormat.deepep_normal
74
+
75
+
76
+ class DeepEPLLOutput(NamedTuple):
77
+ """DeepEP low latency dispatch output."""
78
+
79
+ hidden_states_fp8: Tuple[torch.Tensor, torch.Tensor]
80
+ topk_idx: torch.Tensor
81
+ topk_weights: torch.Tensor
82
+ masked_m: torch.Tensor
83
+ expected_m: int
84
+
85
+ @property
86
+ def format(self) -> DispatchOutputFormat:
87
+ return DispatchOutputFormat.deepep_ll
88
+
89
+
90
+ assert isinstance(DeepEPNormalOutput, DispatchOutput)
91
+ assert isinstance(DeepEPLLOutput, DispatchOutput)
92
+
93
+
44
94
  class DeepEPDispatchMode(IntEnum):
45
95
  NORMAL = auto()
46
96
  LOW_LATENCY = auto()
@@ -107,6 +157,20 @@ class DeepEPBuffer:
107
157
  else:
108
158
  raise NotImplementedError
109
159
 
160
+ total_num_sms = torch.cuda.get_device_properties(
161
+ device="cuda"
162
+ ).multi_processor_count
163
+ if (
164
+ (deepep_mode != DeepEPMode.low_latency)
165
+ and not global_server_args_dict["enable_two_batch_overlap"]
166
+ and (DeepEPConfig.get_instance().num_sms < total_num_sms // 2)
167
+ ):
168
+ logger.warning(
169
+ f"Only use {DeepEPConfig.get_instance().num_sms} SMs for DeepEP communication. "
170
+ f"This may result in highly suboptimal performance. "
171
+ f"Consider using --deepep-config to change the behavior."
172
+ )
173
+
110
174
  cls._buffer = Buffer(
111
175
  group,
112
176
  num_nvl_bytes,
@@ -139,7 +203,7 @@ class DeepEPBuffer:
139
203
  cls._dispatch_mode = DeepEPDispatchMode.LOW_LATENCY
140
204
 
141
205
 
142
- class DeepEPConfig:
206
+ class DeepEPConfig(BaseDispatcherConfig):
143
207
  _instance = None
144
208
 
145
209
  def __init__(self):
@@ -255,63 +319,17 @@ class _DeepEPDispatcherImplNormal(_DeepEPDispatcherImplBase):
255
319
  return hidden_states, topk_idx, topk_weights, previous_event
256
320
 
257
321
  def dispatch_b(self, hidden_states, topk_idx, topk_weights, previous_event):
258
- if deep_gemm_wrapper.ENABLE_JIT_DEEPGEMM:
259
- (
260
- hidden_states,
261
- topk_idx,
262
- topk_weights,
263
- num_recv_tokens_per_expert_list,
264
- event,
265
- ) = self._dispatch_core(
266
- hidden_states, topk_idx, topk_weights, previous_event
267
- )
268
- event.current_stream_wait() if self.async_finish else ()
269
- return (
270
- hidden_states,
271
- topk_idx,
272
- topk_weights,
273
- None,
274
- num_recv_tokens_per_expert_list,
275
- None,
276
- None,
277
- None,
278
- )
279
- else:
280
- (
281
- hidden_states,
282
- topk_idx,
283
- topk_weights,
284
- num_recv_tokens_per_expert_list,
285
- event,
286
- ) = self._dispatch_core(
287
- hidden_states, topk_idx, topk_weights, previous_event
288
- )
289
- event.current_stream_wait() if self.async_finish else ()
290
- if hidden_states.shape[0] > 0:
291
- reorder_topk_ids, seg_indptr, hidden_states = self._deepep_permute(
292
- hidden_states, topk_idx, fp8_dtype=hidden_states.dtype
293
- )
294
- else:
295
- reorder_topk_ids = torch.empty(
296
- (0,), device=hidden_states.device, dtype=torch.int64
297
- )
298
- seg_indptr = torch.zeros(
299
- (self.num_experts + 1,),
300
- device=hidden_states.device,
301
- dtype=torch.int64,
302
- )
303
-
304
- masked_m = expected_m = None
305
- return (
306
- hidden_states,
307
- topk_idx,
308
- topk_weights,
309
- reorder_topk_ids,
310
- None,
311
- seg_indptr,
312
- masked_m,
313
- expected_m,
314
- )
322
+ (
323
+ hidden_states,
324
+ topk_idx,
325
+ topk_weights,
326
+ num_recv_tokens_per_expert,
327
+ event,
328
+ ) = self._dispatch_core(hidden_states, topk_idx, topk_weights, previous_event)
329
+ event.current_stream_wait() if self.async_finish else ()
330
+ return DeepEPNormalOutput(
331
+ hidden_states, topk_idx, topk_weights, num_recv_tokens_per_expert
332
+ )
315
333
 
316
334
  def _dispatch_core(
317
335
  self,
@@ -343,7 +361,7 @@ class _DeepEPDispatcherImplNormal(_DeepEPDispatcherImplBase):
343
361
  recv_x,
344
362
  recv_topk_idx,
345
363
  recv_topk_weights,
346
- num_recv_tokens_per_expert_list,
364
+ num_recv_tokens_per_expert,
347
365
  self.handle,
348
366
  event,
349
367
  ) = buffer.dispatch(
@@ -362,7 +380,7 @@ class _DeepEPDispatcherImplNormal(_DeepEPDispatcherImplBase):
362
380
  )
363
381
 
364
382
  get_global_expert_distribution_recorder().on_deepep_dispatch_normal(
365
- num_recv_tokens_per_expert_list,
383
+ num_recv_tokens_per_expert,
366
384
  num_tokens_per_rank=num_tokens_per_rank,
367
385
  num_tokens_per_rdma_rank=num_tokens_per_rdma_rank,
368
386
  num_tokens_per_expert=num_tokens_per_expert,
@@ -372,58 +390,10 @@ class _DeepEPDispatcherImplNormal(_DeepEPDispatcherImplBase):
372
390
  recv_x,
373
391
  recv_topk_idx,
374
392
  recv_topk_weights,
375
- num_recv_tokens_per_expert_list,
393
+ num_recv_tokens_per_expert,
376
394
  event,
377
395
  )
378
396
 
379
- def _deepep_permute(
380
- self,
381
- hidden_states: torch.Tensor,
382
- topk_idx: torch.Tensor,
383
- fp8_dtype: Optional[torch.dtype] = None,
384
- use_fp8_w8a8: bool = False,
385
- use_block_quant: bool = False,
386
- ):
387
- """
388
- Copy from Megatron-Core token_dispatcher MoEFlexTokenDispatcher
389
- https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/transformer/moe/token_dispatcher.py
390
- """
391
- if _use_aiter:
392
- # skip permutation here as aiter fused_moe has fused inside
393
- reorder_topk_ids = torch.empty(
394
- (0,), device=hidden_states.device, dtype=torch.int64
395
- )
396
- seg_indptr = torch.zeros(
397
- (self.num_experts + 1,), device=hidden_states.device, dtype=torch.int64
398
- )
399
- return reorder_topk_ids, seg_indptr, hidden_states
400
-
401
- reorder_topk_ids, self.src2dst, seg_indptr = deepep_run_moe_deep_preprocess(
402
- topk_idx, self.num_experts
403
- )
404
- num_total_tokens = reorder_topk_ids.numel()
405
- gateup_input = torch.empty(
406
- (int(num_total_tokens), hidden_states.shape[1]),
407
- device=hidden_states.device,
408
- dtype=(
409
- fp8_dtype
410
- if (use_fp8_w8a8 and not use_block_quant)
411
- else hidden_states.dtype
412
- ),
413
- )
414
- # PreReorder
415
- deepep_permute_triton_kernel[(hidden_states.shape[0],)](
416
- hidden_states,
417
- gateup_input,
418
- self.src2dst,
419
- topk_idx,
420
- None,
421
- self.router_topk,
422
- hidden_states.shape[1],
423
- BLOCK_SIZE=512,
424
- )
425
- return reorder_topk_ids, seg_indptr, gateup_input
426
-
427
397
  def combine_a(
428
398
  self,
429
399
  hidden_states: torch.Tensor,
@@ -544,15 +514,10 @@ class _DeepEPDispatcherImplLowLatency(_DeepEPDispatcherImplBase):
544
514
  masked_m
545
515
  )
546
516
 
547
- reorder_topk_ids = seg_indptr = None
548
-
549
- return (
517
+ return DeepEPLLOutput(
550
518
  hidden_states,
551
519
  topk_idx,
552
520
  topk_weights,
553
- reorder_topk_ids,
554
- None,
555
- seg_indptr,
556
521
  masked_m,
557
522
  expected_m,
558
523
  )
@@ -636,7 +601,7 @@ class _Stage(Enum):
636
601
  AFTER_COMBINE_A = auto()
637
602
 
638
603
 
639
- class DeepEPDispatcher:
604
+ class DeepEPDispatcher(BaseDispatcher):
640
605
  def __init__(
641
606
  self,
642
607
  group: torch.distributed.ProcessGroup,
@@ -676,7 +641,7 @@ class DeepEPDispatcher:
676
641
 
677
642
  self._stage = _Stage.INITIAL
678
643
 
679
- def dispatch(self, *args, **kwargs) -> Tuple:
644
+ def dispatch(self, *args, **kwargs) -> DispatchOutput:
680
645
  self.dispatch_a(*args, **kwargs)
681
646
  ret = self.dispatch_b()
682
647
  return ret