sglang 0.5.3rc0__py3-none-any.whl → 0.5.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (482)
  1. sglang/bench_one_batch.py +54 -37
  2. sglang/bench_one_batch_server.py +340 -34
  3. sglang/bench_serving.py +340 -159
  4. sglang/check_env.py +1 -1
  5. sglang/compile_deep_gemm.py +6 -2
  6. sglang/global_config.py +1 -25
  7. sglang/lang/api.py +6 -0
  8. sglang/lang/backend/runtime_endpoint.py +1 -1
  9. sglang/lang/interpreter.py +1 -0
  10. sglang/lang/ir.py +13 -0
  11. sglang/launch_server.py +9 -2
  12. sglang/profiler.py +20 -3
  13. sglang/srt/_custom_ops.py +1 -1
  14. sglang/srt/batch_invariant_ops/__init__.py +27 -0
  15. sglang/srt/batch_invariant_ops/batch_invariant_ops.py +547 -0
  16. sglang/srt/checkpoint_engine/checkpoint_engine_worker.py +142 -0
  17. sglang/srt/compilation/backend.py +437 -0
  18. sglang/srt/compilation/compilation_config.py +20 -0
  19. sglang/srt/compilation/compilation_counter.py +47 -0
  20. sglang/srt/compilation/compile.py +210 -0
  21. sglang/srt/compilation/compiler_interface.py +503 -0
  22. sglang/srt/compilation/cuda_piecewise_backend.py +228 -0
  23. sglang/srt/compilation/fix_functionalization.py +134 -0
  24. sglang/srt/compilation/fx_utils.py +83 -0
  25. sglang/srt/compilation/inductor_pass.py +140 -0
  26. sglang/srt/compilation/pass_manager.py +66 -0
  27. sglang/srt/compilation/piecewise_context_manager.py +40 -0
  28. sglang/srt/compilation/weak_ref_tensor_jit.py +16 -0
  29. sglang/srt/configs/__init__.py +8 -0
  30. sglang/srt/configs/deepseek_ocr.py +262 -0
  31. sglang/srt/configs/deepseekvl2.py +194 -96
  32. sglang/srt/configs/dots_ocr.py +64 -0
  33. sglang/srt/configs/dots_vlm.py +2 -7
  34. sglang/srt/configs/falcon_h1.py +309 -0
  35. sglang/srt/configs/load_config.py +33 -2
  36. sglang/srt/configs/mamba_utils.py +117 -0
  37. sglang/srt/configs/model_config.py +284 -118
  38. sglang/srt/configs/modelopt_config.py +30 -0
  39. sglang/srt/configs/nemotron_h.py +286 -0
  40. sglang/srt/configs/olmo3.py +105 -0
  41. sglang/srt/configs/points_v15_chat.py +29 -0
  42. sglang/srt/configs/qwen3_next.py +11 -47
  43. sglang/srt/configs/qwen3_omni.py +613 -0
  44. sglang/srt/configs/qwen3_vl.py +576 -0
  45. sglang/srt/connector/remote_instance.py +1 -1
  46. sglang/srt/constrained/base_grammar_backend.py +6 -1
  47. sglang/srt/constrained/llguidance_backend.py +5 -0
  48. sglang/srt/constrained/outlines_backend.py +1 -1
  49. sglang/srt/constrained/outlines_jump_forward.py +1 -1
  50. sglang/srt/constrained/reasoner_grammar_backend.py +9 -6
  51. sglang/srt/constrained/utils.py +12 -0
  52. sglang/srt/constrained/xgrammar_backend.py +26 -15
  53. sglang/srt/debug_utils/dumper.py +10 -3
  54. sglang/srt/disaggregation/ascend/conn.py +2 -2
  55. sglang/srt/disaggregation/ascend/transfer_engine.py +48 -10
  56. sglang/srt/disaggregation/base/conn.py +17 -4
  57. sglang/srt/disaggregation/common/conn.py +268 -98
  58. sglang/srt/disaggregation/decode.py +172 -39
  59. sglang/srt/disaggregation/decode_kvcache_offload_manager.py +185 -0
  60. sglang/srt/disaggregation/decode_schedule_batch_mixin.py +25 -16
  61. sglang/srt/disaggregation/fake/conn.py +11 -3
  62. sglang/srt/disaggregation/mooncake/conn.py +203 -555
  63. sglang/srt/disaggregation/nixl/conn.py +217 -63
  64. sglang/srt/disaggregation/prefill.py +113 -270
  65. sglang/srt/disaggregation/utils.py +36 -5
  66. sglang/srt/distributed/device_communicators/all_reduce_utils.py +16 -0
  67. sglang/srt/distributed/device_communicators/custom_all_reduce.py +6 -6
  68. sglang/srt/distributed/device_communicators/pymscclpp.py +2 -2
  69. sglang/srt/distributed/device_communicators/pynccl.py +24 -12
  70. sglang/srt/distributed/device_communicators/pynccl_allocator.py +2 -2
  71. sglang/srt/distributed/device_communicators/shm_broadcast.py +4 -2
  72. sglang/srt/distributed/device_communicators/symm_mem.py +164 -0
  73. sglang/srt/distributed/naive_distributed.py +5 -4
  74. sglang/srt/distributed/parallel_state.py +203 -97
  75. sglang/srt/elastic_ep/elastic_ep.py +74 -0
  76. sglang/srt/entrypoints/context.py +3 -2
  77. sglang/srt/entrypoints/engine.py +85 -65
  78. sglang/srt/entrypoints/grpc_server.py +632 -305
  79. sglang/srt/entrypoints/harmony_utils.py +2 -2
  80. sglang/srt/entrypoints/http_server.py +169 -17
  81. sglang/srt/entrypoints/http_server_engine.py +1 -7
  82. sglang/srt/entrypoints/openai/protocol.py +327 -34
  83. sglang/srt/entrypoints/openai/serving_base.py +74 -8
  84. sglang/srt/entrypoints/openai/serving_chat.py +202 -118
  85. sglang/srt/entrypoints/openai/serving_classify.py +204 -0
  86. sglang/srt/entrypoints/openai/serving_completions.py +20 -4
  87. sglang/srt/entrypoints/openai/serving_embedding.py +1 -0
  88. sglang/srt/entrypoints/openai/serving_responses.py +47 -2
  89. sglang/srt/entrypoints/openai/serving_tokenize.py +144 -0
  90. sglang/srt/environ.py +323 -0
  91. sglang/srt/eplb/eplb_algorithms/__init__.py +18 -1
  92. sglang/srt/eplb/eplb_algorithms/deepseek.py +0 -2
  93. sglang/srt/eplb/eplb_algorithms/elasticity_aware.py +87 -0
  94. sglang/srt/eplb/expert_distribution.py +3 -4
  95. sglang/srt/eplb/expert_location.py +30 -5
  96. sglang/srt/eplb/expert_location_dispatch.py +2 -2
  97. sglang/srt/eplb/expert_location_updater.py +2 -2
  98. sglang/srt/function_call/base_format_detector.py +17 -18
  99. sglang/srt/function_call/function_call_parser.py +21 -16
  100. sglang/srt/function_call/glm4_moe_detector.py +4 -8
  101. sglang/srt/function_call/gpt_oss_detector.py +24 -1
  102. sglang/srt/function_call/json_array_parser.py +61 -0
  103. sglang/srt/function_call/kimik2_detector.py +17 -4
  104. sglang/srt/function_call/utils.py +98 -7
  105. sglang/srt/grpc/compile_proto.py +245 -0
  106. sglang/srt/grpc/grpc_request_manager.py +915 -0
  107. sglang/srt/grpc/health_servicer.py +189 -0
  108. sglang/srt/grpc/scheduler_launcher.py +181 -0
  109. sglang/srt/grpc/sglang_scheduler_pb2.py +81 -68
  110. sglang/srt/grpc/sglang_scheduler_pb2.pyi +124 -61
  111. sglang/srt/grpc/sglang_scheduler_pb2_grpc.py +92 -1
  112. sglang/srt/layers/activation.py +11 -7
  113. sglang/srt/layers/attention/aiter_backend.py +17 -18
  114. sglang/srt/layers/attention/ascend_backend.py +125 -10
  115. sglang/srt/layers/attention/attention_registry.py +226 -0
  116. sglang/srt/layers/attention/base_attn_backend.py +32 -4
  117. sglang/srt/layers/attention/cutlass_mla_backend.py +3 -3
  118. sglang/srt/layers/attention/double_sparsity_backend.py +2 -2
  119. sglang/srt/layers/attention/dual_chunk_flashattention_backend.py +1 -1
  120. sglang/srt/layers/attention/fla/chunk.py +0 -1
  121. sglang/srt/layers/attention/fla/chunk_o.py +1 -1
  122. sglang/srt/layers/attention/fla/chunk_scaled_dot_kkt.py +2 -2
  123. sglang/srt/layers/attention/fla/fused_recurrent.py +4 -4
  124. sglang/srt/layers/attention/fla/fused_sigmoid_gating_recurrent.py +2 -2
  125. sglang/srt/layers/attention/fla/index.py +0 -2
  126. sglang/srt/layers/attention/fla/layernorm_gated.py +50 -32
  127. sglang/srt/layers/attention/fla/utils.py +0 -3
  128. sglang/srt/layers/attention/fla/wy_fast.py +0 -2
  129. sglang/srt/layers/attention/flashattention_backend.py +52 -15
  130. sglang/srt/layers/attention/flashinfer_backend.py +357 -212
  131. sglang/srt/layers/attention/flashinfer_mla_backend.py +31 -33
  132. sglang/srt/layers/attention/flashmla_backend.py +9 -7
  133. sglang/srt/layers/attention/hybrid_attn_backend.py +12 -4
  134. sglang/srt/layers/attention/hybrid_linear_attn_backend.py +236 -133
  135. sglang/srt/layers/attention/intel_amx_backend.py +1 -1
  136. sglang/srt/layers/attention/mamba/causal_conv1d.py +2 -1
  137. sglang/srt/layers/attention/mamba/causal_conv1d_triton.py +24 -103
  138. sglang/srt/layers/attention/mamba/mamba.py +514 -1
  139. sglang/srt/layers/attention/mamba/mamba2_metadata.py +211 -0
  140. sglang/srt/layers/attention/mamba/mixer2_rms_norm_gated.py +120 -0
  141. sglang/srt/layers/attention/mamba/ops/__init__.py +2 -0
  142. sglang/srt/layers/attention/mamba/ops/layernorm_gated.py +172 -0
  143. sglang/srt/layers/attention/mamba/ops/mamba_ssm.py +442 -0
  144. sglang/srt/layers/attention/mamba/ops/ssd_bmm.py +214 -0
  145. sglang/srt/layers/attention/mamba/ops/ssd_chunk_scan.py +562 -0
  146. sglang/srt/layers/attention/mamba/ops/ssd_chunk_state.py +646 -0
  147. sglang/srt/layers/attention/mamba/ops/ssd_combined.py +261 -0
  148. sglang/srt/layers/attention/mamba/ops/ssd_state_passing.py +264 -0
  149. sglang/srt/layers/attention/npu_ops/mla_preprocess.py +393 -0
  150. sglang/srt/layers/attention/nsa/dequant_k_cache.py +163 -0
  151. sglang/srt/layers/attention/nsa/index_buf_accessor.py +354 -0
  152. sglang/srt/layers/attention/nsa/nsa_indexer.py +718 -0
  153. sglang/srt/layers/attention/nsa/quant_k_cache.py +255 -0
  154. sglang/srt/layers/attention/nsa/tilelang_kernel.py +785 -0
  155. sglang/srt/layers/attention/nsa/transform_index.py +144 -0
  156. sglang/srt/layers/attention/nsa/triton_kernel.py +136 -0
  157. sglang/srt/layers/attention/nsa/utils.py +23 -0
  158. sglang/srt/layers/attention/nsa_backend.py +1201 -0
  159. sglang/srt/layers/attention/tbo_backend.py +6 -6
  160. sglang/srt/layers/attention/torch_flex_backend.py +325 -0
  161. sglang/srt/layers/attention/triton_backend.py +249 -42
  162. sglang/srt/layers/attention/triton_ops/double_sparsity_attention.py +2 -2
  163. sglang/srt/layers/attention/triton_ops/extend_attention.py +539 -44
  164. sglang/srt/layers/attention/trtllm_mha_backend.py +7 -9
  165. sglang/srt/layers/attention/trtllm_mla_backend.py +523 -48
  166. sglang/srt/layers/attention/utils.py +11 -7
  167. sglang/srt/layers/attention/vision.py +61 -3
  168. sglang/srt/layers/attention/wave_backend.py +4 -4
  169. sglang/srt/layers/attention/xpu_backend.py +1028 -0
  170. sglang/srt/layers/communicator.py +19 -7
  171. sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/compile_utils.py +4 -8
  172. sglang/srt/layers/deep_gemm_wrapper/configurer.py +25 -0
  173. sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/entrypoint.py +3 -3
  174. sglang/srt/layers/dp_attention.py +28 -1
  175. sglang/srt/layers/elementwise.py +3 -1
  176. sglang/srt/layers/layernorm.py +47 -15
  177. sglang/srt/layers/linear.py +30 -5
  178. sglang/srt/layers/logits_processor.py +161 -18
  179. sglang/srt/layers/modelopt_utils.py +11 -0
  180. sglang/srt/layers/moe/cutlass_moe.py +0 -2
  181. sglang/srt/layers/moe/cutlass_w4a8_moe.py +213 -21
  182. sglang/srt/layers/moe/ep_moe/kernels.py +36 -458
  183. sglang/srt/layers/moe/ep_moe/layer.py +243 -448
  184. sglang/srt/layers/moe/flashinfer_cutedsl_moe.py +52 -25
  185. sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=128,N=192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  186. sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=256,N=256,device_name=NVIDIA_B200.json +146 -0
  187. sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=256,N=256,device_name=NVIDIA_H800,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
  188. sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=512,N=128,device_name=NVIDIA_H800,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
  189. sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
  190. sglang/srt/layers/moe/fused_moe_triton/fused_moe_triton_config.py +17 -5
  191. sglang/srt/layers/moe/fused_moe_triton/layer.py +86 -81
  192. sglang/srt/layers/moe/fused_moe_triton/triton_kernels_moe.py +18 -42
  193. sglang/srt/layers/moe/moe_runner/deep_gemm.py +304 -0
  194. sglang/srt/layers/moe/moe_runner/runner.py +3 -0
  195. sglang/srt/layers/moe/moe_runner/triton.py +3 -1
  196. sglang/srt/layers/moe/rocm_moe_utils.py +0 -1
  197. sglang/srt/layers/moe/router.py +51 -15
  198. sglang/srt/layers/moe/token_dispatcher/__init__.py +10 -0
  199. sglang/srt/layers/moe/token_dispatcher/base.py +1 -1
  200. sglang/srt/layers/moe/token_dispatcher/deepep.py +177 -106
  201. sglang/srt/layers/moe/token_dispatcher/mooncake.py +386 -0
  202. sglang/srt/layers/moe/token_dispatcher/standard.py +46 -0
  203. sglang/srt/layers/moe/topk.py +3 -2
  204. sglang/srt/layers/moe/utils.py +27 -1
  205. sglang/srt/layers/parameter.py +23 -6
  206. sglang/srt/layers/quantization/__init__.py +2 -53
  207. sglang/srt/layers/quantization/awq.py +183 -6
  208. sglang/srt/layers/quantization/awq_triton.py +29 -0
  209. sglang/srt/layers/quantization/base_config.py +20 -1
  210. sglang/srt/layers/quantization/compressed_tensors/__init__.py +7 -0
  211. sglang/srt/layers/quantization/compressed_tensors/compressed_tensors.py +21 -49
  212. sglang/srt/layers/quantization/compressed_tensors/compressed_tensors_moe.py +421 -70
  213. sglang/srt/layers/quantization/compressed_tensors/schemes/__init__.py +5 -0
  214. sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +4 -22
  215. sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +173 -0
  216. sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +339 -0
  217. sglang/srt/layers/quantization/fp8.py +86 -20
  218. sglang/srt/layers/quantization/fp8_kernel.py +55 -10
  219. sglang/srt/layers/quantization/fp8_utils.py +43 -15
  220. sglang/srt/layers/quantization/fpgemm_fp8.py +2 -3
  221. sglang/srt/layers/quantization/gptq.py +0 -1
  222. sglang/srt/layers/quantization/int8_kernel.py +18 -2
  223. sglang/srt/layers/quantization/marlin_utils.py +12 -0
  224. sglang/srt/layers/quantization/modelopt_quant.py +141 -81
  225. sglang/srt/layers/quantization/mxfp4.py +17 -34
  226. sglang/srt/layers/quantization/petit.py +1 -1
  227. sglang/srt/layers/quantization/quark/quark.py +3 -1
  228. sglang/srt/layers/quantization/quark/quark_moe.py +18 -5
  229. sglang/srt/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +0 -7
  230. sglang/srt/layers/quantization/unquant.py +1 -4
  231. sglang/srt/layers/quantization/utils.py +0 -1
  232. sglang/srt/layers/quantization/w4afp8.py +51 -24
  233. sglang/srt/layers/quantization/w8a8_int8.py +45 -27
  234. sglang/srt/layers/radix_attention.py +59 -9
  235. sglang/srt/layers/rotary_embedding.py +750 -46
  236. sglang/srt/layers/sampler.py +84 -16
  237. sglang/srt/layers/sparse_pooler.py +98 -0
  238. sglang/srt/layers/utils.py +23 -1
  239. sglang/srt/layers/vocab_parallel_embedding.py +4 -1
  240. sglang/srt/lora/backend/base_backend.py +3 -3
  241. sglang/srt/lora/backend/chunked_backend.py +348 -0
  242. sglang/srt/lora/backend/triton_backend.py +9 -4
  243. sglang/srt/lora/eviction_policy.py +139 -0
  244. sglang/srt/lora/lora.py +7 -5
  245. sglang/srt/lora/lora_manager.py +33 -7
  246. sglang/srt/lora/lora_registry.py +1 -1
  247. sglang/srt/lora/mem_pool.py +41 -17
  248. sglang/srt/lora/triton_ops/__init__.py +4 -0
  249. sglang/srt/lora/triton_ops/chunked_sgmv_expand.py +214 -0
  250. sglang/srt/lora/triton_ops/chunked_sgmv_shrink.py +176 -0
  251. sglang/srt/lora/utils.py +7 -5
  252. sglang/srt/managers/cache_controller.py +83 -152
  253. sglang/srt/managers/data_parallel_controller.py +156 -87
  254. sglang/srt/managers/detokenizer_manager.py +51 -24
  255. sglang/srt/managers/io_struct.py +223 -129
  256. sglang/srt/managers/mm_utils.py +49 -10
  257. sglang/srt/managers/multi_tokenizer_mixin.py +83 -98
  258. sglang/srt/managers/multimodal_processor.py +1 -2
  259. sglang/srt/managers/overlap_utils.py +130 -0
  260. sglang/srt/managers/schedule_batch.py +340 -529
  261. sglang/srt/managers/schedule_policy.py +158 -18
  262. sglang/srt/managers/scheduler.py +665 -620
  263. sglang/srt/managers/scheduler_input_blocker.py +1 -1
  264. sglang/srt/managers/scheduler_metrics_mixin.py +150 -131
  265. sglang/srt/managers/scheduler_output_processor_mixin.py +337 -122
  266. sglang/srt/managers/scheduler_pp_mixin.py +341 -0
  267. sglang/srt/managers/scheduler_profiler_mixin.py +62 -15
  268. sglang/srt/managers/scheduler_runtime_checker_mixin.py +217 -0
  269. sglang/srt/managers/scheduler_update_weights_mixin.py +40 -14
  270. sglang/srt/managers/tokenizer_communicator_mixin.py +141 -19
  271. sglang/srt/managers/tokenizer_manager.py +462 -226
  272. sglang/srt/managers/tp_worker.py +217 -156
  273. sglang/srt/managers/utils.py +79 -47
  274. sglang/srt/mem_cache/allocator.py +21 -22
  275. sglang/srt/mem_cache/allocator_ascend.py +42 -28
  276. sglang/srt/mem_cache/base_prefix_cache.py +3 -3
  277. sglang/srt/mem_cache/chunk_cache.py +20 -2
  278. sglang/srt/mem_cache/common.py +480 -0
  279. sglang/srt/mem_cache/evict_policy.py +38 -0
  280. sglang/srt/mem_cache/hicache_storage.py +44 -2
  281. sglang/srt/mem_cache/hiradix_cache.py +134 -34
  282. sglang/srt/mem_cache/mamba_radix_cache.py +993 -0
  283. sglang/srt/mem_cache/memory_pool.py +602 -208
  284. sglang/srt/mem_cache/memory_pool_host.py +134 -183
  285. sglang/srt/mem_cache/multimodal_cache.py +0 -1
  286. sglang/srt/mem_cache/radix_cache.py +263 -78
  287. sglang/srt/mem_cache/radix_cache_cpp.py +29 -21
  288. sglang/srt/mem_cache/storage/__init__.py +10 -0
  289. sglang/srt/mem_cache/storage/aibrix_kvcache/aibrix_kvcache_storage.py +157 -0
  290. sglang/srt/mem_cache/storage/aibrix_kvcache/unit_test.py +97 -0
  291. sglang/srt/mem_cache/storage/backend_factory.py +223 -0
  292. sglang/srt/mem_cache/storage/eic/eic_storage.py +777 -0
  293. sglang/srt/mem_cache/storage/eic/test_unit.py +115 -0
  294. sglang/srt/mem_cache/storage/hf3fs/hf3fs_client.py +0 -1
  295. sglang/srt/mem_cache/storage/hf3fs/storage_hf3fs.py +180 -59
  296. sglang/srt/mem_cache/storage/lmcache/lmc_radix_cache.py +15 -9
  297. sglang/srt/mem_cache/storage/mooncake_store/mooncake_store.py +217 -26
  298. sglang/srt/mem_cache/storage/nixl/hicache_nixl.py +38 -9
  299. sglang/srt/mem_cache/storage/nixl/nixl_utils.py +1 -1
  300. sglang/srt/mem_cache/storage/nixl/test_hicache_nixl_storage.py +17 -2
  301. sglang/srt/mem_cache/swa_radix_cache.py +115 -58
  302. sglang/srt/metrics/collector.py +113 -120
  303. sglang/srt/metrics/func_timer.py +3 -8
  304. sglang/srt/metrics/utils.py +8 -1
  305. sglang/srt/model_executor/cpu_graph_runner.py +2 -2
  306. sglang/srt/model_executor/cuda_graph_runner.py +81 -36
  307. sglang/srt/model_executor/forward_batch_info.py +40 -50
  308. sglang/srt/model_executor/model_runner.py +507 -319
  309. sglang/srt/model_executor/npu_graph_runner.py +11 -5
  310. sglang/srt/model_executor/piecewise_cuda_graph_runner.py +539 -0
  311. sglang/srt/model_loader/__init__.py +1 -1
  312. sglang/srt/model_loader/loader.py +438 -37
  313. sglang/srt/model_loader/utils.py +0 -1
  314. sglang/srt/model_loader/weight_utils.py +200 -27
  315. sglang/srt/models/apertus.py +2 -3
  316. sglang/srt/models/arcee.py +2 -2
  317. sglang/srt/models/bailing_moe.py +40 -56
  318. sglang/srt/models/bailing_moe_nextn.py +3 -4
  319. sglang/srt/models/bert.py +1 -1
  320. sglang/srt/models/deepseek_nextn.py +25 -4
  321. sglang/srt/models/deepseek_ocr.py +1516 -0
  322. sglang/srt/models/deepseek_v2.py +793 -235
  323. sglang/srt/models/dots_ocr.py +171 -0
  324. sglang/srt/models/dots_vlm.py +0 -1
  325. sglang/srt/models/dots_vlm_vit.py +1 -1
  326. sglang/srt/models/falcon_h1.py +570 -0
  327. sglang/srt/models/gemma3_causal.py +0 -2
  328. sglang/srt/models/gemma3_mm.py +17 -1
  329. sglang/srt/models/gemma3n_mm.py +2 -3
  330. sglang/srt/models/glm4_moe.py +17 -40
  331. sglang/srt/models/glm4_moe_nextn.py +4 -4
  332. sglang/srt/models/glm4v.py +3 -2
  333. sglang/srt/models/glm4v_moe.py +6 -6
  334. sglang/srt/models/gpt_oss.py +12 -35
  335. sglang/srt/models/grok.py +10 -23
  336. sglang/srt/models/hunyuan.py +2 -7
  337. sglang/srt/models/interns1.py +0 -1
  338. sglang/srt/models/kimi_vl.py +1 -7
  339. sglang/srt/models/kimi_vl_moonvit.py +4 -2
  340. sglang/srt/models/llama.py +6 -2
  341. sglang/srt/models/llama_eagle3.py +1 -1
  342. sglang/srt/models/longcat_flash.py +6 -23
  343. sglang/srt/models/longcat_flash_nextn.py +4 -15
  344. sglang/srt/models/mimo.py +2 -13
  345. sglang/srt/models/mimo_mtp.py +1 -2
  346. sglang/srt/models/minicpmo.py +7 -5
  347. sglang/srt/models/mixtral.py +1 -4
  348. sglang/srt/models/mllama.py +1 -1
  349. sglang/srt/models/mllama4.py +27 -6
  350. sglang/srt/models/nemotron_h.py +511 -0
  351. sglang/srt/models/olmo2.py +31 -4
  352. sglang/srt/models/opt.py +5 -5
  353. sglang/srt/models/phi.py +1 -1
  354. sglang/srt/models/phi4mm.py +1 -1
  355. sglang/srt/models/phimoe.py +0 -1
  356. sglang/srt/models/pixtral.py +0 -3
  357. sglang/srt/models/points_v15_chat.py +186 -0
  358. sglang/srt/models/qwen.py +0 -1
  359. sglang/srt/models/qwen2.py +0 -7
  360. sglang/srt/models/qwen2_5_vl.py +5 -5
  361. sglang/srt/models/qwen2_audio.py +2 -15
  362. sglang/srt/models/qwen2_moe.py +70 -4
  363. sglang/srt/models/qwen2_vl.py +6 -3
  364. sglang/srt/models/qwen3.py +18 -3
  365. sglang/srt/models/qwen3_moe.py +50 -38
  366. sglang/srt/models/qwen3_next.py +43 -21
  367. sglang/srt/models/qwen3_next_mtp.py +3 -4
  368. sglang/srt/models/qwen3_omni_moe.py +661 -0
  369. sglang/srt/models/qwen3_vl.py +791 -0
  370. sglang/srt/models/qwen3_vl_moe.py +343 -0
  371. sglang/srt/models/registry.py +15 -3
  372. sglang/srt/models/roberta.py +55 -3
  373. sglang/srt/models/sarashina2_vision.py +268 -0
  374. sglang/srt/models/solar.py +505 -0
  375. sglang/srt/models/starcoder2.py +357 -0
  376. sglang/srt/models/step3_vl.py +3 -5
  377. sglang/srt/models/torch_native_llama.py +9 -2
  378. sglang/srt/models/utils.py +61 -0
  379. sglang/srt/multimodal/processors/base_processor.py +21 -9
  380. sglang/srt/multimodal/processors/deepseek_ocr.py +37 -0
  381. sglang/srt/multimodal/processors/deepseek_vl_v2.py +0 -3
  382. sglang/srt/multimodal/processors/dots_vlm.py +2 -4
  383. sglang/srt/multimodal/processors/glm4v.py +1 -5
  384. sglang/srt/multimodal/processors/internvl.py +20 -10
  385. sglang/srt/multimodal/processors/janus_pro.py +0 -1
  386. sglang/srt/multimodal/processors/mllama4.py +0 -8
  387. sglang/srt/multimodal/processors/phi4mm.py +0 -1
  388. sglang/srt/multimodal/processors/points_v15_chat.py +52 -0
  389. sglang/srt/multimodal/processors/qwen_vl.py +83 -17
  390. sglang/srt/multimodal/processors/sarashina2_vision.py +81 -0
  391. sglang/srt/multimodal/processors/step3_vl.py +1 -1
  392. sglang/srt/parser/conversation.py +41 -0
  393. sglang/srt/parser/jinja_template_utils.py +6 -0
  394. sglang/srt/parser/reasoning_parser.py +0 -1
  395. sglang/srt/sampling/custom_logit_processor.py +77 -2
  396. sglang/srt/sampling/sampling_batch_info.py +36 -23
  397. sglang/srt/sampling/sampling_params.py +75 -0
  398. sglang/srt/server_args.py +1300 -338
  399. sglang/srt/server_args_config_parser.py +146 -0
  400. sglang/srt/single_batch_overlap.py +161 -0
  401. sglang/srt/speculative/base_spec_worker.py +34 -0
  402. sglang/srt/speculative/cpp_ngram/ngram.cpp +374 -0
  403. sglang/srt/speculative/cpp_ngram/ngram.h +110 -0
  404. sglang/srt/speculative/cpp_ngram/ngram_cache.py +138 -0
  405. sglang/srt/speculative/cpp_ngram/ngram_cache_binding.cpp +43 -0
  406. sglang/srt/speculative/cpp_ngram/param.h +125 -0
  407. sglang/srt/speculative/cpp_ngram/queue.h +71 -0
  408. sglang/srt/speculative/draft_utils.py +226 -0
  409. sglang/srt/speculative/eagle_draft_cuda_graph_runner.py +26 -8
  410. sglang/srt/speculative/eagle_draft_extend_cuda_graph_runner.py +26 -3
  411. sglang/srt/speculative/eagle_info.py +786 -0
  412. sglang/srt/speculative/eagle_info_v2.py +458 -0
  413. sglang/srt/speculative/eagle_utils.py +113 -1270
  414. sglang/srt/speculative/eagle_worker.py +120 -285
  415. sglang/srt/speculative/eagle_worker_v2.py +702 -0
  416. sglang/srt/speculative/ngram_info.py +433 -0
  417. sglang/srt/speculative/ngram_worker.py +246 -0
  418. sglang/srt/speculative/spec_info.py +49 -0
  419. sglang/srt/speculative/spec_utils.py +641 -0
  420. sglang/srt/speculative/standalone_worker.py +4 -14
  421. sglang/srt/tokenizer/tiktoken_tokenizer.py +2 -2
  422. sglang/srt/tracing/trace.py +32 -6
  423. sglang/srt/two_batch_overlap.py +35 -18
  424. sglang/srt/utils/__init__.py +2 -0
  425. sglang/srt/{bench_utils.py → utils/bench_utils.py} +4 -2
  426. sglang/srt/{utils.py → utils/common.py} +583 -113
  427. sglang/srt/{hf_transformers_utils.py → utils/hf_transformers_utils.py} +86 -19
  428. sglang/srt/{host_shared_memory.py → utils/host_shared_memory.py} +0 -1
  429. sglang/srt/{offloader.py → utils/offloader.py} +4 -4
  430. sglang/srt/{patch_torch.py → utils/patch_torch.py} +8 -0
  431. sglang/srt/utils/profile_merger.py +199 -0
  432. sglang/srt/utils/rpd_utils.py +452 -0
  433. sglang/srt/utils/slow_rank_detector.py +71 -0
  434. sglang/srt/{torch_memory_saver_adapter.py → utils/torch_memory_saver_adapter.py} +5 -7
  435. sglang/srt/warmup.py +8 -4
  436. sglang/srt/weight_sync/utils.py +1 -1
  437. sglang/test/attention/test_flashattn_backend.py +1 -1
  438. sglang/test/attention/test_flashattn_mla_backend.py +0 -1
  439. sglang/test/attention/test_prefix_chunk_info.py +0 -2
  440. sglang/test/attention/test_trtllm_mla_backend.py +221 -53
  441. sglang/test/few_shot_gsm8k_engine.py +2 -4
  442. sglang/test/get_logits_ut.py +57 -0
  443. sglang/test/kit_matched_stop.py +157 -0
  444. sglang/test/longbench_v2/__init__.py +1 -0
  445. sglang/test/longbench_v2/test_longbench_v2_eval.py +238 -0
  446. sglang/test/longbench_v2/validate_longbench_v2.py +337 -0
  447. sglang/test/longbench_v2/validate_longbench_v2_standalone.py +306 -0
  448. sglang/test/run_eval.py +120 -11
  449. sglang/test/runners.py +3 -1
  450. sglang/test/send_one.py +42 -7
  451. sglang/test/simple_eval_common.py +8 -2
  452. sglang/test/simple_eval_gpqa.py +0 -1
  453. sglang/test/simple_eval_humaneval.py +0 -3
  454. sglang/test/simple_eval_longbench_v2.py +344 -0
  455. sglang/test/simple_eval_mmmu_vlm.py +441 -0
  456. sglang/test/test_block_fp8.py +3 -4
  457. sglang/test/test_block_fp8_deep_gemm_blackwell.py +0 -1
  458. sglang/test/test_cutlass_moe.py +1 -2
  459. sglang/test/test_cutlass_w4a8_moe.py +10 -20
  460. sglang/test/test_deterministic.py +430 -0
  461. sglang/test/test_deterministic_utils.py +73 -0
  462. sglang/test/test_disaggregation_utils.py +93 -1
  463. sglang/test/test_marlin_moe.py +0 -1
  464. sglang/test/test_programs.py +1 -1
  465. sglang/test/test_utils.py +432 -16
  466. sglang/utils.py +10 -1
  467. sglang/version.py +1 -1
  468. {sglang-0.5.3rc0.dist-info → sglang-0.5.4.dist-info}/METADATA +64 -43
  469. {sglang-0.5.3rc0.dist-info → sglang-0.5.4.dist-info}/RECORD +476 -346
  470. sglang/srt/entrypoints/grpc_request_manager.py +0 -580
  471. sglang/srt/layers/quantization/deep_gemm_wrapper/configurer.py +0 -32
  472. sglang/srt/managers/tp_worker_overlap_thread.py +0 -319
  473. sglang/srt/mem_cache/lora_radix_cache.py +0 -421
  474. sglang/srt/speculative/build_eagle_tree.py +0 -427
  475. sglang/test/test_block_fp8_ep.py +0 -358
  476. /sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/__init__.py +0 -0
  477. /sglang/srt/{remote_instance_weight_loader_utils.py → model_loader/remote_instance_weight_loader_utils.py} +0 -0
  478. /sglang/srt/{aio_rwlock.py → utils/aio_rwlock.py} +0 -0
  479. /sglang/srt/{poll_based_barrier.py → utils/poll_based_barrier.py} +0 -0
  480. {sglang-0.5.3rc0.dist-info → sglang-0.5.4.dist-info}/WHEEL +0 -0
  481. {sglang-0.5.3rc0.dist-info → sglang-0.5.4.dist-info}/licenses/LICENSE +0 -0
  482. {sglang-0.5.3rc0.dist-info → sglang-0.5.4.dist-info}/top_level.txt +0 -0
sglang/srt/models/solar.py
@@ -0,0 +1,505 @@
+ # SPDX-License-Identifier: Apache-2.0
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+
+ # Adapted from
+ # https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py
+ # Copyright 2023 The vLLM team.
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+ #
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+ # and OPT implementations in this library. It has been modified from its
+ # original forms to accommodate minor architectural differences compared
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/model_executor/models/solar.py
+ from collections.abc import Iterable
+ from typing import Any, List, Optional, Tuple, Union
+
+ import torch
+ from torch import nn
+ from transformers import PretrainedConfig
+
+ from sglang.srt.distributed import get_pp_group, get_tensor_model_parallel_world_size
+ from sglang.srt.distributed.parallel_state import get_tensor_model_parallel_rank
+ from sglang.srt.layers.activation import SiluAndMul
+ from sglang.srt.layers.layernorm import RMSNorm
+ from sglang.srt.layers.linear import (
+     MergedColumnParallelLinear,
+     QKVParallelLinear,
+     RowParallelLinear,
+ )
+ from sglang.srt.layers.logits_processor import LogitsProcessor, LogitsProcessorOutput
+ from sglang.srt.layers.quantization import QuantizationConfig
+ from sglang.srt.layers.radix_attention import RadixAttention
+ from sglang.srt.layers.rotary_embedding import get_rope
+ from sglang.srt.layers.utils import PPMissingLayer
+ from sglang.srt.layers.vocab_parallel_embedding import (
+     DEFAULT_VOCAB_PADDING_SIZE,
+     ParallelLMHead,
+     VocabParallelEmbedding,
+ )
+ from sglang.srt.model_executor.forward_batch_info import ForwardBatch, PPProxyTensors
+ from sglang.srt.model_loader.weight_utils import (
+     default_weight_loader,
+     kv_cache_scales_loader,
+ )
+ from sglang.srt.utils import add_prefix, make_layers
+
+
+ class SolarMLP(nn.Module):
+
+     def __init__(
+         self,
+         hidden_size: int,
+         intermediate_size: int,
+         hidden_act: str,
+         quant_config: Optional[QuantizationConfig] = None,
+         bias: bool = False,
+         prefix: str = "",
+     ) -> None:
+         super().__init__()
+         self.gate_up_proj = MergedColumnParallelLinear(
+             input_size=hidden_size,
+             output_sizes=[intermediate_size] * 2,
+             bias=bias,
+             quant_config=quant_config,
+             prefix=f"{prefix}.gate_up_proj",
+         )
+         self.down_proj = RowParallelLinear(
+             input_size=intermediate_size,
+             output_size=hidden_size,
+             bias=bias,
+             quant_config=quant_config,
+             prefix=f"{prefix}.down_proj",
+         )
+         if hidden_act != "silu":
+             raise ValueError(
+                 f"Unsupported activation: {hidden_act}. "
+                 "Only silu is supported for now."
+             )
+         self.act_fn = SiluAndMul()
+
+     def forward(self, x):
+         gate_up, _ = self.gate_up_proj(x)
+         x = self.act_fn(gate_up)
+         x, _ = self.down_proj(x)
+         return x
+
+
+ class SolarAttention(nn.Module):
+
+     def __init__(
+         self,
+         config: PretrainedConfig,
+         hidden_size: int,
+         num_heads: int,
+         num_kv_heads: int,
+         rope_theta: float = 10000,
+         rope_scaling: Optional[dict[str, Any]] = None,
+         max_position_embeddings: int = 8192,
+         quant_config: Optional[QuantizationConfig] = None,
+         bias: bool = False,
+         prefix: str = "",
+         layer_id: int = 0,
+     ) -> None:
+         super().__init__()
+         self.hidden_size = hidden_size
+         tp_size = get_tensor_model_parallel_world_size()
+         self.total_num_heads = num_heads
+         assert self.total_num_heads % tp_size == 0
+         self.num_heads = self.total_num_heads // tp_size
+         self.total_num_kv_heads = num_kv_heads
+         if self.total_num_kv_heads >= tp_size:
+             assert self.total_num_kv_heads % tp_size == 0
+         else:
+             assert tp_size % self.total_num_kv_heads == 0
+         self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
+
+         self.head_dim = getattr(config, "head_dim", None)
+         if self.head_dim is None:
+             self.head_dim = self.hidden_size // self.total_num_heads
+         self.q_size = self.num_heads * self.head_dim
+         self.kv_size = self.num_kv_heads * self.head_dim
+         self.scaling = self.head_dim**-0.5
+         self.rope_theta = rope_theta
+         self.max_position_embeddings = max_position_embeddings
+
+         self.qkv_proj = QKVParallelLinear(
+             hidden_size=hidden_size,
+             head_size=self.head_dim,
+             total_num_heads=self.total_num_heads,
+             total_num_kv_heads=self.total_num_kv_heads,
+             bias=bias,
+             quant_config=quant_config,
+             prefix=f"{prefix}.qkv_proj",
+         )
+         self.o_proj = RowParallelLinear(
+             input_size=self.total_num_heads * self.head_dim,
+             output_size=hidden_size,
+             bias=bias,
+             quant_config=quant_config,
+             prefix=f"{prefix}.o_proj",
+         )
+
+         self.rotary_emb = get_rope(
+             self.head_dim,
+             rotary_dim=self.head_dim,
+             max_position=max_position_embeddings,
+             base=rope_theta,
+             rope_scaling=rope_scaling,
+         )
+         self.attn = RadixAttention(
+             self.num_heads,
+             self.head_dim,
+             self.scaling,
+             num_kv_heads=self.num_kv_heads,
+             layer_id=layer_id,
+             quant_config=quant_config,
+             prefix=f"{prefix}.attn",
+         )
+
+     def forward(
+         self,
+         positions: torch.Tensor,
+         forward_batch: ForwardBatch,
+         hidden_states: torch.Tensor,
+     ) -> torch.Tensor:
+         qkv, _ = self.qkv_proj(hidden_states)
+         q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
+         q, k = self.rotary_emb(positions, q, k)
+         attn_output = self.attn(q, k, v, forward_batch=forward_batch)
+         output, _ = self.o_proj(attn_output)
+         return output
+
+
+ class SolarDecoderLayer(nn.Module):
+
+     def __init__(
+         self,
+         config: PretrainedConfig,
+         layer_id: int,
+         quant_config: Optional[QuantizationConfig] = None,
+         prefix: str = "",
+     ) -> None:
+         super().__init__()
+         self.hidden_size = config.hidden_size
+         rope_theta = getattr(config, "rope_theta", 10000)
+         rope_scaling = getattr(config, "rope_scaling", None)
+
+         if rope_scaling is not None and getattr(
+             config, "original_max_position_embeddings", None
+         ):
+             rope_scaling["original_max_position_embeddings"] = (
+                 config.original_max_position_embeddings
+             )
+         max_position_embeddings = getattr(config, "max_position_embeddings", 8192)
+
+         attention_bias = getattr(config, "attention_bias", False) or getattr(
+             config, "bias", False
+         )
+         self.self_attn = SolarAttention(
+             config=config,
+             layer_id=layer_id,
+             hidden_size=self.hidden_size,
+             num_heads=config.num_attention_heads,
+             num_kv_heads=getattr(
+                 config, "num_key_value_heads", config.num_attention_heads
+             ),
+             rope_theta=rope_theta,
+             rope_scaling=rope_scaling,
+             max_position_embeddings=max_position_embeddings,
+             quant_config=quant_config,
+             bias=attention_bias,
+             prefix=f"{prefix}.self_attn",
+         )
+         self.mlp = SolarMLP(
+             hidden_size=self.hidden_size,
+             intermediate_size=config.intermediate_size,
+             hidden_act=config.hidden_act,
+             quant_config=quant_config,
+             bias=getattr(config, "mlp_bias", False),
+             prefix=f"{prefix}.mlp",
+         )
+         self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+         self.post_attention_layernorm = RMSNorm(
+             config.hidden_size, eps=config.rms_norm_eps
+         )
+
+     def forward(
+         self,
+         positions: torch.Tensor,
+         hidden_states: torch.Tensor,
+         forward_batch: ForwardBatch,
+         residual: Optional[torch.Tensor],
+     ) -> tuple[torch.Tensor, torch.Tensor]:
+         # Self Attention
+         if residual is None:
+             residual = hidden_states
+             hidden_states = self.input_layernorm(hidden_states)
+         else:
+             hidden_states, residual = self.input_layernorm(hidden_states, residual)
+         hidden_states = self.self_attn(
+             positions=positions,
+             hidden_states=hidden_states,
+             forward_batch=forward_batch,
+         )
+
+         # Fully Connected
+         hidden_states, residual = self.post_attention_layernorm(hidden_states, residual)
+         hidden_states = self.mlp(hidden_states)
+         return hidden_states, residual
+
+
+ class SolarModel(nn.Module):
+
+     def __init__(
+         self,
+         config: PretrainedConfig,
+         quant_config: Optional[QuantizationConfig] = None,
+         prefix: str = "",
+     ):
+         super().__init__()
+         self.config = config
+
+         self.vocab_size = config.vocab_size
+         self.org_vocab_size = config.vocab_size
+         self.pp_group = get_pp_group()
+         if self.pp_group.is_first_rank:
+             self.embed_tokens = VocabParallelEmbedding(
+                 config.vocab_size,
+                 config.hidden_size,
+                 quant_config=quant_config,
+                 prefix=add_prefix("embed_tokens", prefix),
+             )
+         else:
+             self.embed_tokens = PPMissingLayer()
+         self.start_layer, self.end_layer, self.layers = make_layers(
+             config.num_hidden_layers,
+             lambda idx, prefix: SolarDecoderLayer(
+                 config=config,
+                 quant_config=quant_config,
+                 layer_id=idx,
+                 prefix=prefix,
+             ),
+             prefix=f"{prefix}.layers",
+         )
+         if get_pp_group().is_last_rank:
+             self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+         else:
+             self.norm = PPMissingLayer()
+
+     def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor:
+         return self.embed_tokens(input_ids)
+
+     def forward(
+         self,
+         input_ids: Optional[torch.Tensor],
+         positions: torch.Tensor,
+         forward_batch: ForwardBatch,
+         inputs_embeds: Optional[torch.Tensor] = None,
+         pp_proxy_tensors: Optional[PPProxyTensors] = None,
+     ) -> Union[torch.Tensor, Tuple[torch.Tensor, List[torch.Tensor]], PPProxyTensors]:
+         if self.pp_group.is_first_rank:
+             if inputs_embeds is not None:
+                 hidden_states = inputs_embeds
+             else:
+                 hidden_states = self.get_input_embeddings(input_ids)
+             residual = None
+         else:
+             assert pp_proxy_tensors is not None
+
+             hidden_states = pp_proxy_tensors["hidden_states"]
+             residual = pp_proxy_tensors["residual"]
+
+         # Depth up-scaling mechanism: caches hidden states and residuals from
+         # intermediate layers and interpolates them with the states of later layers.
+         # `bskcn` stands for "backbone skip connection".
+         bskcn_h_1 = None
+         bskcn_h_2 = None
+         bskcn_r_1 = None
+         bskcn_r_2 = None
+         bskcn_tv = self.config.bskcn_tv[0] if self.training else self.config.bskcn_tv[1]
+
+         for i in range(self.start_layer, self.end_layer):
+             if i in self.config.bskcn_1:
+                 bskcn_h_1 = hidden_states.clone()
+                 bskcn_r_1 = residual.clone() if residual is not None else None
+             if i in self.config.bskcn_2:
+                 bskcn_h_2 = hidden_states.clone()
+                 bskcn_r_2 = residual.clone() if residual is not None else None
+             if i in self.config.bskcn_3:
+                 hidden_states = bskcn_h_1 * bskcn_tv + hidden_states * (1 - bskcn_tv)
+                 if bskcn_r_1 is not None and residual is not None:
+                     residual = bskcn_r_1 * bskcn_tv + residual * (1 - bskcn_tv)
+             if i in self.config.bskcn_4:
+                 hidden_states = bskcn_h_2 * bskcn_tv + hidden_states * (1 - bskcn_tv)
+                 if bskcn_r_2 is not None and residual is not None:
+                     residual = bskcn_r_2 * bskcn_tv + residual * (1 - bskcn_tv)
+             layer = self.layers[i]
+             hidden_states, residual = layer(
+                 positions=positions,
+                 hidden_states=hidden_states,
+                 forward_batch=forward_batch,
+                 residual=residual,
+             )
+
+         if not self.pp_group.is_last_rank:
+             return PPProxyTensors(
+                 {"hidden_states": hidden_states, "residual": residual}
+             )
+
+         hidden_states, _ = self.norm(hidden_states, residual)
+         return hidden_states
+
+     def load_kv_cache_scales(self, quantization_param_path: str) -> None:
+         tp_size = get_tensor_model_parallel_world_size()
+         tp_rank = get_tensor_model_parallel_rank()
+         for layer_idx, scaling_factor in kv_cache_scales_loader(
+             quantization_param_path,
+             tp_rank,
+             tp_size,
+             self.config.num_hidden_layers,
+             self.config.__class__.model_type,
+         ):
+             if not isinstance(self.layers[layer_idx], nn.Identity):
+                 layer_self_attn = self.layers[layer_idx].self_attn
+
+             if hasattr(layer_self_attn.attn, "k_scale"):
+                 layer_self_attn.attn.k_scale = scaling_factor
+                 layer_self_attn.attn.v_scale = scaling_factor
+             else:
+                 raise RuntimeError(
+                     "Self attention has no KV cache scaling factor attribute!"
+                 )
+
+
+ class SolarForCausalLM(nn.Module):
+
+     packed_modules_mapping = {
+         "qkv_proj": [
+             ("q_proj", "q"),
+             ("k_proj", "k"),
+             ("v_proj", "v"),
+         ],
+         "gate_up_proj": [
+             ("gate_proj", 0),
+             ("up_proj", 1),
+         ],
+     }
+
+     default_bitsandbytes_target_modules = [
+         ".gate_proj.",
+         ".down_proj.",
+         ".up_proj.",
+         ".q_proj.",
+         ".k_proj.",
+         ".v_proj.",
+         ".o_proj.",
+     ]
+     column_parallel_weights_modules = [".down_proj.", ".o_proj."]
+     bitsandbytes_stacked_params_mapping = {
+         ".q_proj": (".qkv_proj", 0),
+         ".k_proj": (".qkv_proj", 1),
+         ".v_proj": (".qkv_proj", 2),
+         ".gate_proj": (".gate_up_proj", 0),
+         ".up_proj": (".gate_up_proj", 1),
+     }
+
+     def __init__(
+         self,
+         config: PretrainedConfig,
+         quant_config: Optional[QuantizationConfig] = None,
+         prefix: str = "",
+     ):
+         super().__init__()
+         self.pp_group = get_pp_group()
+         self.config = config
+         self.quant_config = quant_config
+         self.model = SolarModel(
+             config=config,
+             quant_config=self.quant_config,
+             prefix=add_prefix("model", prefix),
+         )
+
+         if self.pp_group.is_last_rank:
+             self.unpadded_vocab_size = config.vocab_size
+             self.lm_head = ParallelLMHead(
+                 self.unpadded_vocab_size,
+                 config.hidden_size,
+                 org_num_embeddings=config.vocab_size,
+                 padding_size=DEFAULT_VOCAB_PADDING_SIZE,
+                 quant_config=quant_config,
+             )
+             if config.tie_word_embeddings and self.pp_group.is_first_rank:
+                 self.lm_head.weight = self.model.embed_tokens.weight
+
+             logit_scale = getattr(config, "logit_scale", 1.0)
+             self.logits_processor = LogitsProcessor(
+                 self.unpadded_vocab_size, config.vocab_size, logit_scale
+             )
+         else:
+             self.lm_head = PPMissingLayer()
+
+     def forward(
+         self,
+         input_ids: torch.Tensor,
+         positions: torch.Tensor,
+         forward_batch: ForwardBatch,
+         inputs_embeds: Optional[torch.Tensor] = None,
+     ) -> Union[torch.Tensor, LogitsProcessorOutput]:
+         hidden_states = self.model(
+             input_ids=input_ids,
+             positions=positions,
+             forward_batch=forward_batch,
+             inputs_embeds=inputs_embeds,
+         )
+
+         if self.pp_group.is_last_rank:
+             logits = self.logits_processor(self.lm_head, hidden_states, forward_batch)
+             return logits
+
+         return hidden_states
+
+     def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]):
+
+         params_dict = dict(self.named_parameters())
+         for name, loaded_weight in weights:
+
+             is_packed = False
+             for packed_name, sources in self.packed_modules_mapping.items():
+                 for src_name, shard_id in sources:
+                     if src_name in name:
+
+                         model_param_name = name.replace(src_name, packed_name)
+
+                         if model_param_name in params_dict:
+                             param = params_dict[model_param_name]
+                             weight_loader = getattr(
+                                 param, "weight_loader", default_weight_loader
+                             )
+                             weight_loader(param, loaded_weight, shard_id)
+                             is_packed = True
+                         break
+                 if is_packed:
+                     break
+
+             if is_packed:
+                 continue
+
+             if name in params_dict:
+                 param = params_dict[name]
+                 weight_loader = getattr(param, "weight_loader", default_weight_loader)
+                 weight_loader(param, loaded_weight)
+
+
+ EntryClass = SolarForCausalLM
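
The depth up-scaling blend in SolarModel.forward above is compact but easy to misread. The following standalone sketch (editorial, not part of the package) replays the core of that loop on toy tensors. The values of bskcn_1, bskcn_3, and bskcn_tv are hypothetical stand-ins shaped like the fields the Solar config provides, and the fake "layer" is a random perturbation; the real model also keeps a second snapshot pair (bskcn_2/bskcn_4) and blends the residual stream the same way.

    # Minimal sketch of Solar's "backbone skip connection" (bskcn) blending,
    # using assumed toy config values -- not the shipped implementation.
    import torch

    bskcn_1 = {2}           # layers whose hidden states are snapshotted
    bskcn_3 = {5}           # layers where the snapshot is blended back in
    bskcn_tv = (0.25, 0.1)  # hypothetical (training, inference) blend weights
    tv = bskcn_tv[1]        # inference path, mirroring the `self.training` check

    hidden_states = torch.randn(4, 8)  # toy (num_tokens, hidden_size) activations
    snapshot = None

    for i in range(8):  # stand-in for iterating the decoder layers
        if i in bskcn_1:
            snapshot = hidden_states.clone()
        if i in bskcn_3:
            # Interpolate: tv parts early snapshot, (1 - tv) parts deep states.
            hidden_states = snapshot * tv + hidden_states * (1 - tv)
        # A real SolarDecoderLayer would run attention + MLP here; fake it.
        hidden_states = hidden_states + 0.1 * torch.randn_like(hidden_states)

    print(hidden_states.shape)  # torch.Size([4, 8])

Because the weight is selected per mode (bskcn_tv[0] when self.training, bskcn_tv[1] otherwise), the skip strength can differ between training and inference.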