sglang 0.4.10.post1__py3-none-any.whl → 0.4.10.post2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. sglang/compile_deep_gemm.py +8 -1
  2. sglang/global_config.py +5 -1
  3. sglang/srt/conversation.py +0 -112
  4. sglang/srt/disaggregation/decode_schedule_batch_mixin.py +1 -0
  5. sglang/srt/disaggregation/prefill.py +1 -0
  6. sglang/srt/distributed/device_communicators/pynccl.py +7 -0
  7. sglang/srt/distributed/device_communicators/pynccl_allocator.py +133 -0
  8. sglang/srt/distributed/device_communicators/pynccl_wrapper.py +42 -3
  9. sglang/srt/distributed/parallel_state.py +11 -0
  10. sglang/srt/entrypoints/engine.py +4 -2
  11. sglang/srt/entrypoints/http_server.py +35 -15
  12. sglang/srt/eplb/expert_distribution.py +4 -2
  13. sglang/srt/hf_transformers_utils.py +25 -10
  14. sglang/srt/layers/attention/cutlass_mla_backend.py +3 -3
  15. sglang/srt/layers/attention/flashattention_backend.py +7 -11
  16. sglang/srt/layers/attention/trtllm_mla_backend.py +6 -6
  17. sglang/srt/layers/attention/vision.py +27 -10
  18. sglang/srt/layers/communicator.py +14 -4
  19. sglang/srt/layers/linear.py +7 -1
  20. sglang/srt/layers/logits_processor.py +9 -1
  21. sglang/srt/layers/moe/ep_moe/layer.py +11 -35
  22. sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_3_1/E=128,N=352,device_name=NVIDIA_RTX_6000_Ada_Generation,dtype=fp8_w8a8.json +146 -0
  23. sglang/srt/layers/moe/fused_moe_triton/layer.py +26 -23
  24. sglang/srt/layers/moe/fused_moe_triton/triton_kernels_moe.py +0 -31
  25. sglang/srt/layers/moe/token_dispatcher/__init__.py +23 -0
  26. sglang/srt/layers/moe/token_dispatcher/base_dispatcher.py +12 -1
  27. sglang/srt/layers/moe/{ep_moe/token_dispatcher.py → token_dispatcher/deepep.py} +8 -15
  28. sglang/srt/layers/moe/utils.py +43 -0
  29. sglang/srt/layers/quantization/compressed_tensors/compressed_tensors_moe.py +3 -2
  30. sglang/srt/layers/quantization/deep_gemm_wrapper/compile_utils.py +1 -1
  31. sglang/srt/layers/quantization/fp8.py +5 -1
  32. sglang/srt/layers/quantization/fp8_kernel.py +0 -4
  33. sglang/srt/layers/vocab_parallel_embedding.py +7 -1
  34. sglang/srt/lora/lora_registry.py +7 -0
  35. sglang/srt/managers/cache_controller.py +8 -4
  36. sglang/srt/managers/data_parallel_controller.py +52 -2
  37. sglang/srt/managers/io_struct.py +6 -1
  38. sglang/srt/managers/schedule_batch.py +3 -2
  39. sglang/srt/managers/schedule_policy.py +3 -1
  40. sglang/srt/managers/scheduler.py +144 -6
  41. sglang/srt/managers/template_manager.py +25 -22
  42. sglang/srt/managers/tokenizer_manager.py +114 -62
  43. sglang/srt/managers/utils.py +45 -1
  44. sglang/srt/mem_cache/cpp_radix_tree/radix_tree.py +182 -0
  45. sglang/srt/mem_cache/hicache_storage.py +13 -21
  46. sglang/srt/mem_cache/radix_cache_cpp.py +229 -0
  47. sglang/srt/mem_cache/storage/hf3fs/hf3fs_utils.cpp +35 -0
  48. sglang/srt/model_executor/cuda_graph_runner.py +17 -3
  49. sglang/srt/model_executor/forward_batch_info.py +13 -3
  50. sglang/srt/model_executor/model_runner.py +5 -0
  51. sglang/srt/models/deepseek_v2.py +23 -17
  52. sglang/srt/models/glm4_moe.py +82 -19
  53. sglang/srt/models/grok.py +3 -3
  54. sglang/srt/models/llama4.py +13 -2
  55. sglang/srt/models/mixtral.py +3 -3
  56. sglang/srt/models/mllama4.py +428 -19
  57. sglang/srt/models/qwen2_moe.py +1 -4
  58. sglang/srt/models/qwen3_moe.py +7 -8
  59. sglang/srt/models/step3_vl.py +1 -1
  60. sglang/srt/multimodal/processors/base_processor.py +4 -3
  61. sglang/srt/multimodal/processors/gemma3n.py +0 -7
  62. sglang/srt/operations_strategy.py +1 -1
  63. sglang/srt/server_args.py +80 -20
  64. sglang/srt/speculative/eagle_draft_extend_cuda_graph_runner.py +18 -0
  65. sglang/srt/two_batch_overlap.py +6 -4
  66. sglang/srt/utils.py +3 -24
  67. sglang/srt/weight_sync/utils.py +1 -1
  68. sglang/test/runners.py +2 -2
  69. sglang/test/test_utils.py +3 -3
  70. sglang/version.py +1 -1
  71. {sglang-0.4.10.post1.dist-info → sglang-0.4.10.post2.dist-info}/METADATA +3 -2
  72. {sglang-0.4.10.post1.dist-info → sglang-0.4.10.post2.dist-info}/RECORD +80 -74
  73. /sglang/srt/mem_cache/{mooncake_store → storage/mooncake_store}/mooncake_store.py +0 -0
  74. /sglang/srt/mem_cache/{mooncake_store → storage/mooncake_store}/unit_test.py +0 -0
  75. /sglang/srt/mem_cache/{nixl → storage/nixl}/hicache_nixl.py +0 -0
  76. /sglang/srt/mem_cache/{nixl → storage/nixl}/nixl_utils.py +0 -0
  77. /sglang/srt/mem_cache/{nixl → storage/nixl}/test_hicache_nixl_storage.py +0 -0
  78. {sglang-0.4.10.post1.dist-info → sglang-0.4.10.post2.dist-info}/WHEEL +0 -0
  79. {sglang-0.4.10.post1.dist-info → sglang-0.4.10.post2.dist-info}/licenses/LICENSE +0 -0
  80. {sglang-0.4.10.post1.dist-info → sglang-0.4.10.post2.dist-info}/top_level.txt +0 -0
sglang/compile_deep_gemm.py CHANGED
@@ -17,6 +17,7 @@ import time
 
 import requests
 
+from sglang.srt.disaggregation.utils import FAKE_BOOTSTRAP_HOST
 from sglang.srt.entrypoints.http_server import launch_server
 from sglang.srt.managers.io_struct import GenerateReqInput
 from sglang.srt.managers.tokenizer_manager import TokenizerManager
@@ -52,7 +53,9 @@ class CompileArgs:
 
 
 @warmup("compile-deep-gemm")
-async def warm_up_compile(tokenizer_manager: TokenizerManager):
+async def warm_up_compile(
+    disaggregation_mode: str, tokenizer_manager: TokenizerManager
+):
     print("\nGenerate warm up request for compiling DeepGEMM...\n")
     generate_req_input = GenerateReqInput(
         input_ids=[0, 1, 2, 3],
@@ -62,6 +65,10 @@ async def warm_up_compile(tokenizer_manager: TokenizerManager):
             "ignore_eos": True,
         },
     )
+    if disaggregation_mode != "null":
+        generate_req_input.bootstrap_room = 0
+        generate_req_input.bootstrap_host = FAKE_BOOTSTRAP_HOST
+
     await tokenizer_manager.generate_request(generate_req_input, None).__anext__()
 
 
sglang/global_config.py CHANGED
@@ -30,7 +30,11 @@ class GlobalConfig:
         self.default_new_token_ratio_decay_steps = float(
             os.environ.get("SGLANG_NEW_TOKEN_RATIO_DECAY_STEPS", 600)
         )
-
+        self.torch_empty_cache_interval = float(
+            os.environ.get(
+                "SGLANG_EMPTY_CACHE_INTERVAL", -1
+            )  # in seconds. Set if you observe high memory accumulation over a long serving period.
+        )
         # Runtime constants: others
         self.retract_decode_steps = 20
         self.flashinfer_workspace_size = os.environ.get(
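Note: a minimal sketch (not from the diff) of opting in to the new knob; the value is read in seconds at startup, and the default of -1 leaves periodic cache emptying off. The 300-second value below is only an example.

import os

os.environ["SGLANG_EMPTY_CACHE_INTERVAL"] = "300"  # example: empty the torch CUDA cache every 5 minutes
interval = float(os.environ.get("SGLANG_EMPTY_CACHE_INTERVAL", -1))  # mirrors the GlobalConfig read above
assert interval == 300.0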
sglang/srt/conversation.py CHANGED
@@ -954,20 +954,6 @@ register_conv_template(
     )
 )
 
-register_conv_template(
-    Conversation(
-        name="mimo-vl",
-        system_message="You are MiMo, an AI assistant developed by Xiaomi.",
-        system_template="<|im_start|>system\n{system_message}",
-        roles=("<|im_start|>user", "<|im_start|>assistant"),
-        sep="<|im_end|>\n",
-        sep_style=SeparatorStyle.ADD_NEW_LINE_SINGLE,
-        stop_str=["<|im_end|>"],
-        image_token="<|vision_start|><|image_pad|><|vision_end|>",
-    )
-)
-
-
 register_conv_template(
     Conversation(
         name="qwen2-audio",
@@ -981,51 +967,11 @@ register_conv_template(
     )
 )
 
-register_conv_template(
-    Conversation(
-        name="llama_4_vision",
-        system_message="You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.",
-        system_template="<|header_start|>system<|header_end|>\n\n{system_message}<|eot|>",
-        roles=("user", "assistant"),
-        sep_style=SeparatorStyle.LLAMA4,
-        sep="",
-        stop_str="<|eot|>",
-        image_token="<|image|>",
-    )
-)
-
-register_conv_template(
-    Conversation(
-        name="step3-vl",
-        system_message="<|begin▁of▁sentence|>You are a helpful assistant",
-        system_template="{system_message}\n",
-        roles=(
-            "<|BOT|>user\n",
-            "<|BOT|>assistant\n<think>\n",
-        ),
-        sep="<|EOT|>",
-        sep_style=SeparatorStyle.NO_COLON_SINGLE,
-        stop_str="<|EOT|>",
-        image_token="<im_patch>",
-        # add_bos=True,
-    )
-)
-
 
 @register_conv_template_matching_function
 def match_internvl(model_path: str):
     if re.search(r"internvl", model_path, re.IGNORECASE):
         return "internvl-2-5"
-    if re.search(r"intern.*s1", model_path, re.IGNORECASE):
-        return "interns1"
-
-
-@register_conv_template_matching_function
-def match_llama_vision(model_path: str):
-    if re.search(r"llama.*3\.2.*vision", model_path, re.IGNORECASE):
-        return "llama_3_vision"
-    if re.search(r"llama.*4.*", model_path, re.IGNORECASE):
-        return "llama_4_vision"
 
 
 @register_conv_template_matching_function
@@ -1040,22 +986,6 @@ def match_vicuna(model_path: str):
         return "vicuna_v1.1"
 
 
-@register_conv_template_matching_function
-def match_llama2_chat(model_path: str):
-    if re.search(
-        r"llama-2.*chat|codellama.*instruct",
-        model_path,
-        re.IGNORECASE,
-    ):
-        return "llama-2"
-
-
-@register_conv_template_matching_function
-def match_mistral(model_path: str):
-    if re.search(r"pixtral|(mistral|mixtral).*instruct", model_path, re.IGNORECASE):
-        return "mistral"
-
-
 @register_conv_template_matching_function
 def match_deepseek_vl(model_path: str):
     if re.search(r"deepseek.*vl2", model_path, re.IGNORECASE):
@@ -1064,12 +994,6 @@ def match_deepseek_vl(model_path: str):
 
 @register_conv_template_matching_function
 def match_qwen_chat_ml(model_path: str):
-    if re.search(r"gme.*qwen.*vl", model_path, re.IGNORECASE):
-        return "gme-qwen2-vl"
-    if re.search(r"qwen.*vl", model_path, re.IGNORECASE):
-        return "qwen2-vl"
-    if re.search(r"qwen.*audio", model_path, re.IGNORECASE):
-        return "qwen2-audio"
     if re.search(
         r"llava-v1\.6-34b|llava-v1\.6-yi-34b|llava-next-video-34b|llava-onevision-qwen2",
         model_path,
@@ -1078,12 +1002,6 @@ def match_qwen_chat_ml(model_path: str):
         return "chatml-llava"
 
 
-@register_conv_template_matching_function
-def match_gemma3_instruct(model_path: str):
-    if re.search(r"gemma-3.*it", model_path, re.IGNORECASE):
-        return "gemma-it"
-
-
 @register_conv_template_matching_function
 def match_openbmb_minicpm(model_path: str):
     if re.search(r"minicpm-v", model_path, re.IGNORECASE):
@@ -1092,37 +1010,7 @@ def match_openbmb_minicpm(model_path: str):
         return "minicpmo"
 
 
-@register_conv_template_matching_function
-def match_moonshot_kimivl(model_path: str):
-    if re.search(r"kimi.*vl", model_path, re.IGNORECASE):
-        return "kimi-vl"
-
-
-@register_conv_template_matching_function
-def match_devstral(model_path: str):
-    if re.search(r"devstral", model_path, re.IGNORECASE):
-        return "devstral"
-
-
 @register_conv_template_matching_function
 def match_phi_4_mm(model_path: str):
     if "phi-4-multimodal" in model_path.lower():
         return "phi-4-mm"
-
-
-@register_conv_template_matching_function
-def match_vila(model_path: str):
-    if re.search(r"vila", model_path, re.IGNORECASE):
-        return "chatml"
-
-
-@register_conv_template_matching_function
-def match_mimo_vl(model_path: str):
-    if re.search(r"mimo.*vl", model_path, re.IGNORECASE):
-        return "mimo-vl"
-
-
-# @register_conv_template_matching_function
-# def match_step3(model_path: str):
-#     if re.search(r"step3", model_path, re.IGNORECASE):
-#         return "step3-vl"
sglang/srt/disaggregation/decode_schedule_batch_mixin.py CHANGED
@@ -88,6 +88,7 @@ class ScheduleBatchDisaggregationDecodeMixin:
         self.extend_lens = [r.extend_input_len for r in reqs]
         self.extend_logprob_start_lens = [r.extend_logprob_start_len for r in reqs]
         self.extend_input_logprob_token_ids = extend_input_logprob_token_ids
+        self.multimodal_inputs = [r.multimodal_inputs for r in reqs]
 
         # Build sampling info
         self.sampling_info = SamplingBatchInfo.from_schedule_batch(
sglang/srt/disaggregation/prefill.py CHANGED
@@ -460,6 +460,7 @@ class SchedulerDisaggregationPrefillMixin:
 
         # We need to remove the sync in the following function for overlap schedule.
        self.set_next_batch_sampling_info_done(batch)
+        self.maybe_send_health_check_signal()
 
    def process_disagg_prefill_inflight_queue(
        self: Scheduler, rids_to_check: Optional[List[str]] = None
sglang/srt/distributed/device_communicators/pynccl.py CHANGED
@@ -75,6 +75,7 @@ class PyNcclCommunicator:
         self.available = True
         self.disabled = False
 
+        self.nccl_version = self.nccl.ncclGetRawVersion()
         if self.rank == 0:
             logger.info("sglang is using nccl==%s", self.nccl.ncclGetVersion())
 
@@ -259,6 +260,12 @@ class PyNcclCommunicator:
             cudaStream_t(stream.cuda_stream),
         )
 
+    def register_comm_window_raw(self, ptr: int, size: int):
+        return self.nccl.ncclCommWindowRegister(self.comm, buffer_type(ptr), size, 1)
+
+    def deregister_comm_window(self, window):
+        return self.nccl.ncclCommWindowDeregister(self.comm, window)
+
     @contextmanager
     def change_state(
         self, enable: Optional[bool] = None, stream: Optional[torch.cuda.Stream] = None
sglang/srt/distributed/device_communicators/pynccl_allocator.py ADDED
@@ -0,0 +1,133 @@
+import tempfile
+
+import torch
+from packaging import version
+from torch.cuda.memory import CUDAPluggableAllocator
+
+from sglang.srt.distributed.parallel_state import GroupCoordinator
+from sglang.srt.managers.schedule_batch import global_server_args_dict
+
+nccl_allocator_source = """
+#include <nccl.h>
+extern "C" {
+
+void* nccl_alloc_plug(size_t size, int device, void* stream) {
+  void* ptr;
+  ncclResult_t err = ncclMemAlloc(&ptr, size);
+  return ptr;
+
+}
+
+void nccl_free_plug(void* ptr, size_t size, int device, void* stream) {
+  ncclResult_t err = ncclMemFree(ptr);
+}
+
+}
+"""
+
+_allocator = None
+_mem_pool = None
+_registered_base_addrs = set()
+_graph_pool_id = None
+
+
+def is_symmetric_memory_enabled():
+    return global_server_args_dict["enable_symm_mem"]
+
+
+def set_graph_pool_id(graph_pool_id):
+    global _graph_pool_id
+    _graph_pool_id = graph_pool_id
+
+
+def get_nccl_mem_pool():
+    global _allocator, _mem_pool
+    if _mem_pool is None:
+        out_dir = tempfile.gettempdir()
+        nccl_allocator_libname = "nccl_allocator"
+        torch.utils.cpp_extension.load_inline(
+            name=nccl_allocator_libname,
+            cpp_sources=nccl_allocator_source,
+            with_cuda=True,
+            extra_ldflags=["-lnccl"],
+            verbose=True,
+            is_python_module=False,
+            build_directory=out_dir,
+        )
+        _allocator = CUDAPluggableAllocator(
+            f"{out_dir}/{nccl_allocator_libname}.so",
+            "nccl_alloc_plug",
+            "nccl_free_plug",
+        ).allocator()
+        _mem_pool = torch.cuda.MemPool(_allocator)
+    return _mem_pool
+
+
+class use_symmetric_memory:
+    def __init__(self, group_coordinator: GroupCoordinator):
+        if not is_symmetric_memory_enabled():
+            self.group_coordinator = None
+            self._mem_pool_ctx = None
+            self.is_graph_capture = None
+            self.device = None
+            self.pre_2_8_0 = None
+        else:
+            self.group_coordinator = group_coordinator
+            self._mem_pool_ctx = torch.cuda.use_mem_pool(get_nccl_mem_pool())
+            self.is_graph_capture = torch.cuda.is_current_stream_capturing()
+            self.device = torch.cuda.current_device()
+            self.pre_2_8_0 = version.parse(torch.__version__) < version.parse("2.8.0")
+
+    def __enter__(self):
+        if not is_symmetric_memory_enabled():
+            return self
+        assert (
+            self.group_coordinator.pynccl_comm is not None
+        ), f"Symmetric memory requires pynccl to be enabled in group '{self.group_coordinator.group_name}'"
+        assert (
+            self.group_coordinator.pynccl_comm.nccl_version >= 22703
+        ), "NCCL version 2.27.3 or higher is required for NCCL symmetric memory"
+        if self.is_graph_capture:
+            assert (
+                _graph_pool_id is not None
+            ), "graph_pool_id is not set under graph capture"
+            # Pause graph memory pool to use symmetric memory with cuda graph
+            if self.pre_2_8_0:
+                torch._C._cuda_endAllocateCurrentStreamToPool(
+                    self.device, _graph_pool_id
+                )
+            else:
+                torch._C._cuda_endAllocateToPool(self.device, _graph_pool_id)
+        self._mem_pool_ctx.__enter__()
+        return self
+
+    def tag(self, tensor: torch.Tensor):
+        if not is_symmetric_memory_enabled():
+            return
+        tensor.symmetric_memory = True
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if not is_symmetric_memory_enabled():
+            return
+        global _registered_base_addrs
+        self._mem_pool_ctx.__exit__(exc_type, exc_val, exc_tb)
+        for segment in get_nccl_mem_pool().snapshot():
+            if segment["address"] not in _registered_base_addrs:
+                if segment["stream"] == 0 and self.pre_2_8_0:
+                    # PyTorch version < 2.8.0 has a multi-thread MemPool bug
+                    # See https://github.com/pytorch/pytorch/issues/152861
+                    # Fixed at https://github.com/pytorch/pytorch/commit/f01e628e3b31852983ab30b25bf251f557ba9c0b
+                    # WAR is to skip allocations on the default stream since the forward_pass thread always runs on a custom stream
+                    continue
+                self.group_coordinator.pynccl_comm.register_comm_window_raw(
+                    segment["address"], segment["total_size"]
+                )
+                _registered_base_addrs.add(segment["address"])
+
+        if self.is_graph_capture:
+            if self.pre_2_8_0:
+                torch._C._cuda_beginAllocateToPool(self.device, _graph_pool_id)
+            else:
+                torch._C._cuda_beginAllocateCurrentThreadToPool(
+                    self.device, _graph_pool_id
+                )
sglang/srt/distributed/device_communicators/pynccl_wrapper.py CHANGED
@@ -67,6 +67,7 @@ def find_nccl_library() -> str:
 
 ncclResult_t = ctypes.c_int
 ncclComm_t = ctypes.c_void_p
+ncclWindow_t = ctypes.c_void_p
 
 
 class ncclUniqueId(ctypes.Structure):
@@ -279,6 +280,23 @@ class NCCLLibrary:
         Function("ncclCommDestroy", ncclResult_t, [ncclComm_t]),
     ]
 
+    exported_functions_symm_mem = [
+        # ncclResult_t ncclCommWindowRegister(ncclComm_t comm, void* buff, size_t size, ncclWindow_t* win, int winFlags);
+        Function(
+            "ncclCommWindowRegister",
+            ncclResult_t,
+            [
+                ncclComm_t,
+                buffer_type,
+                ctypes.c_size_t,
+                ctypes.POINTER(ncclWindow_t),
+                ctypes.c_int,
+            ],
+        ),
+        # ncclResult_t ncclCommWindowDeregister(ncclComm_t comm, ncclWindow_t win);
+        Function("ncclCommWindowDeregister", ncclResult_t, [ncclComm_t, ncclWindow_t]),
+    ]
+
     # class attribute to store the mapping from the path to the library
     # to avoid loading the same library multiple times
     path_to_library_cache: Dict[str, Any] = {}
@@ -312,7 +330,10 @@ class NCCLLibrary:
 
         if so_file not in NCCLLibrary.path_to_dict_mapping:
             _funcs: Dict[str, Any] = {}
-            for func in NCCLLibrary.exported_functions:
+            exported_functions = NCCLLibrary.exported_functions
+            if hasattr(self.lib, "ncclCommWindowRegister"):
+                exported_functions.extend(NCCLLibrary.exported_functions_symm_mem)
+            for func in exported_functions:
                 f = getattr(self.lib, func.name)
                 f.restype = func.restype
                 f.argtypes = func.argtypes
@@ -328,10 +349,14 @@ class NCCLLibrary:
             error_str = self.ncclGetErrorString(result)
             raise RuntimeError(f"NCCL error: {error_str}")
 
-    def ncclGetVersion(self) -> str:
+    def ncclGetRawVersion(self) -> int:
         version = ctypes.c_int()
         self.NCCL_CHECK(self._funcs["ncclGetVersion"](ctypes.byref(version)))
-        version_str = str(version.value)
+        # something like 21903
+        return version.value
+
+    def ncclGetVersion(self) -> str:
+        version_str = str(self.ncclGetRawVersion())
         # something like 21903 --> "2.19.3"
         major = version_str[0].lstrip("0")
         minor = version_str[1:3].lstrip("0")
@@ -460,6 +485,20 @@ class NCCLLibrary:
     def ncclCommDestroy(self, comm: ncclComm_t) -> None:
         self.NCCL_CHECK(self._funcs["ncclCommDestroy"](comm))
 
+    def ncclCommWindowRegister(
+        self, comm: ncclComm_t, buff: buffer_type, size: int, win_flags: int
+    ) -> ncclWindow_t:
+        window = ncclWindow_t()
+        self.NCCL_CHECK(
+            self._funcs["ncclCommWindowRegister"](
+                comm, buff, size, ctypes.byref(window), win_flags
+            )
+        )
+        return window
+
+    def ncclCommWindowDeregister(self, comm: ncclComm_t, window: ncclWindow_t) -> None:
+        self.NCCL_CHECK(self._funcs["ncclCommWindowDeregister"](comm, window))
+
 
 __all__ = [
     "NCCLLibrary",
sglang/srt/distributed/parallel_state.py CHANGED
@@ -497,6 +497,17 @@ class GroupCoordinator:
         if self.npu_communicator is not None and not self.npu_communicator.disabled:
             return self.npu_communicator.all_reduce(input_)
 
+        if (
+            self.pynccl_comm is not None
+            and hasattr(input_, "symmetric_memory")
+            and input_.symmetric_memory
+        ):
+            with self.pynccl_comm.change_state(
+                enable=True, stream=torch.cuda.current_stream()
+            ):
+                self.pynccl_comm.all_reduce(input_)
+            return input_
+
         outplace_all_reduce_method = None
         if (
             self.qr_comm is not None
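Note: a hedged usage sketch (not part of the diff) of how the pieces above are meant to compose: allocate from the NCCL mem pool inside use_symmetric_memory, tag the tensor, and let GroupCoordinator.all_reduce take the new pynccl symmetric-memory branch. get_tp_group is an assumed accessor for the tensor-parallel GroupCoordinator, and the enable_symm_mem server flag must be on.

import torch

from sglang.srt.distributed.device_communicators.pynccl_allocator import (
    use_symmetric_memory,
)
from sglang.srt.distributed.parallel_state import get_tp_group  # assumed accessor

tp_group = get_tp_group()
with use_symmetric_memory(tp_group) as sm:
    out = torch.empty(4096, dtype=torch.bfloat16, device="cuda")  # served from the NCCL mem pool
    sm.tag(out)  # sets out.symmetric_memory = True
# __exit__ registers any new pool segments via ncclCommWindowRegister.
out = tp_group.all_reduce(out)  # dispatches to pynccl_comm.all_reduce (the branch added above)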
sglang/srt/entrypoints/engine.py CHANGED
@@ -623,8 +623,9 @@ class Engine(EngineBase):
 def _set_envs_and_config(server_args: ServerArgs):
     # Set global environments
     os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
-    os.environ["NCCL_CUMEM_ENABLE"] = "0"
-    os.environ["NCCL_NVLS_ENABLE"] = str(int(server_args.enable_nccl_nvls))
+    os.environ["NCCL_CUMEM_ENABLE"] = str(int(server_args.enable_symm_mem))
+    if not server_args.enable_symm_mem:
+        os.environ["NCCL_NVLS_ENABLE"] = str(int(server_args.enable_nccl_nvls))
     os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1"
     os.environ["CUDA_DEVICE_MAX_CONNECTIONS"] = "4"
     os.environ["CUDA_MODULE_LOADING"] = "AUTO"
@@ -731,6 +732,7 @@ def _launch_subprocesses(
                     pp_rank,
                     None,
                     writer,
+                    None,
                 ),
             )
 
sglang/srt/entrypoints/http_server.py CHANGED
@@ -45,6 +45,7 @@ from fastapi.responses import ORJSONResponse, Response, StreamingResponse
 
 from sglang.srt.disaggregation.utils import (
     FAKE_BOOTSTRAP_HOST,
+    DisaggregationMode,
     register_disaggregation_server,
 )
 from sglang.srt.entrypoints.engine import _launch_subprocesses
@@ -88,7 +89,7 @@ from sglang.srt.managers.io_struct import (
     VertexGenerateReqInput,
 )
 from sglang.srt.managers.template_manager import TemplateManager
-from sglang.srt.managers.tokenizer_manager import TokenizerManager
+from sglang.srt.managers.tokenizer_manager import ServerStatus, TokenizerManager
 from sglang.srt.metrics.func_timer import enable_func_timer
 from sglang.srt.reasoning_parser import ReasoningParser
 from sglang.srt.server_args import ServerArgs
@@ -230,23 +231,28 @@ async def validate_json_request(raw_request: Request):
 
 
 @app.get("/health")
-async def health() -> Response:
-    """Check the health of the http server."""
-    return Response(status_code=200)
-
-
 @app.get("/health_generate")
 async def health_generate(request: Request) -> Response:
-    """Check the health of the inference server by generating one token."""
+    """
+    Check the health of the inference server by sending a special request to generate one token.
+
+    If the server is running something, this request will be ignored, so it creates zero overhead.
+    If the server is not running anything, this request will be run, so we know whether the server is healthy.
+    """
+
     if _global_state.tokenizer_manager.gracefully_exit:
         logger.info("Health check request received during shutdown. Returning 503.")
         return Response(status_code=503)
 
+    if not _global_state.tokenizer_manager.server_status.is_healthy():
+        return Response(status_code=503)
+
     sampling_params = {"max_new_tokens": 1, "temperature": 0.0}
     rid = f"HEALTH_CHECK_{time.time()}"
 
     if _global_state.tokenizer_manager.is_image_gen:
-        raise NotImplementedError()
+        # Keep this branch for some internal use cases.
+        raise NotImplementedError("Image generation is not supported yet.")
     elif _global_state.tokenizer_manager.is_generation:
         gri = GenerateReqInput(
             rid=rid,
@@ -254,6 +260,12 @@ async def health_generate(request: Request) -> Response:
             sampling_params=sampling_params,
             log_metrics=False,
         )
+        if (
+            _global_state.tokenizer_manager.server_args.disaggregation_mode
+            != DisaggregationMode.NULL
+        ):
+            gri.bootstrap_host = FAKE_BOOTSTRAP_HOST
+            gri.bootstrap_room = 0
     else:
         gri = EmbeddingReqInput(
             rid=rid, input_ids=[0], sampling_params=sampling_params, log_metrics=False
@@ -263,9 +275,6 @@ async def health_generate(request: Request) -> Response:
         async for _ in _global_state.tokenizer_manager.generate_request(gri, request):
             break
 
-    # This request is a special request.
-    # If the server already has something running, this request will be ignored, so it creates zero overhead.
-    # If the server is not running, this request will be run, so we know whether the server is healthy.
     task = asyncio.create_task(gen())
 
     # As long as we receive any response from the detokenizer/scheduler, we consider the server is healthy.
@@ -1032,8 +1041,10 @@ def _execute_server_warmup(
                 timeout=600,
             )
             assert res.status_code == 200, f"{res}"
+            _global_state.tokenizer_manager.server_status = ServerStatus.Up
+
         else:
-            logger.info(f"Start of prefill warmup ...")
+            logger.info(f"Start of pd disaggregation warmup ...")
             json_data = {
                 "sampling_params": {
                     "temperature": 0.0,
@@ -1055,9 +1066,18 @@ def _execute_server_warmup(
                 headers=headers,
                 timeout=1800,  # because of deep gemm precache is very long if not precache.
             )
-            logger.info(
-                f"End of prefill warmup with status {res.status_code}, resp: {res.json()}"
-            )
+            if res.status_code == 200:
+                logger.info(
+                    f"End of prefill disaggregation mode warmup with status {res.status_code}, resp: {res.json()}"
+                )
+                _global_state.tokenizer_manager.server_status = ServerStatus.Up
+            else:
+                logger.info(
+                    "Prefill disaggregation mode warm Up Failed, status code: {}".format(
+                        res.status_code
+                    )
+                )
+                _global_state.tokenizer_manager.server_status = ServerStatus.UnHealthy
 
     except Exception:
         last_traceback = get_exception_traceback()
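Note: after the health-check change above, the /health route is served by the same handler as /health_generate, and both return 503 until warmup sets ServerStatus.Up (or when the server is shutting down or unhealthy). A quick probe, assuming a local server on the default port 30000:

import requests

base = "http://127.0.0.1:30000"  # default sglang port; adjust if --port was changed
print(requests.get(f"{base}/health").status_code)           # now runs the generate-one-token check too
print(requests.get(f"{base}/health_generate").status_code)  # 200 if a token came back, 503 otherwise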
sglang/srt/eplb/expert_distribution.py CHANGED
@@ -288,12 +288,14 @@ class _SinglePassGatherer(ABC):
             )
 
         if server_args.expert_distribution_recorder_mode == "stat_approx":
-            if server_args.enable_deepep_moe and (server_args.deepep_mode == "normal"):
+            if server_args.moe_a2a_backend is not None and (
+                server_args.deepep_mode == "normal"
+            ):
                 return _DeepepNormalSinglePassGatherer(expert_location_metadata, rank)
             else:
                 raise NotImplementedError
 
-        if server_args.enable_deepep_moe:
+        if server_args.moe_a2a_backend is not None:
             if server_args.deepep_mode == "normal":
                 return _SelectExpertsSinglePassGatherer(expert_location_metadata, rank)
             elif server_args.deepep_mode == "low_latency":
sglang/srt/hf_transformers_utils.py CHANGED
@@ -14,7 +14,6 @@
 """Utilities for Huggingface Transformers."""
 
 import contextlib
-import logging
 import os
 import warnings
 from pathlib import Path
@@ -45,7 +44,7 @@ from sglang.srt.configs import (
 )
 from sglang.srt.configs.internvl import InternVLChatConfig
 from sglang.srt.connector import create_remote_connector
-from sglang.srt.utils import is_remote_url, lru_cache_frozenset
+from sglang.srt.utils import is_remote_url, logger, lru_cache_frozenset
 
 _CONFIG_REGISTRY: Dict[str, Type[PretrainedConfig]] = {
     ChatGLMConfig.model_type: ChatGLMConfig,
@@ -317,15 +316,31 @@ def get_processor(
 
     if config.model_type not in {"llava", "clip"}:
         kwargs["use_fast"] = use_fast
+    try:
+        processor = AutoProcessor.from_pretrained(
+            tokenizer_name,
+            *args,
+            trust_remote_code=trust_remote_code,
+            revision=revision,
+            **kwargs,
+        )
 
-    processor = AutoProcessor.from_pretrained(
-        tokenizer_name,
-        *args,
-        trust_remote_code=trust_remote_code,
-        revision=revision,
-        **kwargs,
-    )
-
+    except ValueError as e:
+        error_message = str(e)
+        if "does not have a slow version" in error_message:
+            logger.info(
+                f"Processor {tokenizer_name} does not have a slow version. Automatically use fast version"
+            )
+            kwargs["use_fast"] = True
+            processor = AutoProcessor.from_pretrained(
+                tokenizer_name,
+                *args,
+                trust_remote_code=trust_remote_code,
+                revision=revision,
+                **kwargs,
+            )
+        else:
+            raise e
     tokenizer = get_tokenizer_from_processor(processor)
 
     attach_additional_stop_token_ids(tokenizer)