sglang 0.4.1.post6__py3-none-any.whl → 0.4.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (141)
  1. sglang/__init__.py +21 -23
  2. sglang/api.py +2 -7
  3. sglang/bench_offline_throughput.py +41 -27
  4. sglang/bench_one_batch.py +60 -4
  5. sglang/bench_one_batch_server.py +1 -1
  6. sglang/bench_serving.py +83 -71
  7. sglang/lang/backend/runtime_endpoint.py +183 -4
  8. sglang/lang/chat_template.py +46 -4
  9. sglang/launch_server.py +1 -1
  10. sglang/srt/_custom_ops.py +80 -42
  11. sglang/srt/configs/device_config.py +1 -1
  12. sglang/srt/configs/load_config.py +1 -0
  13. sglang/srt/configs/model_config.py +1 -0
  14. sglang/srt/constrained/base_grammar_backend.py +21 -0
  15. sglang/srt/constrained/xgrammar_backend.py +8 -4
  16. sglang/srt/conversation.py +14 -1
  17. sglang/srt/distributed/__init__.py +3 -3
  18. sglang/srt/distributed/communication_op.py +2 -1
  19. sglang/srt/distributed/device_communicators/cuda_wrapper.py +2 -1
  20. sglang/srt/distributed/device_communicators/custom_all_reduce.py +112 -42
  21. sglang/srt/distributed/device_communicators/custom_all_reduce_utils.py +2 -2
  22. sglang/srt/distributed/device_communicators/hpu_communicator.py +2 -1
  23. sglang/srt/distributed/device_communicators/pynccl.py +80 -1
  24. sglang/srt/distributed/device_communicators/pynccl_wrapper.py +112 -2
  25. sglang/srt/distributed/device_communicators/shm_broadcast.py +5 -72
  26. sglang/srt/distributed/device_communicators/xpu_communicator.py +2 -1
  27. sglang/srt/distributed/parallel_state.py +1 -1
  28. sglang/srt/distributed/utils.py +2 -1
  29. sglang/srt/entrypoints/engine.py +452 -0
  30. sglang/srt/entrypoints/http_server.py +603 -0
  31. sglang/srt/function_call_parser.py +494 -0
  32. sglang/srt/layers/activation.py +8 -8
  33. sglang/srt/layers/attention/flashinfer_backend.py +10 -9
  34. sglang/srt/layers/attention/triton_backend.py +4 -6
  35. sglang/srt/layers/attention/vision.py +204 -0
  36. sglang/srt/layers/dp_attention.py +71 -0
  37. sglang/srt/layers/layernorm.py +5 -5
  38. sglang/srt/layers/linear.py +65 -14
  39. sglang/srt/layers/logits_processor.py +49 -64
  40. sglang/srt/layers/moe/ep_moe/layer.py +24 -16
  41. sglang/srt/layers/moe/fused_moe_native.py +84 -1
  42. sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
  43. sglang/srt/layers/moe/fused_moe_triton/fused_moe.py +27 -7
  44. sglang/srt/layers/moe/fused_moe_triton/layer.py +38 -5
  45. sglang/srt/layers/parameter.py +18 -8
  46. sglang/srt/layers/quantization/__init__.py +20 -23
  47. sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
  48. sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
  49. sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
  50. sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
  51. sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
  52. sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
  53. sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
  54. sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
  55. sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
  56. sglang/srt/layers/quantization/fp8.py +10 -4
  57. sglang/srt/layers/quantization/modelopt_quant.py +1 -2
  58. sglang/srt/layers/quantization/w8a8_int8.py +1 -1
  59. sglang/srt/layers/radix_attention.py +2 -2
  60. sglang/srt/layers/rotary_embedding.py +1184 -31
  61. sglang/srt/layers/sampler.py +64 -6
  62. sglang/srt/layers/torchao_utils.py +12 -6
  63. sglang/srt/layers/vocab_parallel_embedding.py +2 -2
  64. sglang/srt/lora/lora.py +1 -9
  65. sglang/srt/managers/configure_logging.py +3 -0
  66. sglang/srt/managers/data_parallel_controller.py +79 -72
  67. sglang/srt/managers/detokenizer_manager.py +24 -6
  68. sglang/srt/managers/image_processor.py +158 -2
  69. sglang/srt/managers/io_struct.py +57 -3
  70. sglang/srt/managers/schedule_batch.py +78 -45
  71. sglang/srt/managers/schedule_policy.py +26 -12
  72. sglang/srt/managers/scheduler.py +326 -201
  73. sglang/srt/managers/session_controller.py +1 -0
  74. sglang/srt/managers/tokenizer_manager.py +210 -121
  75. sglang/srt/managers/tp_worker.py +6 -4
  76. sglang/srt/managers/tp_worker_overlap_thread.py +5 -8
  77. sglang/srt/managers/utils.py +44 -0
  78. sglang/srt/mem_cache/memory_pool.py +10 -32
  79. sglang/srt/metrics/collector.py +15 -6
  80. sglang/srt/model_executor/cuda_graph_runner.py +26 -30
  81. sglang/srt/model_executor/forward_batch_info.py +5 -7
  82. sglang/srt/model_executor/model_runner.py +44 -19
  83. sglang/srt/model_loader/loader.py +83 -6
  84. sglang/srt/model_loader/weight_utils.py +145 -6
  85. sglang/srt/models/baichuan.py +6 -6
  86. sglang/srt/models/chatglm.py +2 -2
  87. sglang/srt/models/commandr.py +17 -5
  88. sglang/srt/models/dbrx.py +13 -5
  89. sglang/srt/models/deepseek.py +3 -3
  90. sglang/srt/models/deepseek_v2.py +11 -11
  91. sglang/srt/models/exaone.py +2 -2
  92. sglang/srt/models/gemma.py +2 -2
  93. sglang/srt/models/gemma2.py +15 -25
  94. sglang/srt/models/gpt2.py +3 -5
  95. sglang/srt/models/gpt_bigcode.py +1 -1
  96. sglang/srt/models/granite.py +2 -2
  97. sglang/srt/models/grok.py +4 -3
  98. sglang/srt/models/internlm2.py +2 -2
  99. sglang/srt/models/llama.py +7 -5
  100. sglang/srt/models/minicpm.py +2 -2
  101. sglang/srt/models/minicpm3.py +9 -9
  102. sglang/srt/models/minicpmv.py +1238 -0
  103. sglang/srt/models/mixtral.py +3 -3
  104. sglang/srt/models/mixtral_quant.py +3 -3
  105. sglang/srt/models/mllama.py +2 -2
  106. sglang/srt/models/olmo.py +3 -3
  107. sglang/srt/models/olmo2.py +4 -4
  108. sglang/srt/models/olmoe.py +7 -13
  109. sglang/srt/models/phi3_small.py +2 -2
  110. sglang/srt/models/qwen.py +2 -2
  111. sglang/srt/models/qwen2.py +41 -4
  112. sglang/srt/models/qwen2_moe.py +3 -3
  113. sglang/srt/models/qwen2_vl.py +22 -122
  114. sglang/srt/models/stablelm.py +2 -2
  115. sglang/srt/models/torch_native_llama.py +20 -7
  116. sglang/srt/models/xverse.py +6 -6
  117. sglang/srt/models/xverse_moe.py +6 -6
  118. sglang/srt/openai_api/adapter.py +139 -37
  119. sglang/srt/openai_api/protocol.py +7 -4
  120. sglang/srt/sampling/custom_logit_processor.py +38 -0
  121. sglang/srt/sampling/penaltylib/penalizers/repetition_penalty.py +11 -14
  122. sglang/srt/sampling/sampling_batch_info.py +143 -18
  123. sglang/srt/sampling/sampling_params.py +3 -1
  124. sglang/srt/server.py +4 -1090
  125. sglang/srt/server_args.py +77 -15
  126. sglang/srt/speculative/eagle_utils.py +37 -15
  127. sglang/srt/speculative/eagle_worker.py +11 -13
  128. sglang/srt/utils.py +164 -129
  129. sglang/test/runners.py +8 -13
  130. sglang/test/test_programs.py +2 -1
  131. sglang/test/test_utils.py +83 -22
  132. sglang/utils.py +12 -2
  133. sglang/version.py +1 -1
  134. {sglang-0.4.1.post6.dist-info → sglang-0.4.2.dist-info}/METADATA +21 -10
  135. {sglang-0.4.1.post6.dist-info → sglang-0.4.2.dist-info}/RECORD +138 -123
  136. sglang/launch_server_llavavid.py +0 -25
  137. sglang/srt/constrained/__init__.py +0 -16
  138. sglang/srt/distributed/device_communicators/__init__.py +0 -0
  139. {sglang-0.4.1.post6.dist-info → sglang-0.4.2.dist-info}/LICENSE +0 -0
  140. {sglang-0.4.1.post6.dist-info → sglang-0.4.2.dist-info}/WHEEL +0 -0
  141. {sglang-0.4.1.post6.dist-info → sglang-0.4.2.dist-info}/top_level.txt +0 -0
sglang/srt/entrypoints/engine.py
@@ -0,0 +1,452 @@
+ # Copyright 2023-2024 SGLang Team
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ==============================================================================
+ """
+ The entry point of the inference server. (SRT = SGLang Runtime)
+
+ This file implements Python APIs for the inference engine.
+ """
+
+ import asyncio
+ import atexit
+ import dataclasses
+ import logging
+ import multiprocessing as mp
+ import os
+ import signal
+ import threading
+ from typing import AsyncIterator, Dict, Iterator, List, Optional, Tuple, Union
+
+ # Fix a bug of Python threading
+ setattr(threading, "_register_atexit", lambda *args, **kwargs: None)
+
+ import torch
+ import uvloop
+
+ from sglang.srt.managers.data_parallel_controller import (
+     run_data_parallel_controller_process,
+ )
+ from sglang.srt.managers.detokenizer_manager import run_detokenizer_process
+ from sglang.srt.managers.io_struct import (
+     EmbeddingReqInput,
+     GenerateReqInput,
+     GetWeightsByNameReqInput,
+     InitWeightsUpdateGroupReqInput,
+     ReleaseMemoryOccupationReqInput,
+     ResumeMemoryOccupationReqInput,
+     UpdateWeightsFromDistributedReqInput,
+     UpdateWeightsFromTensorReqInput,
+ )
+ from sglang.srt.managers.scheduler import run_scheduler_process
+ from sglang.srt.managers.tokenizer_manager import TokenizerManager
+ from sglang.srt.openai_api.adapter import load_chat_template_for_openai_api
+ from sglang.srt.server_args import PortArgs, ServerArgs
+ from sglang.srt.torch_memory_saver_adapter import TorchMemorySaverAdapter
+ from sglang.srt.utils import (
+     MultiprocessingSerializer,
+     assert_pkg_version,
+     configure_logger,
+     kill_process_tree,
+     launch_dummy_health_check_server,
+     maybe_set_triton_cache_manager,
+     prepare_model_and_tokenizer,
+     set_prometheus_multiproc_dir,
+     set_ulimit,
+ )
+ from sglang.version import __version__
+
+ logger = logging.getLogger(__name__)
+ asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
+
+
+ class Engine:
+     """
+     The entry point to the inference engine.
+
+     - The engine consists of three components:
+         1. TokenizerManager: Tokenizes the requests and sends them to the scheduler.
+         2. Scheduler (subprocess): Receives requests from the TokenizerManager, schedules batches, forwards them, and sends the output tokens to the DetokenizerManager.
+         3. DetokenizerManager (subprocess): Detokenizes the output tokens and sends the result back to the TokenizerManager.
+
+     Note:
+     1. The HTTP server, Engine, and TokenizerManager all run in the main process.
+     2. Inter-process communication is done through IPC (each process uses a different port) via the ZMQ library.
+     """
+
+     def __init__(self, **kwargs):
+         """
+         The arguments of this function are the same as `sglang/srt/server_args.py::ServerArgs`.
+         Please refer to `ServerArgs` for the documentation.
+         """
+         if "server_args" in kwargs:
+             # Directly load server_args
+             server_args = kwargs["server_args"]
+         else:
+             # Construct server_args from kwargs
+             if "log_level" not in kwargs:
+                 # Do not print logs by default
+                 kwargs["log_level"] = "error"
+             server_args = ServerArgs(**kwargs)
+
+         # Shutdown the subprocesses automatically when the program exits
+         atexit.register(self.shutdown)
+
+         # Launch subprocesses
+         tokenizer_manager, scheduler_info = _launch_subprocesses(
+             server_args=server_args
+         )
+         self.tokenizer_manager = tokenizer_manager
+         self.scheduler_info = scheduler_info
+
+     def generate(
+         self,
+         # The input prompt. It can be a single prompt or a batch of prompts.
+         prompt: Optional[Union[List[str], str]] = None,
+         sampling_params: Optional[Union[List[Dict], Dict]] = None,
+         # The token ids for text; one can either specify text or input_ids.
+         input_ids: Optional[Union[List[List[int]], List[int]]] = None,
+         return_logprob: Optional[Union[List[bool], bool]] = False,
+         logprob_start_len: Optional[Union[List[int], int]] = None,
+         top_logprobs_num: Optional[Union[List[int], int]] = None,
+         lora_path: Optional[List[Optional[str]]] = None,
+         custom_logit_processor: Optional[Union[List[str], str]] = None,
+         stream: bool = False,
+     ) -> Union[Dict, Iterator[Dict]]:
+         """
+         The arguments of this function are the same as `sglang/srt/managers/io_struct.py::GenerateReqInput`.
+         Please refer to `GenerateReqInput` for the documentation.
+         """
+         obj = GenerateReqInput(
+             text=prompt,
+             input_ids=input_ids,
+             sampling_params=sampling_params,
+             return_logprob=return_logprob,
+             logprob_start_len=logprob_start_len,
+             top_logprobs_num=top_logprobs_num,
+             lora_path=lora_path,
+             custom_logit_processor=custom_logit_processor,
+             stream=stream,
+         )
+         loop = asyncio.get_event_loop()
+         generator = self.tokenizer_manager.generate_request(obj, None)
+
+         if stream:
+
+             def generator_wrapper():
+                 while True:
+                     try:
+                         chunk = loop.run_until_complete(generator.__anext__())
+                         yield chunk
+                     except StopAsyncIteration:
+                         break
+
+             return generator_wrapper()
+         else:
+             ret = loop.run_until_complete(generator.__anext__())
+             return ret
+
+     async def async_generate(
+         self,
+         # The input prompt. It can be a single prompt or a batch of prompts.
+         prompt: Optional[Union[List[str], str]] = None,
+         sampling_params: Optional[Union[List[Dict], Dict]] = None,
+         # The token ids for text; one can either specify text or input_ids.
+         input_ids: Optional[Union[List[List[int]], List[int]]] = None,
+         return_logprob: Optional[Union[List[bool], bool]] = False,
+         logprob_start_len: Optional[Union[List[int], int]] = None,
+         top_logprobs_num: Optional[Union[List[int], int]] = None,
+         lora_path: Optional[List[Optional[str]]] = None,
+         custom_logit_processor: Optional[Union[List[str], str]] = None,
+         stream: bool = False,
+     ) -> Union[Dict, AsyncIterator[Dict]]:
+         """
+         The arguments of this function are the same as `sglang/srt/managers/io_struct.py::GenerateReqInput`.
+         Please refer to `GenerateReqInput` for the documentation.
+         """
+         obj = GenerateReqInput(
+             text=prompt,
+             input_ids=input_ids,
+             sampling_params=sampling_params,
+             return_logprob=return_logprob,
+             logprob_start_len=logprob_start_len,
+             top_logprobs_num=top_logprobs_num,
+             lora_path=lora_path,
+             stream=stream,
+             custom_logit_processor=custom_logit_processor,
+         )
+         generator = self.tokenizer_manager.generate_request(obj, None)
+
+         if stream is True:
+             return generator
+         else:
+             return await generator.__anext__()
+
+     def encode(
+         self,
+         prompt: Union[str, List[str], List[Dict], List[List[Dict]]],
+     ) -> Dict:
+         """
+         The arguments of this function are the same as `sglang/srt/managers/io_struct.py::EmbeddingReqInput`.
+         Please refer to `EmbeddingReqInput` for the documentation.
+         """
+
+         obj = EmbeddingReqInput(text=prompt)
+         loop = asyncio.get_event_loop()
+         generator = self.tokenizer_manager.generate_request(obj, None)
+         ret = loop.run_until_complete(generator.__anext__())
+         return ret
+
+     def shutdown(self):
+         """Shutdown the engine"""
+         kill_process_tree(os.getpid(), include_parent=False)
+
+     def start_profile(self):
+         self.tokenizer_manager.start_profile()
+
+     def stop_profile(self):
+         self.tokenizer_manager.stop_profile()
+
+     def get_server_info(self):
+         return {
+             **dataclasses.asdict(self.tokenizer_manager.server_args),  # server args
+             **self.scheduler_info,
+             "version": __version__,
+         }
+
+     def init_weights_update_group(
+         self,
+         master_address: str,
+         master_port: int,
+         rank_offset: int,
+         world_size: int,
+         group_name: str,
+         backend: str = "nccl",
+     ):
+         """Initialize parameter update group."""
+         obj = InitWeightsUpdateGroupReqInput(
+             master_address=master_address,
+             master_port=master_port,
+             rank_offset=rank_offset,
+             world_size=world_size,
+             group_name=group_name,
+             backend=backend,
+         )
+         loop = asyncio.get_event_loop()
+         return loop.run_until_complete(
+             self.tokenizer_manager.init_weights_update_group(obj, None)
+         )
+
+     def update_weights_from_distributed(self, name: str, dtype, shape):
+         """Update weights from distributed source."""
+         obj = UpdateWeightsFromDistributedReqInput(
+             name=name,
+             dtype=dtype,
+             shape=shape,
+         )
+         loop = asyncio.get_event_loop()
+         return loop.run_until_complete(
+             self.tokenizer_manager.update_weights_from_distributed(obj, None)
+         )
+
+     def update_weights_from_tensor(self, named_tensors: List[Tuple[str, torch.Tensor]]):
+         """Update weights from in-memory tensors."""
+         obj = UpdateWeightsFromTensorReqInput(
+             serialized_named_tensors=MultiprocessingSerializer.serialize(named_tensors)
+         )
+         loop = asyncio.get_event_loop()
+         return loop.run_until_complete(
+             self.tokenizer_manager.update_weights_from_tensor(obj, None)
+         )
+
+     def get_weights_by_name(self, name: str, truncate_size: int = 100):
+         """Get weights by parameter name."""
+         obj = GetWeightsByNameReqInput(name=name, truncate_size=truncate_size)
+         loop = asyncio.get_event_loop()
+         return loop.run_until_complete(
+             self.tokenizer_manager.get_weights_by_name(obj, None)
+         )
+
+     def release_memory_occupation(self):
+         """Release GPU occupation temporarily."""
+         obj = ReleaseMemoryOccupationReqInput()
+         loop = asyncio.get_event_loop()
+         return loop.run_until_complete(
+             self.tokenizer_manager.release_memory_occupation(obj, None)
+         )
+
+     def resume_memory_occupation(self):
+         """Resume GPU occupation."""
+         obj = ResumeMemoryOccupationReqInput()
+         loop = asyncio.get_event_loop()
+         return loop.run_until_complete(
+             self.tokenizer_manager.resume_memory_occupation(obj, None)
+         )
+
+
+ def _set_envs_and_config(server_args: ServerArgs):
+     # Set global environments
+     os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
+     os.environ["NCCL_CUMEM_ENABLE"] = "0"
+     os.environ["NCCL_NVLS_ENABLE"] = "0"
+     os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1"
+     os.environ["CUDA_DEVICE_MAX_CONNECTIONS"] = "4"
+
+     # Set prometheus env vars
+     if server_args.enable_metrics:
+         set_prometheus_multiproc_dir()
+
+     # Set ulimit
+     set_ulimit()
+
+     # Fix triton bugs
+     if server_args.tp_size * server_args.dp_size > 1:
+         # FIXME: remove this after https://github.com/triton-lang/triton/pull/4295 is used as a dependency.
+         maybe_set_triton_cache_manager()
+
+     # Check flashinfer version
+     if server_args.attention_backend == "flashinfer":
+         assert_pkg_version(
+             "flashinfer",
+             "0.1.6",
+             "Please uninstall the old version and "
+             "reinstall the latest version by following the instructions "
+             "at https://docs.flashinfer.ai/installation.html.",
+         )
+
+     # Register the signal handler.
+     # The child processes will send SIGQUIT to this process when any error happens.
+     # This process then cleans up the whole process tree.
+     def sigquit_handler(signum, frame):
+         logger.error(
+             "Received SIGQUIT from a child process. It usually means the child failed."
+         )
+         kill_process_tree(os.getpid())
+
+     signal.signal(signal.SIGQUIT, sigquit_handler)
+
+     # Set mp start method
+     mp.set_start_method("spawn", force=True)
+
+
+ def _launch_subprocesses(server_args: ServerArgs) -> Tuple[TokenizerManager, Dict]:
+     """
+     Launch the TokenizerManager in the main process, the Scheduler in a subprocess, and the DetokenizerManager in another subprocess.
+     """
+     # Configure global environment
+     configure_logger(server_args)
+     server_args.check_server_args()
+     _set_envs_and_config(server_args)
+
+     # Allocate ports for inter-process communications
+     port_args = PortArgs.init_new(server_args)
+     logger.info(f"{server_args=}")
+
+     # If using model from www.modelscope.cn, first download the model.
+     server_args.model_path, server_args.tokenizer_path = prepare_model_and_tokenizer(
+         server_args.model_path, server_args.tokenizer_path
+     )
+
+     scheduler_procs = []
+     if server_args.dp_size == 1:
+         # Launch tensor parallel scheduler processes
+         memory_saver_adapter = TorchMemorySaverAdapter.create(
+             enable=server_args.enable_memory_saver
+         )
+
+         scheduler_pipe_readers = []
+         tp_size_per_node = server_args.tp_size // server_args.nnodes
+         tp_rank_range = range(
+             tp_size_per_node * server_args.node_rank,
+             tp_size_per_node * (server_args.node_rank + 1),
+         )
+         for tp_rank in tp_rank_range:
+             reader, writer = mp.Pipe(duplex=False)
+             gpu_id = server_args.base_gpu_id + tp_rank % tp_size_per_node
+             proc = mp.Process(
+                 target=run_scheduler_process,
+                 args=(server_args, port_args, gpu_id, tp_rank, None, writer),
+             )
+             with memory_saver_adapter.configure_subprocess():
+                 proc.start()
+             scheduler_procs.append(proc)
+             scheduler_pipe_readers.append(reader)
+     else:
+         # Launch the data parallel controller
+         reader, writer = mp.Pipe(duplex=False)
+         scheduler_pipe_readers = [reader]
+         proc = mp.Process(
+             target=run_data_parallel_controller_process,
+             args=(server_args, port_args, writer),
+         )
+         proc.start()
+         scheduler_procs.append(proc)
+
+     if server_args.node_rank >= 1:
+         # In multi-node cases, non-zero rank nodes do not need to run tokenizer or detokenizer,
+         # so they can just wait here.
+
+         for reader in scheduler_pipe_readers:
+             data = reader.recv()
+             assert data["status"] == "ready"
+
+         if os.getenv("SGLANG_BLOCK_NONZERO_RANK_CHILDREN") == "0":
+             # When using `Engine` as a Python API, we don't want to block here.
+             return None, None
+
+         launch_dummy_health_check_server(server_args.host, server_args.port)
+
+         for proc in scheduler_procs:
+             proc.join()
+             logger.error(
+                 f"Scheduler or DataParallelController {proc.pid} terminated with {proc.exitcode}"
+             )
+         return None, None
+
+     # Launch detokenizer process
+     detoken_proc = mp.Process(
+         target=run_detokenizer_process,
+         args=(
+             server_args,
+             port_args,
+         ),
+     )
+     detoken_proc.start()
+
+     # Launch tokenizer process
+     tokenizer_manager = TokenizerManager(server_args, port_args)
+     if server_args.chat_template:
+         load_chat_template_for_openai_api(tokenizer_manager, server_args.chat_template)
+
+     # Wait for the model to finish loading
+     scheduler_infos = []
+     for i in range(len(scheduler_pipe_readers)):
+         try:
+             data = scheduler_pipe_readers[i].recv()
+         except EOFError:
+             logger.error(
+                 f"Rank {i} scheduler is dead. Please check if there are relevant logs."
+             )
+             scheduler_procs[i].join()
+             logger.error(f"Exit code: {scheduler_procs[i].exitcode}")
+             raise
+
+         if data["status"] != "ready":
+             raise RuntimeError(
+                 "Initialization failed. Please see the error messages above."
+             )
+         scheduler_infos.append(data)
+
+     # Assume all schedulers have the same scheduler_info
+     scheduler_info = scheduler_infos[0]
+     tokenizer_manager.max_req_input_len = scheduler_info["max_req_input_len"]
+     return tokenizer_manager, scheduler_info
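
For reference, a minimal usage sketch of the offline Engine API added above. It only exercises the generate, streaming generate, and shutdown paths shown in the hunk; the model path is a placeholder, and the assumption that each returned dict carries a "text" field is not part of this diff. It is an illustrative sketch, not code contained in the wheel.

# Hypothetical example, not part of the package contents.
from sglang.srt.entrypoints.engine import Engine

if __name__ == "__main__":  # guard is needed because subprocesses use the "spawn" start method
    # Engine.__init__ forwards keyword arguments to ServerArgs (see the hunk above).
    llm = Engine(model_path="meta-llama/Llama-3.1-8B-Instruct", log_level="error")

    prompts = ["The capital of France is", "1 + 1 ="]
    sampling_params = {"temperature": 0.0, "max_new_tokens": 32}

    # Non-streaming batched call: expected to return one output dict per prompt.
    outputs = llm.generate(prompt=prompts, sampling_params=sampling_params)
    for prompt, out in zip(prompts, outputs):
        print(prompt, "->", out["text"])

    # Streaming call: generate(stream=True) wraps the async generator in a plain iterator.
    last_chunk = None
    for chunk in llm.generate(prompt=prompts[0], sampling_params=sampling_params, stream=True):
        last_chunk = chunk
    print(last_chunk["text"] if last_chunk else "")

    llm.shutdown()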