sglang 0.4.1.post6__py3-none-any.whl → 0.4.1.post7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (122)
  1. sglang/__init__.py +21 -23
  2. sglang/api.py +2 -7
  3. sglang/bench_offline_throughput.py +24 -16
  4. sglang/bench_one_batch.py +51 -3
  5. sglang/bench_one_batch_server.py +1 -1
  6. sglang/bench_serving.py +37 -28
  7. sglang/lang/backend/runtime_endpoint.py +183 -4
  8. sglang/lang/chat_template.py +15 -4
  9. sglang/launch_server.py +1 -1
  10. sglang/srt/_custom_ops.py +80 -42
  11. sglang/srt/configs/device_config.py +1 -1
  12. sglang/srt/configs/model_config.py +1 -0
  13. sglang/srt/constrained/base_grammar_backend.py +21 -0
  14. sglang/srt/constrained/xgrammar_backend.py +8 -4
  15. sglang/srt/conversation.py +14 -1
  16. sglang/srt/distributed/__init__.py +3 -3
  17. sglang/srt/distributed/communication_op.py +2 -1
  18. sglang/srt/distributed/device_communicators/cuda_wrapper.py +2 -1
  19. sglang/srt/distributed/device_communicators/custom_all_reduce.py +107 -40
  20. sglang/srt/distributed/device_communicators/custom_all_reduce_utils.py +2 -2
  21. sglang/srt/distributed/device_communicators/hpu_communicator.py +2 -1
  22. sglang/srt/distributed/device_communicators/pynccl.py +80 -1
  23. sglang/srt/distributed/device_communicators/pynccl_wrapper.py +112 -2
  24. sglang/srt/distributed/device_communicators/shm_broadcast.py +5 -72
  25. sglang/srt/distributed/device_communicators/xpu_communicator.py +2 -1
  26. sglang/srt/distributed/parallel_state.py +1 -1
  27. sglang/srt/distributed/utils.py +2 -1
  28. sglang/srt/entrypoints/engine.py +449 -0
  29. sglang/srt/entrypoints/http_server.py +579 -0
  30. sglang/srt/layers/activation.py +3 -3
  31. sglang/srt/layers/attention/flashinfer_backend.py +10 -9
  32. sglang/srt/layers/attention/triton_backend.py +4 -6
  33. sglang/srt/layers/attention/vision.py +204 -0
  34. sglang/srt/layers/dp_attention.py +69 -0
  35. sglang/srt/layers/linear.py +41 -5
  36. sglang/srt/layers/logits_processor.py +48 -63
  37. sglang/srt/layers/moe/ep_moe/layer.py +4 -4
  38. sglang/srt/layers/moe/fused_moe_native.py +69 -0
  39. sglang/srt/layers/moe/fused_moe_triton/fused_moe.py +9 -6
  40. sglang/srt/layers/moe/fused_moe_triton/layer.py +29 -5
  41. sglang/srt/layers/parameter.py +2 -1
  42. sglang/srt/layers/quantization/__init__.py +20 -23
  43. sglang/srt/layers/quantization/fp8.py +6 -3
  44. sglang/srt/layers/quantization/modelopt_quant.py +1 -2
  45. sglang/srt/layers/quantization/w8a8_int8.py +1 -1
  46. sglang/srt/layers/radix_attention.py +2 -2
  47. sglang/srt/layers/rotary_embedding.py +1179 -31
  48. sglang/srt/layers/sampler.py +39 -1
  49. sglang/srt/layers/vocab_parallel_embedding.py +2 -2
  50. sglang/srt/lora/lora.py +1 -9
  51. sglang/srt/managers/configure_logging.py +3 -0
  52. sglang/srt/managers/data_parallel_controller.py +79 -72
  53. sglang/srt/managers/detokenizer_manager.py +23 -6
  54. sglang/srt/managers/image_processor.py +158 -2
  55. sglang/srt/managers/io_struct.py +25 -2
  56. sglang/srt/managers/schedule_batch.py +49 -22
  57. sglang/srt/managers/schedule_policy.py +26 -12
  58. sglang/srt/managers/scheduler.py +277 -178
  59. sglang/srt/managers/session_controller.py +1 -0
  60. sglang/srt/managers/tokenizer_manager.py +206 -121
  61. sglang/srt/managers/tp_worker.py +6 -4
  62. sglang/srt/managers/tp_worker_overlap_thread.py +5 -8
  63. sglang/srt/managers/utils.py +44 -0
  64. sglang/srt/mem_cache/memory_pool.py +10 -32
  65. sglang/srt/metrics/collector.py +15 -6
  66. sglang/srt/model_executor/cuda_graph_runner.py +4 -6
  67. sglang/srt/model_executor/model_runner.py +37 -15
  68. sglang/srt/model_loader/loader.py +8 -6
  69. sglang/srt/model_loader/weight_utils.py +55 -2
  70. sglang/srt/models/baichuan.py +6 -6
  71. sglang/srt/models/chatglm.py +2 -2
  72. sglang/srt/models/commandr.py +3 -3
  73. sglang/srt/models/dbrx.py +4 -4
  74. sglang/srt/models/deepseek.py +3 -3
  75. sglang/srt/models/deepseek_v2.py +8 -8
  76. sglang/srt/models/exaone.py +2 -2
  77. sglang/srt/models/gemma.py +2 -2
  78. sglang/srt/models/gemma2.py +6 -24
  79. sglang/srt/models/gpt2.py +3 -5
  80. sglang/srt/models/gpt_bigcode.py +1 -1
  81. sglang/srt/models/granite.py +2 -2
  82. sglang/srt/models/grok.py +3 -3
  83. sglang/srt/models/internlm2.py +2 -2
  84. sglang/srt/models/llama.py +7 -5
  85. sglang/srt/models/minicpm.py +2 -2
  86. sglang/srt/models/minicpm3.py +6 -6
  87. sglang/srt/models/minicpmv.py +1238 -0
  88. sglang/srt/models/mixtral.py +3 -3
  89. sglang/srt/models/mixtral_quant.py +3 -3
  90. sglang/srt/models/mllama.py +2 -2
  91. sglang/srt/models/olmo.py +3 -3
  92. sglang/srt/models/olmo2.py +4 -4
  93. sglang/srt/models/olmoe.py +7 -13
  94. sglang/srt/models/phi3_small.py +2 -2
  95. sglang/srt/models/qwen.py +2 -2
  96. sglang/srt/models/qwen2.py +41 -4
  97. sglang/srt/models/qwen2_moe.py +3 -3
  98. sglang/srt/models/qwen2_vl.py +22 -122
  99. sglang/srt/models/stablelm.py +2 -2
  100. sglang/srt/models/torch_native_llama.py +3 -3
  101. sglang/srt/models/xverse.py +6 -6
  102. sglang/srt/models/xverse_moe.py +6 -6
  103. sglang/srt/openai_api/protocol.py +2 -0
  104. sglang/srt/sampling/custom_logit_processor.py +38 -0
  105. sglang/srt/sampling/sampling_batch_info.py +139 -4
  106. sglang/srt/sampling/sampling_params.py +3 -1
  107. sglang/srt/server.py +4 -1090
  108. sglang/srt/server_args.py +57 -14
  109. sglang/srt/utils.py +103 -65
  110. sglang/test/runners.py +8 -13
  111. sglang/test/test_programs.py +1 -1
  112. sglang/test/test_utils.py +3 -1
  113. sglang/utils.py +12 -2
  114. sglang/version.py +1 -1
  115. {sglang-0.4.1.post6.dist-info → sglang-0.4.1.post7.dist-info}/METADATA +16 -5
  116. {sglang-0.4.1.post6.dist-info → sglang-0.4.1.post7.dist-info}/RECORD +119 -115
  117. sglang/launch_server_llavavid.py +0 -25
  118. sglang/srt/constrained/__init__.py +0 -16
  119. sglang/srt/distributed/device_communicators/__init__.py +0 -0
  120. {sglang-0.4.1.post6.dist-info → sglang-0.4.1.post7.dist-info}/LICENSE +0 -0
  121. {sglang-0.4.1.post6.dist-info → sglang-0.4.1.post7.dist-info}/WHEEL +0 -0
  122. {sglang-0.4.1.post6.dist-info → sglang-0.4.1.post7.dist-info}/top_level.txt +0 -0
sglang/srt/entrypoints/engine.py (new file)
@@ -0,0 +1,449 @@
+ # Copyright 2023-2024 SGLang Team
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ==============================================================================
+ """
+ The entry point of the inference server. (SRT = SGLang Runtime)
+
+ This file implements Python APIs for the inference engine.
+ """
+
+ import asyncio
+ import atexit
+ import dataclasses
+ import logging
+ import multiprocessing as mp
+ import os
+ import signal
+ import threading
+ from typing import AsyncIterator, Dict, Iterator, List, Optional, Tuple, Union
+
+ # Fix a bug of Python threading
+ setattr(threading, "_register_atexit", lambda *args, **kwargs: None)
+
+ import torch
+ import uvloop
+
+ from sglang.srt.managers.data_parallel_controller import (
+     run_data_parallel_controller_process,
+ )
+ from sglang.srt.managers.detokenizer_manager import run_detokenizer_process
+ from sglang.srt.managers.io_struct import (
+     EmbeddingReqInput,
+     GenerateReqInput,
+     GetWeightsByNameReqInput,
+     InitWeightsUpdateGroupReqInput,
+     ReleaseMemoryOccupationReqInput,
+     ResumeMemoryOccupationReqInput,
+     UpdateWeightsFromDistributedReqInput,
+     UpdateWeightsFromTensorReqInput,
+ )
+ from sglang.srt.managers.scheduler import run_scheduler_process
+ from sglang.srt.managers.tokenizer_manager import TokenizerManager
+ from sglang.srt.openai_api.adapter import load_chat_template_for_openai_api
+ from sglang.srt.server_args import PortArgs, ServerArgs
+ from sglang.srt.torch_memory_saver_adapter import TorchMemorySaverAdapter
+ from sglang.srt.utils import (
+     MultiprocessingSerializer,
+     assert_pkg_version,
+     configure_logger,
+     kill_process_tree,
+     maybe_set_triton_cache_manager,
+     prepare_model_and_tokenizer,
+     set_prometheus_multiproc_dir,
+     set_ulimit,
+ )
+ from sglang.version import __version__
+
+ logger = logging.getLogger(__name__)
+ asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
+
+
+ class Engine:
+     """
+     The entry point to the inference engine.
+
+     - The engine consists of three components:
+         1. TokenizerManager: Tokenizes the requests and sends them to the scheduler.
+         2. Scheduler (subprocess): Receives requests from the TokenizerManager, schedules batches, forwards them, and sends the output tokens to the DetokenizerManager.
+         3. DetokenizerManager (subprocess): Detokenizes the output tokens and sends the result back to the TokenizerManager.
+
+     Note:
+     1. The HTTP server, Engine, and TokenizerManager all run in the main process.
+     2. Inter-process communication is done through IPC (each process uses a different port) via the ZMQ library.
+     """
+
+     def __init__(self, **kwargs):
+         """
+         The arguments of this function are the same as `sglang/srt/server_args.py::ServerArgs`.
+         Please refer to `ServerArgs` for the documentation.
+         """
+         if "server_args" in kwargs:
+             # Directly load server_args
+             server_args = kwargs["server_args"]
+         else:
+             # Construct server_args from kwargs
+             if "log_level" not in kwargs:
+                 # Do not print logs by default
+                 kwargs["log_level"] = "error"
+             server_args = ServerArgs(**kwargs)
+
+         # Shutdown the subprocesses automatically when the program exits
+         atexit.register(self.shutdown)
+
+         # Launch subprocesses
+         tokenizer_manager, scheduler_info = _launch_subprocesses(
+             server_args=server_args
+         )
+         self.tokenizer_manager = tokenizer_manager
+         self.scheduler_info = scheduler_info
+
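The constructor above forwards its keyword arguments to `ServerArgs`, so any server option can be passed directly. A minimal, illustrative construction sketch (not part of this diff; the model path is a placeholder):

    from sglang.srt.entrypoints.engine import Engine

    # Placeholder model path; any value accepted by ServerArgs.model_path works.
    llm = Engine(model_path="path/to/your-model", tp_size=1)
    print(llm.get_server_info()["version"])
    llm.shutdown()
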
+     def generate(
+         self,
+         # The input prompt. It can be a single prompt or a batch of prompts.
+         prompt: Optional[Union[List[str], str]] = None,
+         sampling_params: Optional[Union[List[Dict], Dict]] = None,
+         # The token ids for text; one can either specify text or input_ids.
+         input_ids: Optional[Union[List[List[int]], List[int]]] = None,
+         return_logprob: Optional[Union[List[bool], bool]] = False,
+         logprob_start_len: Optional[Union[List[int], int]] = None,
+         top_logprobs_num: Optional[Union[List[int], int]] = None,
+         lora_path: Optional[List[Optional[str]]] = None,
+         custom_logit_processor: Optional[Union[List[str], str]] = None,
+         stream: bool = False,
+     ) -> Union[Dict, Iterator[Dict]]:
+         """
+         The arguments of this function are the same as `sglang/srt/managers/io_struct.py::GenerateReqInput`.
+         Please refer to `GenerateReqInput` for the documentation.
+         """
+         obj = GenerateReqInput(
+             text=prompt,
+             input_ids=input_ids,
+             sampling_params=sampling_params,
+             return_logprob=return_logprob,
+             logprob_start_len=logprob_start_len,
+             top_logprobs_num=top_logprobs_num,
+             lora_path=lora_path,
+             custom_logit_processor=custom_logit_processor,
+             stream=stream,
+         )
+         loop = asyncio.get_event_loop()
+         generator = self.tokenizer_manager.generate_request(obj, None)
+
+         if stream:
+
+             def generator_wrapper():
+                 while True:
+                     try:
+                         chunk = loop.run_until_complete(generator.__anext__())
+                         yield chunk
+                     except StopAsyncIteration:
+                         break
+
+             return generator_wrapper()
+         else:
+             ret = loop.run_until_complete(generator.__anext__())
+             return ret
+
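Continuing the sketch above, `generate` can be driven in blocking or streaming mode; both paths go through `GenerateReqInput`. The prompts and sampling values below are illustrative, and the result is assumed to be a dict with a "text" field:

    # Blocking call: returns a single result dict.
    out = llm.generate("The capital of France is", {"temperature": 0, "max_new_tokens": 16})
    print(out["text"])

    # Streaming call: returns a generator of result dicts.
    last = None
    for chunk in llm.generate("Write a haiku about GPUs.", {"temperature": 0.7}, stream=True):
        last = chunk
    print(last["text"])
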
+     async def async_generate(
+         self,
+         # The input prompt. It can be a single prompt or a batch of prompts.
+         prompt: Optional[Union[List[str], str]] = None,
+         sampling_params: Optional[Union[List[Dict], Dict]] = None,
+         # The token ids for text; one can either specify text or input_ids.
+         input_ids: Optional[Union[List[List[int]], List[int]]] = None,
+         return_logprob: Optional[Union[List[bool], bool]] = False,
+         logprob_start_len: Optional[Union[List[int], int]] = None,
+         top_logprobs_num: Optional[Union[List[int], int]] = None,
+         lora_path: Optional[List[Optional[str]]] = None,
+         custom_logit_processor: Optional[Union[List[str], str]] = None,
+         stream: bool = False,
+     ) -> Union[Dict, AsyncIterator[Dict]]:
+         """
+         The arguments of this function are the same as `sglang/srt/managers/io_struct.py::GenerateReqInput`.
+         Please refer to `GenerateReqInput` for the documentation.
+         """
+         obj = GenerateReqInput(
+             text=prompt,
+             input_ids=input_ids,
+             sampling_params=sampling_params,
+             return_logprob=return_logprob,
+             logprob_start_len=logprob_start_len,
+             top_logprobs_num=top_logprobs_num,
+             lora_path=lora_path,
+             stream=stream,
+             custom_logit_processor=custom_logit_processor,
+         )
+         generator = self.tokenizer_manager.generate_request(obj, None)
+
+         if stream is True:
+             return generator
+         else:
+             return await generator.__anext__()
+
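`async_generate` is the awaitable counterpart for code already running inside an event loop; with `stream=True` it hands back the async generator directly. A sketch under the same assumptions as above:

    import asyncio

    async def main():
        # Await a single result dict.
        out = await llm.async_generate("Explain tensor parallelism in one sentence.")
        print(out["text"])

        # Iterate over streamed chunks.
        async for chunk in await llm.async_generate("Count to five.", stream=True):
            print(chunk["text"])

    asyncio.get_event_loop().run_until_complete(main())
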
+     def encode(
+         self,
+         prompt: Union[str, List[str], List[Dict], List[List[Dict]]],
+     ) -> Dict:
+         """
+         The arguments of this function are the same as `sglang/srt/managers/io_struct.py::EmbeddingReqInput`.
+         Please refer to `EmbeddingReqInput` for the documentation.
+         """
+
+         obj = EmbeddingReqInput(text=prompt)
+         loop = asyncio.get_event_loop()
+         generator = self.tokenizer_manager.generate_request(obj, None)
+         ret = loop.run_until_complete(generator.__anext__())
+         return ret
+
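`encode` reuses the same request path with `EmbeddingReqInput`. A sketch that assumes an embedding model served with `is_embedding=True` and a result dict exposing an "embedding" field (both assumptions, not guaranteed by this diff):

    emb = Engine(model_path="path/to/embedding-model", is_embedding=True)
    result = emb.encode("The quick brown fox")
    print(len(result["embedding"]))  # assumed key name
    emb.shutdown()
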
+     def shutdown(self):
+         """Shutdown the engine"""
+         kill_process_tree(os.getpid(), include_parent=False)
+
+     def start_profile(self):
+         self.tokenizer_manager.start_profile()
+
+     def stop_profile(self):
+         self.tokenizer_manager.stop_profile()
+
+     def get_server_info(self):
+         return {
+             **dataclasses.asdict(self.tokenizer_manager.server_args),  # server args
+             **self.scheduler_info,
+             "version": __version__,
+         }
+
+     def init_weights_update_group(
+         self,
+         master_address: str,
+         master_port: int,
+         rank_offset: int,
+         world_size: int,
+         group_name: str,
+         backend: str = "nccl",
+     ):
+         """Initialize parameter update group."""
+         obj = InitWeightsUpdateGroupReqInput(
+             master_address=master_address,
+             master_port=master_port,
+             rank_offset=rank_offset,
+             world_size=world_size,
+             group_name=group_name,
+             backend=backend,
+         )
+         loop = asyncio.get_event_loop()
+         return loop.run_until_complete(
+             self.tokenizer_manager.init_weights_update_group(obj, None)
+         )
+
+     def update_weights_from_distributed(self, name: str, dtype, shape):
+         """Update weights from a distributed source."""
+         obj = UpdateWeightsFromDistributedReqInput(
+             name=name,
+             dtype=dtype,
+             shape=shape,
+         )
+         loop = asyncio.get_event_loop()
+         return loop.run_until_complete(
+             self.tokenizer_manager.update_weights_from_distributed(obj, None)
+         )
+
+     def update_weights_from_tensor(self, named_tensors: List[Tuple[str, torch.Tensor]]):
+         """Update weights from in-memory tensors."""
+         obj = UpdateWeightsFromTensorReqInput(
+             serialized_named_tensors=MultiprocessingSerializer.serialize(named_tensors)
+         )
+         loop = asyncio.get_event_loop()
+         return loop.run_until_complete(
+             self.tokenizer_manager.update_weights_from_tensor(obj, None)
+         )
+
+     def get_weights_by_name(self, name: str, truncate_size: int = 100):
+         """Get weights by parameter name."""
+         obj = GetWeightsByNameReqInput(name=name, truncate_size=truncate_size)
+         loop = asyncio.get_event_loop()
+         return loop.run_until_complete(
+             self.tokenizer_manager.get_weights_by_name(obj, None)
+         )
+
+     def release_memory_occupation(self):
+         """Release GPU occupation temporarily."""
+         obj = ReleaseMemoryOccupationReqInput()
+         loop = asyncio.get_event_loop()
+         return loop.run_until_complete(
+             self.tokenizer_manager.release_memory_occupation(obj, None)
+         )
+
+     def resume_memory_occupation(self):
+         """Resume GPU occupation."""
+         obj = ResumeMemoryOccupationReqInput()
+         loop = asyncio.get_event_loop()
+         return loop.run_until_complete(
+             self.tokenizer_manager.resume_memory_occupation(obj, None)
+         )
+
+
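The weight-update and memory-occupation methods above target workflows that interleave training and inference (e.g. RLHF), where another process produces new weights and may temporarily need the GPU back. A hedged sketch; the parameter names and shape are placeholders, and releasing memory likely requires the engine to have been started with `enable_memory_saver=True`:

    import torch

    # Push updated weights into the running engine from in-memory tensors.
    new_w = torch.zeros(4096, 4096, dtype=torch.bfloat16)  # placeholder tensor
    llm.update_weights_from_tensor([("model.layers.0.mlp.up_proj.weight", new_w)])

    # Read back a (truncated) parameter by name to verify.
    print(llm.get_weights_by_name("model.embed_tokens.weight", truncate_size=8))

    # Temporarily hand GPU memory to another workload, then resume serving.
    llm.release_memory_occupation()
    # ... run training or other GPU work here ...
    llm.resume_memory_occupation()
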
+ def _set_envs_and_config(server_args: ServerArgs):
+     # Set global environments
+     os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
+     os.environ["NCCL_CUMEM_ENABLE"] = "0"
+     os.environ["NCCL_NVLS_ENABLE"] = "0"
+     os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1"
+     os.environ["CUDA_DEVICE_MAX_CONNECTIONS"] = "4"
+
+     # Set prometheus env vars
+     if server_args.enable_metrics:
+         set_prometheus_multiproc_dir()
+
+     # Set ulimit
+     set_ulimit()
+
+     # Fix triton bugs
+     if server_args.tp_size * server_args.dp_size > 1:
+         # FIXME: remove this after https://github.com/triton-lang/triton/pull/4295 is used as a dependency.
+         maybe_set_triton_cache_manager()
+
+     # Check flashinfer version
+     if server_args.attention_backend == "flashinfer":
+         assert_pkg_version(
+             "flashinfer",
+             "0.1.6",
+             "Please uninstall the old version and "
+             "reinstall the latest version by following the instructions "
+             "at https://docs.flashinfer.ai/installation.html.",
+         )
+
+     # Register the signal handler.
+     # The child processes will send SIGQUIT to this process when any error happens.
+     # This process then cleans up the whole process tree.
+     def sigquit_handler(signum, frame):
+         logger.error(
+             "Received sigquit from a child process. It usually means the child failed."
+         )
+         kill_process_tree(os.getpid())
+
+     signal.signal(signal.SIGQUIT, sigquit_handler)
+
+     # Set mp start method
+     mp.set_start_method("spawn", force=True)
+
+
+ def _launch_subprocesses(server_args: ServerArgs) -> Tuple[TokenizerManager, Dict]:
+     """
+     Launch the TokenizerManager in the main process, the Scheduler in a subprocess, and the DetokenizerManager in another subprocess.
+     """
+     # Configure global environment
+     configure_logger(server_args)
+     server_args.check_server_args()
+     _set_envs_and_config(server_args)
+
+     # Allocate ports for inter-process communications
+     port_args = PortArgs.init_new(server_args)
+     logger.info(f"{server_args=}")
+
+     # If using a model from www.modelscope.cn, download the model first.
+     server_args.model_path, server_args.tokenizer_path = prepare_model_and_tokenizer(
+         server_args.model_path, server_args.tokenizer_path
+     )
+
+     scheduler_procs = []
+     if server_args.dp_size == 1:
+         # Launch tensor parallel scheduler processes
+         memory_saver_adapter = TorchMemorySaverAdapter.create(
+             enable=server_args.enable_memory_saver
+         )
+
+         scheduler_pipe_readers = []
+         tp_size_per_node = server_args.tp_size // server_args.nnodes
+         tp_rank_range = range(
+             tp_size_per_node * server_args.node_rank,
+             tp_size_per_node * (server_args.node_rank + 1),
+         )
+         for tp_rank in tp_rank_range:
+             reader, writer = mp.Pipe(duplex=False)
+             gpu_id = server_args.base_gpu_id + tp_rank % tp_size_per_node
+             proc = mp.Process(
+                 target=run_scheduler_process,
+                 args=(server_args, port_args, gpu_id, tp_rank, None, writer),
+             )
+             with memory_saver_adapter.configure_subprocess():
+                 proc.start()
+             scheduler_procs.append(proc)
+             scheduler_pipe_readers.append(reader)
+     else:
+         # Launch the data parallel controller
+         reader, writer = mp.Pipe(duplex=False)
+         scheduler_pipe_readers = [reader]
+         proc = mp.Process(
+             target=run_data_parallel_controller_process,
+             args=(server_args, port_args, writer),
+         )
+         proc.start()
+         scheduler_procs.append(proc)
+
+     if server_args.node_rank >= 1:
+         # In multi-node cases, non-zero rank nodes do not need to run the tokenizer or detokenizer,
+         # so they can just wait here.
+
+         for reader in scheduler_pipe_readers:
+             data = reader.recv()
+             assert data["status"] == "ready"
+
+         if os.getenv("SGLANG_BLOCK_NONZERO_RANK_CHILDREN") == "0":
+             # When using `Engine` as a Python API, we don't want to block here.
+             return
+
+         for proc in scheduler_procs:
+             proc.join()
+             logger.error(
+                 f"Scheduler or DataParallelController {proc.pid} terminated with {proc.exitcode}"
+             )
+         return
+
+     # Launch detokenizer process
+     detoken_proc = mp.Process(
+         target=run_detokenizer_process,
+         args=(
+             server_args,
+             port_args,
+         ),
+     )
+     detoken_proc.start()
+
+     # Launch tokenizer process
+     tokenizer_manager = TokenizerManager(server_args, port_args)
+     if server_args.chat_template:
+         load_chat_template_for_openai_api(tokenizer_manager, server_args.chat_template)
+
+     # Wait for the model to finish loading
+     scheduler_infos = []
+     for i in range(len(scheduler_pipe_readers)):
+         try:
+             data = scheduler_pipe_readers[i].recv()
+         except EOFError:
+             logger.error(
+                 f"Rank {i} scheduler is dead. Please check if there are relevant logs."
+             )
+             scheduler_procs[i].join()
+             logger.error(f"Exit code: {scheduler_procs[i].exitcode}")
+             raise
+
+         if data["status"] != "ready":
+             raise RuntimeError(
+                 "Initialization failed. Please see the error messages above."
+             )
+         scheduler_infos.append(data)
+
+     # Assume all schedulers have the same scheduler_info
+     scheduler_info = scheduler_infos[0]
+     tokenizer_manager.max_req_input_len = scheduler_info["max_req_input_len"]
+     return tokenizer_manager, scheduler_info
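
`_launch_subprocesses` coordinates startup with a plain readiness handshake: each scheduler (or the data parallel controller) sends a {"status": "ready", ...} dict through an `mp.Pipe` once its model is loaded, and the parent blocks on `recv()` before returning. A stripped-down illustration of that pattern (toy code, not from this package):

    import multiprocessing as mp

    def worker(writer):
        # ... load the model, bind sockets, etc. ...
        writer.send({"status": "ready", "max_req_input_len": 4096})

    if __name__ == "__main__":
        mp.set_start_method("spawn", force=True)
        reader, writer = mp.Pipe(duplex=False)
        proc = mp.Process(target=worker, args=(writer,))
        proc.start()
        info = reader.recv()  # blocks until the child reports readiness
        assert info["status"] == "ready"
        proc.join()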