sglang 0.4.1.post6__py3-none-any.whl → 0.4.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (141)
  1. sglang/__init__.py +21 -23
  2. sglang/api.py +2 -7
  3. sglang/bench_offline_throughput.py +41 -27
  4. sglang/bench_one_batch.py +60 -4
  5. sglang/bench_one_batch_server.py +1 -1
  6. sglang/bench_serving.py +83 -71
  7. sglang/lang/backend/runtime_endpoint.py +183 -4
  8. sglang/lang/chat_template.py +46 -4
  9. sglang/launch_server.py +1 -1
  10. sglang/srt/_custom_ops.py +80 -42
  11. sglang/srt/configs/device_config.py +1 -1
  12. sglang/srt/configs/load_config.py +1 -0
  13. sglang/srt/configs/model_config.py +1 -0
  14. sglang/srt/constrained/base_grammar_backend.py +21 -0
  15. sglang/srt/constrained/xgrammar_backend.py +8 -4
  16. sglang/srt/conversation.py +14 -1
  17. sglang/srt/distributed/__init__.py +3 -3
  18. sglang/srt/distributed/communication_op.py +2 -1
  19. sglang/srt/distributed/device_communicators/cuda_wrapper.py +2 -1
  20. sglang/srt/distributed/device_communicators/custom_all_reduce.py +112 -42
  21. sglang/srt/distributed/device_communicators/custom_all_reduce_utils.py +2 -2
  22. sglang/srt/distributed/device_communicators/hpu_communicator.py +2 -1
  23. sglang/srt/distributed/device_communicators/pynccl.py +80 -1
  24. sglang/srt/distributed/device_communicators/pynccl_wrapper.py +112 -2
  25. sglang/srt/distributed/device_communicators/shm_broadcast.py +5 -72
  26. sglang/srt/distributed/device_communicators/xpu_communicator.py +2 -1
  27. sglang/srt/distributed/parallel_state.py +1 -1
  28. sglang/srt/distributed/utils.py +2 -1
  29. sglang/srt/entrypoints/engine.py +452 -0
  30. sglang/srt/entrypoints/http_server.py +603 -0
  31. sglang/srt/function_call_parser.py +494 -0
  32. sglang/srt/layers/activation.py +8 -8
  33. sglang/srt/layers/attention/flashinfer_backend.py +10 -9
  34. sglang/srt/layers/attention/triton_backend.py +4 -6
  35. sglang/srt/layers/attention/vision.py +204 -0
  36. sglang/srt/layers/dp_attention.py +71 -0
  37. sglang/srt/layers/layernorm.py +5 -5
  38. sglang/srt/layers/linear.py +65 -14
  39. sglang/srt/layers/logits_processor.py +49 -64
  40. sglang/srt/layers/moe/ep_moe/layer.py +24 -16
  41. sglang/srt/layers/moe/fused_moe_native.py +84 -1
  42. sglang/srt/layers/moe/fused_moe_triton/configs/E=256,N=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
  43. sglang/srt/layers/moe/fused_moe_triton/fused_moe.py +27 -7
  44. sglang/srt/layers/moe/fused_moe_triton/layer.py +38 -5
  45. sglang/srt/layers/parameter.py +18 -8
  46. sglang/srt/layers/quantization/__init__.py +20 -23
  47. sglang/srt/layers/quantization/configs/N=1536,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
  48. sglang/srt/layers/quantization/configs/N=3072,K=1536,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
  49. sglang/srt/layers/quantization/configs/N=4096,K=512,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
  50. sglang/srt/layers/quantization/configs/N=4608,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
  51. sglang/srt/layers/quantization/configs/N=512,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
  52. sglang/srt/layers/quantization/configs/N=576,K=7168,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
  53. sglang/srt/layers/quantization/configs/N=7168,K=2048,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
  54. sglang/srt/layers/quantization/configs/N=7168,K=2304,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
  55. sglang/srt/layers/quantization/configs/N=7168,K=256,device_name=AMD_Instinct_MI300X,dtype=fp8_w8a8,block_shape=[128, 128].json +164 -0
  56. sglang/srt/layers/quantization/fp8.py +10 -4
  57. sglang/srt/layers/quantization/modelopt_quant.py +1 -2
  58. sglang/srt/layers/quantization/w8a8_int8.py +1 -1
  59. sglang/srt/layers/radix_attention.py +2 -2
  60. sglang/srt/layers/rotary_embedding.py +1184 -31
  61. sglang/srt/layers/sampler.py +64 -6
  62. sglang/srt/layers/torchao_utils.py +12 -6
  63. sglang/srt/layers/vocab_parallel_embedding.py +2 -2
  64. sglang/srt/lora/lora.py +1 -9
  65. sglang/srt/managers/configure_logging.py +3 -0
  66. sglang/srt/managers/data_parallel_controller.py +79 -72
  67. sglang/srt/managers/detokenizer_manager.py +24 -6
  68. sglang/srt/managers/image_processor.py +158 -2
  69. sglang/srt/managers/io_struct.py +57 -3
  70. sglang/srt/managers/schedule_batch.py +78 -45
  71. sglang/srt/managers/schedule_policy.py +26 -12
  72. sglang/srt/managers/scheduler.py +326 -201
  73. sglang/srt/managers/session_controller.py +1 -0
  74. sglang/srt/managers/tokenizer_manager.py +210 -121
  75. sglang/srt/managers/tp_worker.py +6 -4
  76. sglang/srt/managers/tp_worker_overlap_thread.py +5 -8
  77. sglang/srt/managers/utils.py +44 -0
  78. sglang/srt/mem_cache/memory_pool.py +10 -32
  79. sglang/srt/metrics/collector.py +15 -6
  80. sglang/srt/model_executor/cuda_graph_runner.py +26 -30
  81. sglang/srt/model_executor/forward_batch_info.py +5 -7
  82. sglang/srt/model_executor/model_runner.py +44 -19
  83. sglang/srt/model_loader/loader.py +83 -6
  84. sglang/srt/model_loader/weight_utils.py +145 -6
  85. sglang/srt/models/baichuan.py +6 -6
  86. sglang/srt/models/chatglm.py +2 -2
  87. sglang/srt/models/commandr.py +17 -5
  88. sglang/srt/models/dbrx.py +13 -5
  89. sglang/srt/models/deepseek.py +3 -3
  90. sglang/srt/models/deepseek_v2.py +11 -11
  91. sglang/srt/models/exaone.py +2 -2
  92. sglang/srt/models/gemma.py +2 -2
  93. sglang/srt/models/gemma2.py +15 -25
  94. sglang/srt/models/gpt2.py +3 -5
  95. sglang/srt/models/gpt_bigcode.py +1 -1
  96. sglang/srt/models/granite.py +2 -2
  97. sglang/srt/models/grok.py +4 -3
  98. sglang/srt/models/internlm2.py +2 -2
  99. sglang/srt/models/llama.py +7 -5
  100. sglang/srt/models/minicpm.py +2 -2
  101. sglang/srt/models/minicpm3.py +9 -9
  102. sglang/srt/models/minicpmv.py +1238 -0
  103. sglang/srt/models/mixtral.py +3 -3
  104. sglang/srt/models/mixtral_quant.py +3 -3
  105. sglang/srt/models/mllama.py +2 -2
  106. sglang/srt/models/olmo.py +3 -3
  107. sglang/srt/models/olmo2.py +4 -4
  108. sglang/srt/models/olmoe.py +7 -13
  109. sglang/srt/models/phi3_small.py +2 -2
  110. sglang/srt/models/qwen.py +2 -2
  111. sglang/srt/models/qwen2.py +41 -4
  112. sglang/srt/models/qwen2_moe.py +3 -3
  113. sglang/srt/models/qwen2_vl.py +22 -122
  114. sglang/srt/models/stablelm.py +2 -2
  115. sglang/srt/models/torch_native_llama.py +20 -7
  116. sglang/srt/models/xverse.py +6 -6
  117. sglang/srt/models/xverse_moe.py +6 -6
  118. sglang/srt/openai_api/adapter.py +139 -37
  119. sglang/srt/openai_api/protocol.py +7 -4
  120. sglang/srt/sampling/custom_logit_processor.py +38 -0
  121. sglang/srt/sampling/penaltylib/penalizers/repetition_penalty.py +11 -14
  122. sglang/srt/sampling/sampling_batch_info.py +143 -18
  123. sglang/srt/sampling/sampling_params.py +3 -1
  124. sglang/srt/server.py +4 -1090
  125. sglang/srt/server_args.py +77 -15
  126. sglang/srt/speculative/eagle_utils.py +37 -15
  127. sglang/srt/speculative/eagle_worker.py +11 -13
  128. sglang/srt/utils.py +164 -129
  129. sglang/test/runners.py +8 -13
  130. sglang/test/test_programs.py +2 -1
  131. sglang/test/test_utils.py +83 -22
  132. sglang/utils.py +12 -2
  133. sglang/version.py +1 -1
  134. {sglang-0.4.1.post6.dist-info → sglang-0.4.2.dist-info}/METADATA +21 -10
  135. {sglang-0.4.1.post6.dist-info → sglang-0.4.2.dist-info}/RECORD +138 -123
  136. sglang/launch_server_llavavid.py +0 -25
  137. sglang/srt/constrained/__init__.py +0 -16
  138. sglang/srt/distributed/device_communicators/__init__.py +0 -0
  139. {sglang-0.4.1.post6.dist-info → sglang-0.4.2.dist-info}/LICENSE +0 -0
  140. {sglang-0.4.1.post6.dist-info → sglang-0.4.2.dist-info}/WHEEL +0 -0
  141. {sglang-0.4.1.post6.dist-info → sglang-0.4.2.dist-info}/top_level.txt +0 -0
sglang/srt/server.py CHANGED
@@ -11,1094 +11,8 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
  # ==============================================================================
- """
- The entry point of inference server.
- SRT = SGLang Runtime.
- """
 
19
- import asyncio
20
- import atexit
21
- import dataclasses
22
- import json
23
- import logging
24
- import multiprocessing as mp
25
- import os
26
- import signal
27
- import threading
28
- import time
29
- from http import HTTPStatus
30
- from typing import AsyncIterator, Dict, List, Optional, Tuple, Union
31
-
32
- import torch
33
-
34
- from sglang.srt.torch_memory_saver_adapter import TorchMemorySaverAdapter
35
-
36
- # Fix a bug of Python threading
37
- setattr(threading, "_register_atexit", lambda *args, **kwargs: None)
38
-
39
- import aiohttp
40
- import orjson
41
- import requests
42
- import uvicorn
43
- import uvloop
44
- from fastapi import FastAPI, File, Form, Request, UploadFile
45
- from fastapi.middleware.cors import CORSMiddleware
46
- from fastapi.responses import ORJSONResponse, Response, StreamingResponse
47
- from uvicorn.config import LOGGING_CONFIG
48
-
49
- from sglang.lang.backend.runtime_endpoint import RuntimeEndpoint
50
- from sglang.srt.hf_transformers_utils import get_tokenizer
51
- from sglang.srt.managers.data_parallel_controller import (
52
- run_data_parallel_controller_process,
53
- )
54
- from sglang.srt.managers.detokenizer_manager import run_detokenizer_process
55
- from sglang.srt.managers.io_struct import (
56
- CloseSessionReqInput,
57
- ConfigureLoggingReq,
58
- EmbeddingReqInput,
59
- GenerateReqInput,
60
- GetWeightsByNameReqInput,
61
- InitWeightsUpdateGroupReqInput,
62
- OpenSessionReqInput,
63
- ReleaseMemoryOccupationReqInput,
64
- ResumeMemoryOccupationReqInput,
65
- UpdateWeightFromDiskReqInput,
66
- UpdateWeightsFromDistributedReqInput,
67
- UpdateWeightsFromTensorReqInput,
68
- )
69
- from sglang.srt.managers.scheduler import run_scheduler_process
70
- from sglang.srt.managers.tokenizer_manager import TokenizerManager
71
- from sglang.srt.metrics.func_timer import enable_func_timer, time_func_latency
72
- from sglang.srt.openai_api.adapter import (
73
- load_chat_template_for_openai_api,
74
- v1_batches,
75
- v1_cancel_batch,
76
- v1_chat_completions,
77
- v1_completions,
78
- v1_delete_file,
79
- v1_embeddings,
80
- v1_files_create,
81
- v1_retrieve_batch,
82
- v1_retrieve_file,
83
- v1_retrieve_file_content,
84
- )
85
- from sglang.srt.openai_api.protocol import ModelCard, ModelList
86
- from sglang.srt.server_args import PortArgs, ServerArgs
87
- from sglang.srt.utils import (
88
- MultiprocessingSerializer,
89
- add_api_key_middleware,
90
- add_prometheus_middleware,
91
- assert_pkg_version,
92
- configure_logger,
93
- delete_directory,
94
- is_port_available,
95
- kill_process_tree,
96
- maybe_set_triton_cache_manager,
97
- prepare_model_and_tokenizer,
98
- set_prometheus_multiproc_dir,
99
- set_ulimit,
100
- )
101
- from sglang.utils import get_exception_traceback
102
- from sglang.version import __version__
103
-
104
- logger = logging.getLogger(__name__)
105
-
106
- asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
107
-
108
- # Fast API
109
- app = FastAPI()
110
- app.add_middleware(
111
- CORSMiddleware,
112
- allow_origins=["*"],
113
- allow_credentials=True,
114
- allow_methods=["*"],
115
- allow_headers=["*"],
116
- )
117
-
118
- tokenizer_manager: TokenizerManager = None
119
- scheduler_info: Dict = None
120
-
121
-
122
- ##### Native API endpoints #####
123
-
124
-
125
- @app.get("/health")
126
- async def health() -> Response:
127
- """Check the health of the http server."""
128
- return Response(status_code=200)
129
-
130
-
131
- @app.get("/health_generate")
132
- async def health_generate(request: Request) -> Response:
133
- """Check the health of the inference server by generating one token."""
134
-
135
- sampling_params = {"max_new_tokens": 1, "temperature": 0.7}
136
-
137
- if tokenizer_manager.is_generation:
138
- gri = GenerateReqInput(input_ids=[0], sampling_params=sampling_params)
139
- else:
140
- gri = EmbeddingReqInput(input_ids=[0], sampling_params=sampling_params)
141
-
142
- try:
143
- async for _ in tokenizer_manager.generate_request(gri, request):
144
- break
145
- return Response(status_code=200)
146
- except Exception as e:
147
- logger.exception(e)
148
- return Response(status_code=503)
149
-
150
-
151
- @app.get("/get_model_info")
152
- async def get_model_info():
153
- """Get the model information."""
154
- result = {
155
- "model_path": tokenizer_manager.model_path,
156
- "tokenizer_path": tokenizer_manager.server_args.tokenizer_path,
157
- "is_generation": tokenizer_manager.is_generation,
158
- }
159
- return result
160
-
161
-
162
- @app.get("/get_server_info")
163
- async def get_server_info():
164
- return {
165
- **dataclasses.asdict(tokenizer_manager.server_args),
166
- **scheduler_info,
167
- "version": __version__,
168
- }
169
-
170
-
171
- # fastapi implicitly converts json in the request to obj (dataclass)
172
- @app.api_route("/generate", methods=["POST", "PUT"])
173
- @time_func_latency
174
- async def generate_request(obj: GenerateReqInput, request: Request):
175
- """Handle a generate request."""
176
- if obj.stream:
177
-
178
- async def stream_results() -> AsyncIterator[bytes]:
179
- try:
180
- async for out in tokenizer_manager.generate_request(obj, request):
181
- yield b"data: " + orjson.dumps(
182
- out, option=orjson.OPT_NON_STR_KEYS
183
- ) + b"\n\n"
184
- except ValueError as e:
185
- out = {"error": {"message": str(e)}}
186
- yield b"data: " + orjson.dumps(
187
- out, option=orjson.OPT_NON_STR_KEYS
188
- ) + b"\n\n"
189
- yield b"data: [DONE]\n\n"
190
-
191
- return StreamingResponse(
192
- stream_results(),
193
- media_type="text/event-stream",
194
- background=tokenizer_manager.create_abort_task(obj),
195
- )
196
- else:
197
- try:
198
- ret = await tokenizer_manager.generate_request(obj, request).__anext__()
199
- return ret
200
- except ValueError as e:
201
- logger.error(f"Error: {e}")
202
- return _create_error_response(e)
203
-
204
-
205
- @app.api_route("/encode", methods=["POST", "PUT"])
206
- @time_func_latency
207
- async def encode_request(obj: EmbeddingReqInput, request: Request):
208
- """Handle an embedding request."""
209
- try:
210
- ret = await tokenizer_manager.generate_request(obj, request).__anext__()
211
- return ret
212
- except ValueError as e:
213
- return _create_error_response(e)
214
-
215
-
216
- @app.api_route("/classify", methods=["POST", "PUT"])
217
- @time_func_latency
218
- async def classify_request(obj: EmbeddingReqInput, request: Request):
219
- """Handle a reward model request. Now the arguments and return values are the same as embedding models."""
220
- try:
221
- ret = await tokenizer_manager.generate_request(obj, request).__anext__()
222
- return ret
223
- except ValueError as e:
224
- return _create_error_response(e)
225
-
226
-
227
- @app.post("/flush_cache")
228
- async def flush_cache():
229
- """Flush the radix cache."""
230
- tokenizer_manager.flush_cache()
231
- return Response(
232
- content="Cache flushed.\nPlease check backend logs for more details. "
233
- "(When there are running or waiting requests, the operation will not be performed.)\n",
234
- status_code=200,
235
- )
236
-
237
-
238
- @app.api_route("/start_profile", methods=["GET", "POST"])
239
- async def start_profile_async():
240
- """Start profiling."""
241
- tokenizer_manager.start_profile()
242
- return Response(
243
- content="Start profiling.\n",
244
- status_code=200,
245
- )
246
-
247
-
248
- @app.api_route("/stop_profile", methods=["GET", "POST"])
249
- async def stop_profile_async():
250
- """Stop profiling."""
251
- tokenizer_manager.stop_profile()
252
- return Response(
253
- content="Stop profiling. This will take some time.\n",
254
- status_code=200,
255
- )
256
-
257
-
258
- @app.post("/update_weights_from_disk")
259
- @time_func_latency
260
- async def update_weights_from_disk(obj: UpdateWeightFromDiskReqInput, request: Request):
261
- """Update the weights from disk in-place without re-launching the server."""
262
- success, message = await tokenizer_manager.update_weights_from_disk(obj, request)
263
- content = {"success": success, "message": message}
264
- if success:
265
- return ORJSONResponse(
266
- content,
267
- status_code=HTTPStatus.OK,
268
- )
269
- else:
270
- return ORJSONResponse(
271
- content,
272
- status_code=HTTPStatus.BAD_REQUEST,
273
- )
274
-
275
-
276
- @app.post("/init_weights_update_group")
277
- async def init_weights_update_group(
278
- obj: InitWeightsUpdateGroupReqInput, request: Request
279
- ):
280
- """Initialize the parameter update group."""
281
- success, message = await tokenizer_manager.init_weights_update_group(obj, request)
282
- content = {"success": success, "message": message}
283
- if success:
284
- return ORJSONResponse(content, status_code=200)
285
- else:
286
- return ORJSONResponse(content, status_code=HTTPStatus.BAD_REQUEST)
287
-
288
-
289
- @app.post("/update_weights_from_distributed")
290
- async def update_weights_from_distributed(
291
- obj: UpdateWeightsFromDistributedReqInput, request: Request
292
- ):
293
- """Update model parameter from distributed online."""
294
- success, message = await tokenizer_manager.update_weights_from_distributed(
295
- obj, request
296
- )
297
- content = {"success": success, "message": message}
298
- if success:
299
- return ORJSONResponse(content, status_code=200)
300
- else:
301
- return ORJSONResponse(content, status_code=HTTPStatus.BAD_REQUEST)
302
-
303
-
304
- @app.api_route("/get_weights_by_name", methods=["GET", "POST"])
305
- async def get_weights_by_name(obj: GetWeightsByNameReqInput, request: Request):
306
- """Get model parameter by name."""
307
- try:
308
- ret = await tokenizer_manager.get_weights_by_name(obj, request)
309
- if ret is None:
310
- return _create_error_response("Get parameter by name failed")
311
- else:
312
- return ORJSONResponse(ret, status_code=200)
313
- except Exception as e:
314
- return _create_error_response(e)
315
-
316
-
317
- @app.api_route("/release_memory_occupation", methods=["GET", "POST"])
318
- async def release_memory_occupation(
319
- obj: ReleaseMemoryOccupationReqInput, request: Request
320
- ):
321
- """Release GPU occupation temporarily"""
322
- try:
323
- await tokenizer_manager.release_memory_occupation(obj, request)
324
- except Exception as e:
325
- return _create_error_response(e)
326
-
327
-
328
- @app.api_route("/resume_memory_occupation", methods=["GET", "POST"])
329
- async def resume_memory_occupation(
330
- obj: ResumeMemoryOccupationReqInput, request: Request
331
- ):
332
- """Resume GPU occupation"""
333
- try:
334
- await tokenizer_manager.resume_memory_occupation(obj, request)
335
- except Exception as e:
336
- return _create_error_response(e)
337
-
338
-
339
- @app.api_route("/open_session", methods=["GET", "POST"])
340
- async def open_session(obj: OpenSessionReqInput, request: Request):
341
- """Open a session, and return its unique session id."""
342
- try:
343
- session_id = await tokenizer_manager.open_session(obj, request)
344
- if session_id is None:
345
- raise Exception(
346
- "Failed to open the session. Check if a session with the same id is still open."
347
- )
348
- return session_id
349
- except Exception as e:
350
- return _create_error_response(e)
351
-
352
-
353
- @app.api_route("/close_session", methods=["GET", "POST"])
354
- async def close_session(obj: CloseSessionReqInput, request: Request):
355
- """Close the session"""
356
- try:
357
- await tokenizer_manager.close_session(obj, request)
358
- return Response(status_code=200)
359
- except Exception as e:
360
- return _create_error_response(e)
361
-
362
-
363
- @app.api_route("/configure_logging", methods=["GET", "POST"])
364
- async def configure_logging(obj: ConfigureLoggingReq, request: Request):
365
- """Close the session"""
366
- tokenizer_manager.configure_logging(obj)
367
- return Response(status_code=200)
368
-
369
-
370
- ##### OpenAI-compatible API endpoints #####
371
-
372
-
373
- @app.post("/v1/completions")
374
- @time_func_latency
375
- async def openai_v1_completions(raw_request: Request):
376
- return await v1_completions(tokenizer_manager, raw_request)
377
-
378
-
379
- @app.post("/v1/chat/completions")
380
- @time_func_latency
381
- async def openai_v1_chat_completions(raw_request: Request):
382
- return await v1_chat_completions(tokenizer_manager, raw_request)
383
-
384
-
385
- @app.post("/v1/embeddings", response_class=ORJSONResponse)
386
- @time_func_latency
387
- async def openai_v1_embeddings(raw_request: Request):
388
- response = await v1_embeddings(tokenizer_manager, raw_request)
389
- return response
390
-
391
-
392
- @app.get("/v1/models", response_class=ORJSONResponse)
393
- def available_models():
394
- """Show available models."""
395
- served_model_names = [tokenizer_manager.served_model_name]
396
- model_cards = []
397
- for served_model_name in served_model_names:
398
- model_cards.append(ModelCard(id=served_model_name, root=served_model_name))
399
- return ModelList(data=model_cards)
400
-
401
-
402
- @app.post("/v1/files")
403
- async def openai_v1_files(file: UploadFile = File(...), purpose: str = Form("batch")):
404
- return await v1_files_create(
405
- file, purpose, tokenizer_manager.server_args.file_storage_pth
406
- )
407
-
408
-
409
- @app.delete("/v1/files/{file_id}")
410
- async def delete_file(file_id: str):
411
- # https://platform.openai.com/docs/api-reference/files/delete
412
- return await v1_delete_file(file_id)
413
-
414
-
415
- @app.post("/v1/batches")
416
- async def openai_v1_batches(raw_request: Request):
417
- return await v1_batches(tokenizer_manager, raw_request)
418
-
419
-
420
- @app.post("/v1/batches/{batch_id}/cancel")
421
- async def cancel_batches(batch_id: str):
422
- # https://platform.openai.com/docs/api-reference/batch/cancel
423
- return await v1_cancel_batch(tokenizer_manager, batch_id)
424
-
425
-
426
- @app.get("/v1/batches/{batch_id}")
427
- async def retrieve_batch(batch_id: str):
428
- return await v1_retrieve_batch(batch_id)
429
-
430
-
431
- @app.get("/v1/files/{file_id}")
432
- async def retrieve_file(file_id: str):
433
- # https://platform.openai.com/docs/api-reference/files/retrieve
434
- return await v1_retrieve_file(file_id)
435
-
436
-
437
- @app.get("/v1/files/{file_id}/content")
438
- async def retrieve_file_content(file_id: str):
439
- # https://platform.openai.com/docs/api-reference/files/retrieve-contents
440
- return await v1_retrieve_file_content(file_id)
441
-
442
-
443
- def _create_error_response(e):
444
- return ORJSONResponse(
445
- {"error": {"message": str(e)}}, status_code=HTTPStatus.BAD_REQUEST
446
- )
447
-
448
-
449
- def launch_engine(
450
- server_args: ServerArgs,
451
- ):
452
- """
453
- Launch the TokenizerManager in the main process, the Scheduler in a subprocess, and the DetokenizerManager in another subprocess.
454
- """
455
-
456
- global tokenizer_manager
457
- global scheduler_info
458
-
459
- # Configure global environment
460
- configure_logger(server_args)
461
- server_args.check_server_args()
462
- _set_envs_and_config(server_args)
463
-
464
- # Allocate ports for inter-process communications
465
- port_args = PortArgs.init_new(server_args)
466
- logger.info(f"{server_args=}")
467
-
468
- # If using model from www.modelscope.cn, first download the model.
469
- server_args.model_path, server_args.tokenizer_path = prepare_model_and_tokenizer(
470
- server_args.model_path, server_args.tokenizer_path
471
- )
472
-
473
- memory_saver_adapter = TorchMemorySaverAdapter.create(
474
- enable=server_args.enable_memory_saver
475
- )
476
-
477
- if server_args.dp_size == 1:
478
- # Launch tensor parallel scheduler processes
479
- scheduler_procs = []
480
- scheduler_pipe_readers = []
481
- tp_size_per_node = server_args.tp_size // server_args.nnodes
482
- tp_rank_range = range(
483
- tp_size_per_node * server_args.node_rank,
484
- tp_size_per_node * (server_args.node_rank + 1),
485
- )
486
- for tp_rank in tp_rank_range:
487
- reader, writer = mp.Pipe(duplex=False)
488
- gpu_id = server_args.base_gpu_id + tp_rank % tp_size_per_node
489
- proc = mp.Process(
490
- target=run_scheduler_process,
491
- args=(server_args, port_args, gpu_id, tp_rank, None, writer),
492
- )
493
- with memory_saver_adapter.configure_subprocess():
494
- proc.start()
495
- scheduler_procs.append(proc)
496
- scheduler_pipe_readers.append(reader)
497
-
498
- if server_args.node_rank >= 1:
499
- # For other nodes, they do not need to run tokenizer or detokenizer,
500
- # so they can just wait here.
501
- for proc in scheduler_procs:
502
- proc.join()
503
- else:
504
- # Launch the data parallel controller
505
- reader, writer = mp.Pipe(duplex=False)
506
- scheduler_pipe_readers = [reader]
507
- proc = mp.Process(
508
- target=run_data_parallel_controller_process,
509
- args=(server_args, port_args, writer),
510
- )
511
- with memory_saver_adapter.configure_subprocess():
512
- proc.start()
513
-
514
- # Launch detokenizer process
515
- detoken_proc = mp.Process(
516
- target=run_detokenizer_process,
517
- args=(
518
- server_args,
519
- port_args,
520
- ),
521
- )
522
- detoken_proc.start()
523
-
524
- # Launch tokenizer process
525
- tokenizer_manager = TokenizerManager(server_args, port_args)
526
- if server_args.chat_template:
527
- load_chat_template_for_openai_api(tokenizer_manager, server_args.chat_template)
528
-
529
- # Wait for model to finish loading
530
- scheduler_infos = []
531
- for i in range(len(scheduler_pipe_readers)):
532
- try:
533
- data = scheduler_pipe_readers[i].recv()
534
- except EOFError as e:
535
- logger.exception(e)
536
- logger.error(
537
- f"Rank {i} scheduler is dead. Please check if there are relevant logs."
538
- )
539
- scheduler_procs[i].join()
540
- logger.error(f"Exit code: {scheduler_procs[i].exitcode}")
541
- raise
542
-
543
- if data["status"] != "ready":
544
- raise RuntimeError(
545
- "Initialization failed. Please see the error messages above."
546
- )
547
- scheduler_infos.append(data)
548
-
549
- # Assume all schedulers have same scheduler_info
550
- scheduler_info = scheduler_infos[0]
551
-
552
-
553
- def launch_server(
554
- server_args: ServerArgs,
555
- pipe_finish_writer: Optional[mp.connection.Connection] = None,
556
- ):
557
- """
558
- Launch SRT (SGLang Runtime) Server
559
-
560
- The SRT server consists of an HTTP server and the SRT engine.
561
-
562
- 1. HTTP server: A FastAPI server that routes requests to the engine.
563
- 2. SRT engine:
564
- 1. TokenizerManager: Tokenizes the requests and sends them to the scheduler.
565
- 2. Scheduler (subprocess): Receives requests from the Tokenizer Manager, schedules batches, forwards them, and sends the output tokens to the Detokenizer Manager.
566
- 3. DetokenizerManager (subprocess): Detokenizes the output tokens and sends the result back to the Tokenizer Manager.
567
-
568
- Note:
569
- 1. The HTTP server and TokenizerManager both run in the main process.
570
- 2. Inter-process communication is done through ICP (each process uses a different port) via the ZMQ library.
571
- """
572
- launch_engine(server_args=server_args)
573
-
574
- # Add api key authorization
575
- if server_args.api_key:
576
- add_api_key_middleware(app, server_args.api_key)
577
-
578
- # Add prometheus middleware
579
- if server_args.enable_metrics:
580
- add_prometheus_middleware(app)
581
- enable_func_timer()
582
-
583
- # Send a warmup request
584
- t = threading.Thread(
585
- target=_wait_and_warmup,
586
- args=(
587
- server_args,
588
- pipe_finish_writer,
589
- tokenizer_manager.image_token_id,
590
- ),
591
- )
592
- t.start()
593
-
594
- try:
595
- # Update logging configs
596
- LOGGING_CONFIG["formatters"]["default"][
597
- "fmt"
598
- ] = "[%(asctime)s] %(levelprefix)s %(message)s"
599
- LOGGING_CONFIG["formatters"]["default"]["datefmt"] = "%Y-%m-%d %H:%M:%S"
600
- LOGGING_CONFIG["formatters"]["access"][
601
- "fmt"
602
- ] = '[%(asctime)s] %(levelprefix)s %(client_addr)s - "%(request_line)s" %(status_code)s'
603
- LOGGING_CONFIG["formatters"]["access"]["datefmt"] = "%Y-%m-%d %H:%M:%S"
604
-
605
- # Listen for HTTP requests
606
- uvicorn.run(
607
- app,
608
- host=server_args.host,
609
- port=server_args.port,
610
- log_level=server_args.log_level_http or server_args.log_level,
611
- timeout_keep_alive=5,
612
- loop="uvloop",
613
- )
614
- finally:
615
- t.join()
616
-
617
-
618
- def _set_envs_and_config(server_args: ServerArgs):
619
- # Set global environments
620
- os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
621
- os.environ["NCCL_CUMEM_ENABLE"] = "0"
622
- os.environ["NCCL_NVLS_ENABLE"] = "0"
623
- os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1"
624
- os.environ["CUDA_DEVICE_MAX_CONNECTIONS"] = "4"
625
-
626
- # Set prometheus env vars
627
- if server_args.enable_metrics:
628
- set_prometheus_multiproc_dir()
629
-
630
- # Set ulimit
631
- set_ulimit()
632
-
633
- # Fix triton bugs
634
- if server_args.tp_size * server_args.dp_size > 1:
635
- # FIXME: remove this after https://github.com/triton-lang/triton/pull/4295 is used as a dependency.
636
- maybe_set_triton_cache_manager()
637
-
638
- # Check flashinfer version
639
- if server_args.attention_backend == "flashinfer":
640
- assert_pkg_version(
641
- "flashinfer",
642
- "0.1.6",
643
- "Please uninstall the old version and "
644
- "reinstall the latest version by following the instructions "
645
- "at https://docs.flashinfer.ai/installation.html.",
646
- )
647
-
648
- # Register the signal handler.
649
- # The child processes will send SIGQUIT to this process when any error happens
650
- # This process then clean up the whole process tree
651
- def sigquit_handler(signum, frame):
652
- logger.error(
653
- "Received sigquit from a child proces. It usually means the child failed."
654
- )
655
- kill_process_tree(os.getpid())
656
-
657
- signal.signal(signal.SIGQUIT, sigquit_handler)
658
-
659
- # Set mp start method
660
- mp.set_start_method("spawn", force=True)
661
-
662
-
663
- def _wait_and_warmup(server_args, pipe_finish_writer, image_token_text):
664
- headers = {}
665
- url = server_args.url()
666
- if server_args.api_key:
667
- headers["Authorization"] = f"Bearer {server_args.api_key}"
668
-
669
- # Wait until the server is launched
670
- success = False
671
- for _ in range(120):
672
- time.sleep(1)
673
- try:
674
- res = requests.get(url + "/get_model_info", timeout=5, headers=headers)
675
- assert res.status_code == 200, f"{res=}, {res.text=}"
676
- success = True
677
- break
678
- except (AssertionError, requests.exceptions.RequestException):
679
- last_traceback = get_exception_traceback()
680
- pass
681
-
682
- if not success:
683
- if pipe_finish_writer is not None:
684
- pipe_finish_writer.send(last_traceback)
685
- logger.error(f"Initialization failed. warmup error: {last_traceback}")
686
- kill_process_tree(os.getpid())
687
- return
688
-
689
- model_info = res.json()
690
-
691
- # Send a warmup request
692
- request_name = "/generate" if model_info["is_generation"] else "/encode"
693
- max_new_tokens = 8 if model_info["is_generation"] else 1
694
- json_data = {
695
- "sampling_params": {
696
- "temperature": 0,
697
- "max_new_tokens": max_new_tokens,
698
- },
699
- }
700
- if server_args.skip_tokenizer_init:
701
- json_data["input_ids"] = [10, 11, 12]
702
- else:
703
- json_data["text"] = "The capital city of France is"
704
-
705
- try:
706
- for _ in range(server_args.dp_size):
707
- res = requests.post(
708
- url + request_name,
709
- json=json_data,
710
- headers=headers,
711
- timeout=600,
712
- )
713
- assert res.status_code == 200, f"{res}"
714
- except Exception:
715
- last_traceback = get_exception_traceback()
716
- if pipe_finish_writer is not None:
717
- pipe_finish_writer.send(last_traceback)
718
- logger.error(f"Initialization failed. warmup error: {last_traceback}")
719
- kill_process_tree(os.getpid())
720
- return
721
-
722
- # Debug print
723
- # logger.info(f"{res.json()=}")
724
-
725
- logger.info("The server is fired up and ready to roll!")
726
- if pipe_finish_writer is not None:
727
- pipe_finish_writer.send("ready")
728
-
729
- if server_args.delete_ckpt_after_loading:
730
- delete_directory(server_args.model_path)
731
-
732
-
733
- STREAM_END_SYMBOL = b"data: [DONE]"
734
- STREAM_CHUNK_START_SYMBOL = b"data:"
735
-
736
-
737
- class Engine:
738
- """
739
- SRT Engine without an HTTP server layer.
740
-
741
- This class provides a direct inference engine without the need for an HTTP server. It is designed for use cases where
742
- launching the HTTP server adds unnecessary complexity or overhead,
743
- """
744
-
745
- def __init__(self, log_level: str = "error", *args, **kwargs):
746
- """See the arguments in server_args.py::ServerArgs"""
747
-
748
- # before python program terminates, call shutdown implicitly. Therefore, users don't have to explicitly call .shutdown()
749
- atexit.register(self.shutdown)
750
-
751
- server_args = ServerArgs(*args, log_level=log_level, **kwargs)
752
- launch_engine(server_args=server_args)
753
-
754
- def generate(
755
- self,
756
- # The input prompt. It can be a single prompt or a batch of prompts.
757
- prompt: Optional[Union[List[str], str]] = None,
758
- sampling_params: Optional[Union[List[Dict], Dict]] = None,
759
- # The token ids for text; one can either specify text or input_ids.
760
- input_ids: Optional[Union[List[List[int]], List[int]]] = None,
761
- return_logprob: Optional[Union[List[bool], bool]] = False,
762
- logprob_start_len: Optional[Union[List[int], int]] = None,
763
- top_logprobs_num: Optional[Union[List[int], int]] = None,
764
- lora_path: Optional[List[Optional[str]]] = None,
765
- stream: bool = False,
766
- ):
767
- obj = GenerateReqInput(
768
- text=prompt,
769
- input_ids=input_ids,
770
- sampling_params=sampling_params,
771
- return_logprob=return_logprob,
772
- logprob_start_len=logprob_start_len,
773
- top_logprobs_num=top_logprobs_num,
774
- lora_path=lora_path,
775
- stream=stream,
776
- )
777
-
778
- # get the current event loop
779
- loop = asyncio.get_event_loop()
780
- ret = loop.run_until_complete(generate_request(obj, None))
781
-
782
- if stream is True:
783
-
784
- def generator_wrapper():
785
- offset = 0
786
- loop = asyncio.get_event_loop()
787
- generator = ret.body_iterator
788
- while True:
789
- chunk = loop.run_until_complete(generator.__anext__())
790
-
791
- if chunk.startswith(STREAM_END_SYMBOL):
792
- break
793
- else:
794
- data = json.loads(chunk[len(STREAM_CHUNK_START_SYMBOL) :])
795
- data["text"] = data["text"][offset:]
796
- offset += len(data["text"])
797
- yield data
798
-
799
- # we cannot yield in the scope of generate() because python does not allow yield + return in the same function
800
- # however, it allows to wrap the generator as a subfunction and return
801
- return generator_wrapper()
802
- else:
803
- return ret
804
-
805
- async def async_generate(
806
- self,
807
- # The input prompt. It can be a single prompt or a batch of prompts.
808
- prompt: Optional[Union[List[str], str]] = None,
809
- sampling_params: Optional[Dict] = None,
810
- # The token ids for text; one can either specify text or input_ids.
811
- input_ids: Optional[Union[List[List[int]], List[int]]] = None,
812
- return_logprob: Optional[Union[List[bool], bool]] = False,
813
- logprob_start_len: Optional[Union[List[int], int]] = None,
814
- top_logprobs_num: Optional[Union[List[int], int]] = None,
815
- lora_path: Optional[List[Optional[str]]] = None,
816
- stream: bool = False,
817
- ):
818
- obj = GenerateReqInput(
819
- text=prompt,
820
- input_ids=input_ids,
821
- sampling_params=sampling_params,
822
- return_logprob=return_logprob,
823
- logprob_start_len=logprob_start_len,
824
- top_logprobs_num=top_logprobs_num,
825
- lora_path=lora_path,
826
- stream=stream,
827
- )
828
-
829
- ret = await generate_request(obj, None)
830
-
831
- if stream is True:
832
- generator = ret.body_iterator
833
-
834
- async def generator_wrapper():
835
-
836
- offset = 0
837
-
838
- while True:
839
- chunk = await generator.__anext__()
840
-
841
- if chunk.startswith(STREAM_END_SYMBOL):
842
- break
843
- else:
844
- data = json.loads(chunk[len(STREAM_CHUNK_START_SYMBOL) :])
845
- data["text"] = data["text"][offset:]
846
- offset += len(data["text"])
847
- yield data
848
-
849
- return generator_wrapper()
850
- else:
851
- return ret
852
-
853
- def shutdown(self):
854
- kill_process_tree(os.getpid(), include_parent=False)
855
-
856
- def get_tokenizer(self):
857
- global tokenizer_manager
858
-
859
- if tokenizer_manager is None:
860
- raise ReferenceError("Tokenizer Manager is not initialized.")
861
- else:
862
- return tokenizer_manager.tokenizer
863
-
864
- def encode(
865
- self,
866
- prompt: Union[str, List[str], List[Dict], List[List[Dict]]],
867
- ):
868
- obj = EmbeddingReqInput(text=prompt)
869
-
870
- # get the current event loop
871
- loop = asyncio.get_event_loop()
872
- return loop.run_until_complete(encode_request(obj, None))
873
-
874
- def start_profile(self):
875
- tokenizer_manager.start_profile()
876
-
877
- def stop_profile(self):
878
- tokenizer_manager.stop_profile()
879
-
880
- def get_server_info(self):
881
- return {
882
- **dataclasses.asdict(tokenizer_manager.server_args), # server args
883
- **scheduler_info,
884
- "version": __version__,
885
- }
886
-
887
- def init_weights_update_group(
888
- self,
889
- master_address: str,
890
- master_port: int,
891
- rank_offset: int,
892
- world_size: int,
893
- group_name: str,
894
- backend: str = "nccl",
895
- ):
896
- """Initialize parameter update group."""
897
- obj = InitWeightsUpdateGroupReqInput(
898
- master_address=master_address,
899
- master_port=master_port,
900
- rank_offset=rank_offset,
901
- world_size=world_size,
902
- group_name=group_name,
903
- backend=backend,
904
- )
905
- loop = asyncio.get_event_loop()
906
- return loop.run_until_complete(
907
- tokenizer_manager.init_weights_update_group(obj, None)
908
- )
909
-
910
- def update_weights_from_distributed(self, name, dtype, shape):
911
- """Update weights from distributed source."""
912
- obj = UpdateWeightsFromDistributedReqInput(
913
- name=name,
914
- dtype=dtype,
915
- shape=shape,
916
- )
917
- loop = asyncio.get_event_loop()
918
- return loop.run_until_complete(
919
- tokenizer_manager.update_weights_from_distributed(obj, None)
920
- )
921
-
922
- def update_weights_from_tensor(self, named_tensors: List[Tuple[str, torch.Tensor]]):
923
- """Update weights from distributed source."""
924
- obj = UpdateWeightsFromTensorReqInput(
925
- serialized_named_tensors=MultiprocessingSerializer.serialize(named_tensors)
926
- )
927
- loop = asyncio.get_event_loop()
928
- return loop.run_until_complete(
929
- tokenizer_manager.update_weights_from_tensor(obj, None)
930
- )
931
-
932
- def get_weights_by_name(self, name, truncate_size=100):
933
- """Get weights by parameter name."""
934
- obj = GetWeightsByNameReqInput(name=name, truncate_size=truncate_size)
935
- loop = asyncio.get_event_loop()
936
- return loop.run_until_complete(tokenizer_manager.get_weights_by_name(obj, None))
937
-
938
- def release_memory_occupation(self):
939
- """Release GPU occupation temporarily"""
940
- obj = ReleaseMemoryOccupationReqInput()
941
- loop = asyncio.get_event_loop()
942
- loop.run_until_complete(tokenizer_manager.release_memory_occupation(obj, None))
943
-
944
- def resume_memory_occupation(self):
945
- """Resume GPU occupation"""
946
- obj = ResumeMemoryOccupationReqInput()
947
- loop = asyncio.get_event_loop()
948
- loop.run_until_complete(tokenizer_manager.resume_memory_occupation(obj, None))
949
-
950
-
951
- class Runtime:
952
- """
953
- A wrapper for the HTTP server.
954
- This is used for launching the server in a python program without
955
- using the commond line interface.
956
-
957
- It is mainly used for the frontend language.
958
- You should use the Engine class above if you want to do normal offline processing.
959
- """
960
-
961
- def __init__(
962
- self,
963
- log_level: str = "error",
964
- *args,
965
- **kwargs,
966
- ):
967
- """See the arguments in server_args.py::ServerArgs"""
968
- self.server_args = ServerArgs(*args, log_level=log_level, **kwargs)
969
-
970
- # before python program terminates, call shutdown implicitly. Therefore, users don't have to explicitly call .shutdown()
971
- atexit.register(self.shutdown)
972
-
973
- # Pre-allocate ports
974
- for port in range(self.server_args.port, 40000):
975
- if is_port_available(port):
976
- break
977
- self.server_args.port = port
978
-
979
- self.url = self.server_args.url()
980
- self.generate_url = self.url + "/generate"
981
-
982
- # NOTE: We store pid instead of proc to fix some issues during __delete__
983
- self.pid = None
984
- pipe_reader, pipe_writer = mp.Pipe(duplex=False)
985
-
986
- proc = mp.Process(
987
- target=launch_server,
988
- args=(self.server_args, pipe_writer),
989
- )
990
- proc.start()
991
- pipe_writer.close()
992
- self.pid = proc.pid
993
-
994
- try:
995
- init_state = pipe_reader.recv()
996
- except EOFError:
997
- init_state = ""
998
-
999
- if init_state != "ready":
1000
- self.shutdown()
1001
- raise RuntimeError(
1002
- "Initialization failed. Please see the error messages above."
1003
- )
1004
-
1005
- self.endpoint = RuntimeEndpoint(self.url)
1006
-
1007
- def shutdown(self):
1008
- if self.pid is not None:
1009
- kill_process_tree(self.pid)
1010
- self.pid = None
1011
-
1012
- def cache_prefix(self, prefix: str):
1013
- self.endpoint.cache_prefix(prefix)
1014
-
1015
- def get_tokenizer(self):
1016
- return get_tokenizer(
1017
- self.server_args.tokenizer_path,
1018
- tokenizer_mode=self.server_args.tokenizer_mode,
1019
- trust_remote_code=self.server_args.trust_remote_code,
1020
- )
1021
-
1022
- async def async_generate(
1023
- self,
1024
- prompt: str,
1025
- sampling_params: Optional[Dict] = None,
1026
- ):
1027
- if self.server_args.skip_tokenizer_init:
1028
- json_data = {
1029
- "input_ids": prompt,
1030
- "sampling_params": sampling_params,
1031
- "stream": True,
1032
- }
1033
- else:
1034
- json_data = {
1035
- "text": prompt,
1036
- "sampling_params": sampling_params,
1037
- "stream": True,
1038
- }
1039
- pos = 0
1040
-
1041
- timeout = aiohttp.ClientTimeout(total=3 * 3600)
1042
- async with aiohttp.ClientSession(timeout=timeout, trust_env=True) as session:
1043
- async with session.post(self.generate_url, json=json_data) as response:
1044
- async for chunk, _ in response.content.iter_chunks():
1045
- chunk = chunk.decode("utf-8")
1046
- if chunk and chunk.startswith("data:"):
1047
- if chunk == "data: [DONE]\n\n":
1048
- break
1049
- data = json.loads(chunk[5:].strip("\n"))
1050
- if "text" in data:
1051
- cur = data["text"][pos:]
1052
- if cur:
1053
- yield cur
1054
- pos += len(cur)
1055
- else:
1056
- yield data
1057
-
1058
- add_request = async_generate
1059
-
1060
- def generate(
1061
- self,
1062
- prompt: Union[str, List[str]],
1063
- sampling_params: Optional[Dict] = None,
1064
- return_logprob: Optional[Union[List[bool], bool]] = False,
1065
- logprob_start_len: Optional[Union[List[int], int]] = None,
1066
- top_logprobs_num: Optional[Union[List[int], int]] = None,
1067
- lora_path: Optional[List[Optional[str]]] = None,
1068
- ):
1069
- json_data = {
1070
- "text": prompt,
1071
- "sampling_params": sampling_params,
1072
- "return_logprob": return_logprob,
1073
- "logprob_start_len": logprob_start_len,
1074
- "top_logprobs_num": top_logprobs_num,
1075
- "lora_path": lora_path,
1076
- }
1077
- assert not isinstance(lora_path, list) or len(lora_path) == len(prompt)
1078
- response = requests.post(
1079
- self.url + "/generate",
1080
- json=json_data,
1081
- )
1082
- return json.dumps(response.json())
1083
-
1084
- def encode(
1085
- self,
1086
- prompt: Union[str, List[str], List[Dict], List[List[Dict]]],
1087
- ):
1088
- json_data = {"text": prompt}
1089
- response = requests.post(self.url + "/encode", json=json_data)
1090
- return json.dumps(response.json())
1091
-
1092
- async def get_server_info(self):
1093
- async with aiohttp.ClientSession() as session:
1094
- async with session.get(f"{self.url}/get_server_info") as response:
1095
- if response.status == 200:
1096
- return await response.json()
1097
- else:
1098
- error_data = await response.json()
1099
- raise RuntimeError(
1100
- f"Failed to get server info. {error_data['error']['message']}"
1101
- )
1102
-
1103
- def __del__(self):
1104
- self.shutdown()
+ # Some shortcuts for backward compatibility.
+ # They will be removed in new versions.
+ from sglang.srt.entrypoints.engine import Engine
+ from sglang.srt.entrypoints.http_server import kill_process_tree, launch_server
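
The four added lines above reduce sglang/srt/server.py to a thin backward-compatibility shim: the FastAPI HTTP server now lives in sglang/srt/entrypoints/http_server.py and the offline engine in sglang/srt/entrypoints/engine.py (entries 29 and 30 in the file list above). The sketch below illustrates what the shim means for downstream imports; it assumes the relocated Engine keeps the generate/shutdown interface of the removed implementation shown in this diff, and the model path is only a placeholder.

# Sketch under the assumptions stated above; not taken from the package itself.
# Both import paths name the same class, because server.py now only re-exports it.
from sglang.srt.entrypoints.engine import Engine            # new canonical location
from sglang.srt.server import Engine as LegacyEngine        # old path, via the shim

assert Engine is LegacyEngine  # the shim re-exports rather than redefines

# "Qwen/Qwen2.5-0.5B-Instruct" is a placeholder model path, not part of this diff.
llm = Engine(model_path="Qwen/Qwen2.5-0.5B-Instruct", log_level="error")

# Same call shape as the removed Engine.generate shown above; the return value
# is printed as-is rather than assuming a particular output schema.
out = llm.generate(
    "The capital city of France is",
    sampling_params={"temperature": 0, "max_new_tokens": 8},
)
print(out)

llm.shutdown()  # the removed implementation also registered shutdown via atexit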