sglang 0.4.1.post6__py3-none-any.whl → 0.4.1.post7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sglang/__init__.py +21 -23
- sglang/api.py +2 -7
- sglang/bench_offline_throughput.py +24 -16
- sglang/bench_one_batch.py +51 -3
- sglang/bench_one_batch_server.py +1 -1
- sglang/bench_serving.py +37 -28
- sglang/lang/backend/runtime_endpoint.py +183 -4
- sglang/lang/chat_template.py +15 -4
- sglang/launch_server.py +1 -1
- sglang/srt/_custom_ops.py +80 -42
- sglang/srt/configs/device_config.py +1 -1
- sglang/srt/configs/model_config.py +1 -0
- sglang/srt/constrained/base_grammar_backend.py +21 -0
- sglang/srt/constrained/xgrammar_backend.py +8 -4
- sglang/srt/conversation.py +14 -1
- sglang/srt/distributed/__init__.py +3 -3
- sglang/srt/distributed/communication_op.py +2 -1
- sglang/srt/distributed/device_communicators/cuda_wrapper.py +2 -1
- sglang/srt/distributed/device_communicators/custom_all_reduce.py +107 -40
- sglang/srt/distributed/device_communicators/custom_all_reduce_utils.py +2 -2
- sglang/srt/distributed/device_communicators/hpu_communicator.py +2 -1
- sglang/srt/distributed/device_communicators/pynccl.py +80 -1
- sglang/srt/distributed/device_communicators/pynccl_wrapper.py +112 -2
- sglang/srt/distributed/device_communicators/shm_broadcast.py +5 -72
- sglang/srt/distributed/device_communicators/xpu_communicator.py +2 -1
- sglang/srt/distributed/parallel_state.py +1 -1
- sglang/srt/distributed/utils.py +2 -1
- sglang/srt/entrypoints/engine.py +449 -0
- sglang/srt/entrypoints/http_server.py +579 -0
- sglang/srt/layers/activation.py +3 -3
- sglang/srt/layers/attention/flashinfer_backend.py +10 -9
- sglang/srt/layers/attention/triton_backend.py +4 -6
- sglang/srt/layers/attention/vision.py +204 -0
- sglang/srt/layers/dp_attention.py +69 -0
- sglang/srt/layers/linear.py +41 -5
- sglang/srt/layers/logits_processor.py +48 -63
- sglang/srt/layers/moe/ep_moe/layer.py +4 -4
- sglang/srt/layers/moe/fused_moe_native.py +69 -0
- sglang/srt/layers/moe/fused_moe_triton/fused_moe.py +9 -6
- sglang/srt/layers/moe/fused_moe_triton/layer.py +29 -5
- sglang/srt/layers/parameter.py +2 -1
- sglang/srt/layers/quantization/__init__.py +20 -23
- sglang/srt/layers/quantization/fp8.py +6 -3
- sglang/srt/layers/quantization/modelopt_quant.py +1 -2
- sglang/srt/layers/quantization/w8a8_int8.py +1 -1
- sglang/srt/layers/radix_attention.py +2 -2
- sglang/srt/layers/rotary_embedding.py +1179 -31
- sglang/srt/layers/sampler.py +39 -1
- sglang/srt/layers/vocab_parallel_embedding.py +2 -2
- sglang/srt/lora/lora.py +1 -9
- sglang/srt/managers/configure_logging.py +3 -0
- sglang/srt/managers/data_parallel_controller.py +79 -72
- sglang/srt/managers/detokenizer_manager.py +23 -6
- sglang/srt/managers/image_processor.py +158 -2
- sglang/srt/managers/io_struct.py +25 -2
- sglang/srt/managers/schedule_batch.py +49 -22
- sglang/srt/managers/schedule_policy.py +26 -12
- sglang/srt/managers/scheduler.py +277 -178
- sglang/srt/managers/session_controller.py +1 -0
- sglang/srt/managers/tokenizer_manager.py +206 -121
- sglang/srt/managers/tp_worker.py +6 -4
- sglang/srt/managers/tp_worker_overlap_thread.py +5 -8
- sglang/srt/managers/utils.py +44 -0
- sglang/srt/mem_cache/memory_pool.py +10 -32
- sglang/srt/metrics/collector.py +15 -6
- sglang/srt/model_executor/cuda_graph_runner.py +4 -6
- sglang/srt/model_executor/model_runner.py +37 -15
- sglang/srt/model_loader/loader.py +8 -6
- sglang/srt/model_loader/weight_utils.py +55 -2
- sglang/srt/models/baichuan.py +6 -6
- sglang/srt/models/chatglm.py +2 -2
- sglang/srt/models/commandr.py +3 -3
- sglang/srt/models/dbrx.py +4 -4
- sglang/srt/models/deepseek.py +3 -3
- sglang/srt/models/deepseek_v2.py +8 -8
- sglang/srt/models/exaone.py +2 -2
- sglang/srt/models/gemma.py +2 -2
- sglang/srt/models/gemma2.py +6 -24
- sglang/srt/models/gpt2.py +3 -5
- sglang/srt/models/gpt_bigcode.py +1 -1
- sglang/srt/models/granite.py +2 -2
- sglang/srt/models/grok.py +3 -3
- sglang/srt/models/internlm2.py +2 -2
- sglang/srt/models/llama.py +7 -5
- sglang/srt/models/minicpm.py +2 -2
- sglang/srt/models/minicpm3.py +6 -6
- sglang/srt/models/minicpmv.py +1238 -0
- sglang/srt/models/mixtral.py +3 -3
- sglang/srt/models/mixtral_quant.py +3 -3
- sglang/srt/models/mllama.py +2 -2
- sglang/srt/models/olmo.py +3 -3
- sglang/srt/models/olmo2.py +4 -4
- sglang/srt/models/olmoe.py +7 -13
- sglang/srt/models/phi3_small.py +2 -2
- sglang/srt/models/qwen.py +2 -2
- sglang/srt/models/qwen2.py +41 -4
- sglang/srt/models/qwen2_moe.py +3 -3
- sglang/srt/models/qwen2_vl.py +22 -122
- sglang/srt/models/stablelm.py +2 -2
- sglang/srt/models/torch_native_llama.py +3 -3
- sglang/srt/models/xverse.py +6 -6
- sglang/srt/models/xverse_moe.py +6 -6
- sglang/srt/openai_api/protocol.py +2 -0
- sglang/srt/sampling/custom_logit_processor.py +38 -0
- sglang/srt/sampling/sampling_batch_info.py +139 -4
- sglang/srt/sampling/sampling_params.py +3 -1
- sglang/srt/server.py +4 -1090
- sglang/srt/server_args.py +57 -14
- sglang/srt/utils.py +103 -65
- sglang/test/runners.py +8 -13
- sglang/test/test_programs.py +1 -1
- sglang/test/test_utils.py +3 -1
- sglang/utils.py +12 -2
- sglang/version.py +1 -1
- {sglang-0.4.1.post6.dist-info → sglang-0.4.1.post7.dist-info}/METADATA +16 -5
- {sglang-0.4.1.post6.dist-info → sglang-0.4.1.post7.dist-info}/RECORD +119 -115
- sglang/launch_server_llavavid.py +0 -25
- sglang/srt/constrained/__init__.py +0 -16
- sglang/srt/distributed/device_communicators/__init__.py +0 -0
- {sglang-0.4.1.post6.dist-info → sglang-0.4.1.post7.dist-info}/LICENSE +0 -0
- {sglang-0.4.1.post6.dist-info → sglang-0.4.1.post7.dist-info}/WHEEL +0 -0
- {sglang-0.4.1.post6.dist-info → sglang-0.4.1.post7.dist-info}/top_level.txt +0 -0
sglang/srt/entrypoints/http_server.py
ADDED
@@ -0,0 +1,579 @@
+# Copyright 2023-2024 SGLang Team
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""
+The entry point of inference server. (SRT = SGLang Runtime)
+
+This file implements HTTP APIs for the inference engine via fastapi.
+"""
+
+import asyncio
+import dataclasses
+import logging
+import multiprocessing as multiprocessing
+import os
+import threading
+import time
+from http import HTTPStatus
+from typing import AsyncIterator, Dict, Optional
+
+# Fix a bug of Python threading
+setattr(threading, "_register_atexit", lambda *args, **kwargs: None)
+
+import orjson
+import requests
+import uvicorn
+import uvloop
+from fastapi import FastAPI, File, Form, Request, UploadFile
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.responses import ORJSONResponse, Response, StreamingResponse
+
+from sglang.srt.entrypoints.engine import _launch_subprocesses
+from sglang.srt.managers.io_struct import (
+    CloseSessionReqInput,
+    ConfigureLoggingReq,
+    EmbeddingReqInput,
+    GenerateReqInput,
+    GetWeightsByNameReqInput,
+    InitWeightsUpdateGroupReqInput,
+    OpenSessionReqInput,
+    ReleaseMemoryOccupationReqInput,
+    ResumeMemoryOccupationReqInput,
+    UpdateWeightFromDiskReqInput,
+    UpdateWeightsFromDistributedReqInput,
+)
+from sglang.srt.managers.tokenizer_manager import TokenizerManager
+from sglang.srt.metrics.func_timer import enable_func_timer
+from sglang.srt.openai_api.adapter import (
+    v1_batches,
+    v1_cancel_batch,
+    v1_chat_completions,
+    v1_completions,
+    v1_delete_file,
+    v1_embeddings,
+    v1_files_create,
+    v1_retrieve_batch,
+    v1_retrieve_file,
+    v1_retrieve_file_content,
+)
+from sglang.srt.openai_api.protocol import ModelCard, ModelList
+from sglang.srt.server_args import ServerArgs
+from sglang.srt.utils import (
+    add_api_key_middleware,
+    add_prometheus_middleware,
+    delete_directory,
+    kill_process_tree,
+    set_uvicorn_logging_configs,
+)
+from sglang.utils import get_exception_traceback
+from sglang.version import __version__
+
+logger = logging.getLogger(__name__)
+asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
+
+# Fast API
+app = FastAPI()
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=["*"],
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
+
+# Store global states
+@dataclasses.dataclass
+class _GlobalState:
+    tokenizer_manager: TokenizerManager
+    scheduler_info: Dict
+
+
+_global_state: Optional[_GlobalState] = None
+
+
+def set_global_state(global_state: _GlobalState):
+    global _global_state
+    _global_state = global_state
+
+
+##### Native API endpoints #####
+
+
+@app.get("/health")
+async def health() -> Response:
+    """Check the health of the http server."""
+    return Response(status_code=200)
+
+
+@app.get("/health_generate")
+async def health_generate(request: Request) -> Response:
+    """Check the health of the inference server by generating one token."""
+
+    sampling_params = {"max_new_tokens": 1, "temperature": 0.7}
+
+    if _global_state.tokenizer_manager.is_generation:
+        gri = GenerateReqInput(
+            input_ids=[0], sampling_params=sampling_params, log_metrics=False
+        )
+    else:
+        gri = EmbeddingReqInput(
+            input_ids=[0], sampling_params=sampling_params, log_metrics=False
+        )
+
+    try:
+        async for _ in _global_state.tokenizer_manager.generate_request(gri, request):
+            break
+        return Response(status_code=200)
+    except Exception as e:
+        logger.exception(e)
+        return Response(status_code=503)
+
+
+@app.get("/get_model_info")
+async def get_model_info():
+    """Get the model information."""
+    result = {
+        "model_path": _global_state.tokenizer_manager.model_path,
+        "tokenizer_path": _global_state.tokenizer_manager.server_args.tokenizer_path,
+        "is_generation": _global_state.tokenizer_manager.is_generation,
+    }
+    return result
+
+
+@app.get("/get_server_info")
+async def get_server_info():
+    return {
+        **dataclasses.asdict(_global_state.tokenizer_manager.server_args),
+        **_global_state.scheduler_info,
+        "version": __version__,
+    }
+
+
+# fastapi implicitly converts json in the request to obj (dataclass)
+@app.api_route("/generate", methods=["POST", "PUT"])
+async def generate_request(obj: GenerateReqInput, request: Request):
+    """Handle a generate request."""
+    if obj.stream:
+
+        async def stream_results() -> AsyncIterator[bytes]:
+            try:
+                async for out in _global_state.tokenizer_manager.generate_request(
+                    obj, request
+                ):
+                    yield b"data: " + orjson.dumps(
+                        out, option=orjson.OPT_NON_STR_KEYS
+                    ) + b"\n\n"
+            except ValueError as e:
+                out = {"error": {"message": str(e)}}
+                yield b"data: " + orjson.dumps(
+                    out, option=orjson.OPT_NON_STR_KEYS
+                ) + b"\n\n"
+            yield b"data: [DONE]\n\n"
+
+        return StreamingResponse(
+            stream_results(),
+            media_type="text/event-stream",
+            background=_global_state.tokenizer_manager.create_abort_task(obj),
+        )
+    else:
+        try:
+            ret = await _global_state.tokenizer_manager.generate_request(
+                obj, request
+            ).__anext__()
+            return ret
+        except ValueError as e:
+            logger.error(f"Error: {e}")
+            return _create_error_response(e)
+
+
+@app.api_route("/encode", methods=["POST", "PUT"])
+async def encode_request(obj: EmbeddingReqInput, request: Request):
+    """Handle an embedding request."""
+    try:
+        ret = await _global_state.tokenizer_manager.generate_request(
+            obj, request
+        ).__anext__()
+        return ret
+    except ValueError as e:
+        return _create_error_response(e)
+
+
+@app.api_route("/classify", methods=["POST", "PUT"])
+async def classify_request(obj: EmbeddingReqInput, request: Request):
+    """Handle a reward model request. Now the arguments and return values are the same as embedding models."""
+    try:
+        ret = await _global_state.tokenizer_manager.generate_request(
+            obj, request
+        ).__anext__()
+        return ret
+    except ValueError as e:
+        return _create_error_response(e)
+
+
+@app.post("/flush_cache")
+async def flush_cache():
+    """Flush the radix cache."""
+    _global_state.tokenizer_manager.flush_cache()
+    return Response(
+        content="Cache flushed.\nPlease check backend logs for more details. "
+        "(When there are running or waiting requests, the operation will not be performed.)\n",
+        status_code=200,
+    )
+
+
+@app.api_route("/start_profile", methods=["GET", "POST"])
+async def start_profile_async():
+    """Start profiling."""
+    _global_state.tokenizer_manager.start_profile()
+    return Response(
+        content="Start profiling.\n",
+        status_code=200,
+    )
+
+
+@app.api_route("/stop_profile", methods=["GET", "POST"])
+async def stop_profile_async():
+    """Stop profiling."""
+    _global_state.tokenizer_manager.stop_profile()
+    return Response(
+        content="Stop profiling. This will take some time.\n",
+        status_code=200,
+    )
+
+
+@app.post("/update_weights_from_disk")
+async def update_weights_from_disk(obj: UpdateWeightFromDiskReqInput, request: Request):
+    """Update the weights from disk in-place without re-launching the server."""
+    success, message = await _global_state.tokenizer_manager.update_weights_from_disk(
+        obj, request
+    )
+    content = {"success": success, "message": message}
+    if success:
+        return ORJSONResponse(
+            content,
+            status_code=HTTPStatus.OK,
+        )
+    else:
+        return ORJSONResponse(
+            content,
+            status_code=HTTPStatus.BAD_REQUEST,
+        )
+
+
+@app.post("/init_weights_update_group")
+async def init_weights_update_group(
+    obj: InitWeightsUpdateGroupReqInput, request: Request
+):
+    """Initialize the parameter update group."""
+    success, message = await _global_state.tokenizer_manager.init_weights_update_group(
+        obj, request
+    )
+    content = {"success": success, "message": message}
+    if success:
+        return ORJSONResponse(content, status_code=200)
+    else:
+        return ORJSONResponse(content, status_code=HTTPStatus.BAD_REQUEST)
+
+
+@app.post("/update_weights_from_distributed")
+async def update_weights_from_distributed(
+    obj: UpdateWeightsFromDistributedReqInput, request: Request
+):
+    """Update model parameter from distributed online."""
+    success, message = (
+        await _global_state.tokenizer_manager.update_weights_from_distributed(
+            obj, request
+        )
+    )
+    content = {"success": success, "message": message}
+    if success:
+        return ORJSONResponse(content, status_code=200)
+    else:
+        return ORJSONResponse(content, status_code=HTTPStatus.BAD_REQUEST)
+
+
+@app.api_route("/get_weights_by_name", methods=["GET", "POST"])
+async def get_weights_by_name(obj: GetWeightsByNameReqInput, request: Request):
+    """Get model parameter by name."""
+    try:
+        ret = await _global_state.tokenizer_manager.get_weights_by_name(obj, request)
+        if ret is None:
+            return _create_error_response("Get parameter by name failed")
+        else:
+            return ORJSONResponse(ret, status_code=200)
+    except Exception as e:
+        return _create_error_response(e)
+
+
+@app.api_route("/release_memory_occupation", methods=["GET", "POST"])
+async def release_memory_occupation(
+    obj: ReleaseMemoryOccupationReqInput, request: Request
+):
+    """Release GPU occupation temporarily"""
+    try:
+        await _global_state.tokenizer_manager.release_memory_occupation(obj, request)
+    except Exception as e:
+        return _create_error_response(e)
+
+
+@app.api_route("/resume_memory_occupation", methods=["GET", "POST"])
+async def resume_memory_occupation(
+    obj: ResumeMemoryOccupationReqInput, request: Request
+):
+    """Resume GPU occupation"""
+    try:
+        await _global_state.tokenizer_manager.resume_memory_occupation(obj, request)
+    except Exception as e:
+        return _create_error_response(e)
+
+
+@app.api_route("/open_session", methods=["GET", "POST"])
+async def open_session(obj: OpenSessionReqInput, request: Request):
+    """Open a session, and return its unique session id."""
+    try:
+        session_id = await _global_state.tokenizer_manager.open_session(obj, request)
+        if session_id is None:
+            raise Exception(
+                "Failed to open the session. Check if a session with the same id is still open."
+            )
+        return session_id
+    except Exception as e:
+        return _create_error_response(e)
+
+
+@app.api_route("/close_session", methods=["GET", "POST"])
+async def close_session(obj: CloseSessionReqInput, request: Request):
+    """Close the session"""
+    try:
+        await _global_state.tokenizer_manager.close_session(obj, request)
+        return Response(status_code=200)
+    except Exception as e:
+        return _create_error_response(e)
+
+
+@app.api_route("/configure_logging", methods=["GET", "POST"])
+async def configure_logging(obj: ConfigureLoggingReq, request: Request):
+    """Configure the request logging options."""
+    _global_state.tokenizer_manager.configure_logging(obj)
+    return Response(status_code=200)
+
+
+##### OpenAI-compatible API endpoints #####
+
+
+@app.post("/v1/completions")
+async def openai_v1_completions(raw_request: Request):
+    return await v1_completions(_global_state.tokenizer_manager, raw_request)
+
+
+@app.post("/v1/chat/completions")
+async def openai_v1_chat_completions(raw_request: Request):
+    return await v1_chat_completions(_global_state.tokenizer_manager, raw_request)
+
+
+@app.post("/v1/embeddings", response_class=ORJSONResponse)
+async def openai_v1_embeddings(raw_request: Request):
+    response = await v1_embeddings(_global_state.tokenizer_manager, raw_request)
+    return response
+
+
+@app.get("/v1/models", response_class=ORJSONResponse)
+def available_models():
+    """Show available models."""
+    served_model_names = [_global_state.tokenizer_manager.served_model_name]
+    model_cards = []
+    for served_model_name in served_model_names:
+        model_cards.append(ModelCard(id=served_model_name, root=served_model_name))
+    return ModelList(data=model_cards)
+
+
+@app.post("/v1/files")
+async def openai_v1_files(file: UploadFile = File(...), purpose: str = Form("batch")):
+    return await v1_files_create(
+        file, purpose, _global_state.tokenizer_manager.server_args.file_storage_pth
+    )
+
+
+@app.delete("/v1/files/{file_id}")
+async def delete_file(file_id: str):
+    # https://platform.openai.com/docs/api-reference/files/delete
+    return await v1_delete_file(file_id)
+
+
+@app.post("/v1/batches")
+async def openai_v1_batches(raw_request: Request):
+    return await v1_batches(_global_state.tokenizer_manager, raw_request)
+
+
+@app.post("/v1/batches/{batch_id}/cancel")
+async def cancel_batches(batch_id: str):
+    # https://platform.openai.com/docs/api-reference/batch/cancel
+    return await v1_cancel_batch(_global_state.tokenizer_manager, batch_id)
+
+
+@app.get("/v1/batches/{batch_id}")
+async def retrieve_batch(batch_id: str):
+    return await v1_retrieve_batch(batch_id)
+
+
+@app.get("/v1/files/{file_id}")
+async def retrieve_file(file_id: str):
+    # https://platform.openai.com/docs/api-reference/files/retrieve
+    return await v1_retrieve_file(file_id)
+
+
+@app.get("/v1/files/{file_id}/content")
+async def retrieve_file_content(file_id: str):
+    # https://platform.openai.com/docs/api-reference/files/retrieve-contents
+    return await v1_retrieve_file_content(file_id)
+
+
+def _create_error_response(e):
+    return ORJSONResponse(
+        {"error": {"message": str(e)}}, status_code=HTTPStatus.BAD_REQUEST
+    )
+
+
+def launch_server(
+    server_args: ServerArgs,
+    pipe_finish_writer: Optional[multiprocessing.connection.Connection] = None,
+):
+    """
+    Launch SRT (SGLang Runtime) Server.
+
+    The SRT server consists of an HTTP server and an SRT engine.
+
+    - HTTP server: A FastAPI server that routes requests to the engine.
+    - The engine consists of three components:
+        1. TokenizerManager: Tokenizes the requests and sends them to the scheduler.
+        2. Scheduler (subprocess): Receives requests from the Tokenizer Manager, schedules batches, forwards them, and sends the output tokens to the Detokenizer Manager.
+        3. DetokenizerManager (subprocess): Detokenizes the output tokens and sends the result back to the Tokenizer Manager.
+
+    Note:
+    1. The HTTP server, Engine, and TokenizerManager all run in the main process.
+    2. Inter-process communication is done through IPC (each process uses a different port) via the ZMQ library.
+    """
+    tokenizer_manager, scheduler_info = _launch_subprocesses(server_args=server_args)
+    set_global_state(
+        _GlobalState(
+            tokenizer_manager=tokenizer_manager,
+            scheduler_info=scheduler_info,
+        )
+    )
+
+    # Add api key authorization
+    if server_args.api_key:
+        add_api_key_middleware(app, server_args.api_key)
+
+    # Add prometheus middleware
+    if server_args.enable_metrics:
+        add_prometheus_middleware(app)
+        enable_func_timer()
+
+    # Send a warmup request
+    t = threading.Thread(
+        target=_wait_and_warmup,
+        args=(
+            server_args,
+            pipe_finish_writer,
+            _global_state.tokenizer_manager.image_token_id,
+        ),
+    )
+    t.start()
+
+    try:
+        # Update logging configs
+        set_uvicorn_logging_configs()
+
+        # Listen for HTTP requests
+        uvicorn.run(
+            app,
+            host=server_args.host,
+            port=server_args.port,
+            log_level=server_args.log_level_http or server_args.log_level,
+            timeout_keep_alive=5,
+            loop="uvloop",
+        )
+    finally:
+        t.join()
+
+
+def _wait_and_warmup(server_args, pipe_finish_writer, image_token_text):
+    headers = {}
+    url = server_args.url()
+    if server_args.api_key:
+        headers["Authorization"] = f"Bearer {server_args.api_key}"
+
+    # Wait until the server is launched
+    success = False
+    for _ in range(120):
+        time.sleep(1)
+        try:
+            res = requests.get(url + "/get_model_info", timeout=5, headers=headers)
+            assert res.status_code == 200, f"{res=}, {res.text=}"
+            success = True
+            break
+        except (AssertionError, requests.exceptions.RequestException):
+            last_traceback = get_exception_traceback()
+            pass
+
+    if not success:
+        if pipe_finish_writer is not None:
+            pipe_finish_writer.send(last_traceback)
+        logger.error(f"Initialization failed. warmup error: {last_traceback}")
+        kill_process_tree(os.getpid())
+        return
+
+    model_info = res.json()
+
+    # Send a warmup request
+    request_name = "/generate" if model_info["is_generation"] else "/encode"
+    max_new_tokens = 8 if model_info["is_generation"] else 1
+    json_data = {
+        "sampling_params": {
+            "temperature": 0,
+            "max_new_tokens": max_new_tokens,
+        },
+    }
+    if server_args.skip_tokenizer_init:
+        json_data["input_ids"] = [10, 11, 12]
+    else:
+        json_data["text"] = "The capital city of France is"
+
+    try:
+        for _ in range(server_args.dp_size):
+            res = requests.post(
+                url + request_name,
+                json=json_data,
+                headers=headers,
+                timeout=600,
+            )
+            assert res.status_code == 200, f"{res}"
+    except Exception:
+        last_traceback = get_exception_traceback()
+        if pipe_finish_writer is not None:
+            pipe_finish_writer.send(last_traceback)
+        logger.error(f"Initialization failed. warmup error: {last_traceback}")
+        kill_process_tree(os.getpid())
+        return
+
+    # Debug print
+    # logger.info(f"{res.json()=}")
+
+    logger.info("The server is fired up and ready to roll!")
+    if pipe_finish_writer is not None:
+        pipe_finish_writer.send("ready")
+
+    if server_args.delete_ckpt_after_loading:
+        delete_directory(server_args.model_path)
sglang/srt/layers/activation.py
CHANGED
@@ -25,13 +25,13 @@ from sglang.srt.utils import is_flashinfer_available
 if is_flashinfer_available():
     from flashinfer.activation import gelu_and_mul, gelu_tanh_and_mul, silu_and_mul

-from vllm.
+from vllm.model_executor.custom_op import CustomOp
+
+from sglang.srt.distributed import (
     divide,
     get_tensor_model_parallel_rank,
     get_tensor_model_parallel_world_size,
 )
-from vllm.model_executor.custom_op import CustomOp
-
 from sglang.srt.layers.custom_op_util import register_custom_op
 from sglang.srt.layers.quantization.base_config import QuantizationConfig
 from sglang.srt.utils import set_weight_attrs
sglang/srt/layers/attention/flashinfer_backend.py
CHANGED
@@ -18,6 +18,7 @@ import triton.language as tl

 from sglang.global_config import global_config
 from sglang.srt.layers.attention import AttentionBackend
+from sglang.srt.layers.dp_attention import get_attention_tp_size
 from sglang.srt.model_executor.forward_batch_info import ForwardBatch, ForwardMode
 from sglang.srt.utils import is_flashinfer_available

@@ -62,9 +63,9 @@ class FlashInferAttnBackend(AttentionBackend):
         self.decode_use_tensor_cores = should_use_tensor_core(
             kv_cache_dtype=model_runner.kv_cache_dtype,
             num_attention_heads=model_runner.model_config.num_attention_heads
-            //
+            // get_attention_tp_size(),
             num_kv_heads=model_runner.model_config.get_num_kv_heads(
-
+                get_attention_tp_size()
             ),
         )
         self.max_context_len = model_runner.model_config.context_len
@@ -147,7 +148,7 @@ class FlashInferAttnBackend(AttentionBackend):
         self.prefill_cuda_graph_metadata = {}

     def init_forward_metadata(self, forward_batch: ForwardBatch):
-        if forward_batch.forward_mode.
+        if forward_batch.forward_mode.is_decode_or_idle():
             self.indices_updater_decode.update(
                 forward_batch.req_pool_indices,
                 forward_batch.seq_lens,
@@ -238,7 +239,7 @@ class FlashInferAttnBackend(AttentionBackend):
         forward_mode: ForwardMode,
         spec_info: Optional[SpecInfo],
     ):
-        if forward_mode.
+        if forward_mode.is_decode_or_idle():
             decode_wrappers = []
             for i in range(self.num_wrappers):
                 decode_wrappers.append(
@@ -307,7 +308,7 @@ class FlashInferAttnBackend(AttentionBackend):
         forward_mode: ForwardMode,
         spec_info: Optional[SpecInfo],
     ):
-        if forward_mode.
+        if forward_mode.is_decode_or_idle():
             self.indices_updater_decode.update(
                 req_pool_indices[:bs],
                 seq_lens[:bs],
@@ -453,10 +454,10 @@ class FlashInferIndicesUpdaterDecode:
     def __init__(self, model_runner: ModelRunner, attn_backend: AttentionBackend):
         # Parse Constants
         self.num_qo_heads = (
-            model_runner.model_config.num_attention_heads //
+            model_runner.model_config.num_attention_heads // get_attention_tp_size()
         )
         self.num_kv_heads = model_runner.model_config.get_num_kv_heads(
-
+            get_attention_tp_size()
         )
         self.head_dim = model_runner.model_config.head_dim
         self.data_type = model_runner.kv_cache_dtype
@@ -625,10 +626,10 @@ class FlashInferIndicesUpdaterPrefill:
    def __init__(self, model_runner: ModelRunner, attn_backend: AttentionBackend):
         # Parse Constants
         self.num_qo_heads = (
-            model_runner.model_config.num_attention_heads //
+            model_runner.model_config.num_attention_heads // get_attention_tp_size()
         )
         self.num_kv_heads = model_runner.model_config.get_num_kv_heads(
-
+            get_attention_tp_size()
         )
         self.head_dim = model_runner.model_config.head_dim
         self.data_type = model_runner.kv_cache_dtype
sglang/srt/layers/attention/triton_backend.py
CHANGED
@@ -5,6 +5,7 @@ from typing import TYPE_CHECKING, Optional
 import torch

 from sglang.srt.layers.attention import AttentionBackend
+from sglang.srt.layers.dp_attention import get_attention_tp_size
 from sglang.srt.model_executor.forward_batch_info import ForwardBatch, ForwardMode

 if TYPE_CHECKING:
@@ -28,12 +29,9 @@ class TritonAttnBackend(AttentionBackend):
         self.decode_attention_fwd = decode_attention_fwd
         self.extend_attention_fwd = extend_attention_fwd

-
-
-
-        self.num_head = (
-            model_runner.model_config.num_attention_heads // model_runner.tp_size
-        )
+        self.num_head = (
+            model_runner.model_config.num_attention_heads // get_attention_tp_size()
+        )

         self.num_kv_splits = model_runner.server_args.triton_attention_num_kv_splits
         self.v_head_dim = model_runner.token_to_kv_pool.get_value_buffer(0).shape[-1]