sglang 0.2.15__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sglang/bench_latency.py +10 -6
- sglang/bench_serving.py +33 -38
- sglang/global_config.py +0 -4
- sglang/lang/backend/runtime_endpoint.py +13 -6
- sglang/lang/interpreter.py +1 -1
- sglang/launch_server.py +3 -6
- sglang/launch_server_llavavid.py +7 -8
- sglang/srt/{model_config.py → configs/model_config.py} +5 -0
- sglang/srt/constrained/__init__.py +2 -0
- sglang/srt/constrained/fsm_cache.py +29 -38
- sglang/srt/constrained/jump_forward.py +0 -1
- sglang/srt/conversation.py +4 -1
- sglang/srt/hf_transformers_utils.py +2 -4
- sglang/srt/layers/attention_backend.py +480 -0
- sglang/srt/layers/flashinfer_utils.py +235 -0
- sglang/srt/layers/logits_processor.py +64 -77
- sglang/srt/layers/radix_attention.py +11 -161
- sglang/srt/layers/sampler.py +40 -35
- sglang/srt/layers/torchao_utils.py +75 -0
- sglang/srt/layers/{decode_attention.py → triton_attention/decode_attention.py} +67 -63
- sglang/srt/layers/{extend_attention.py → triton_attention/extend_attention.py} +40 -132
- sglang/srt/layers/{prefill_attention.py → triton_attention/prefill_attention.py} +13 -7
- sglang/srt/lora/lora.py +403 -0
- sglang/srt/lora/lora_config.py +43 -0
- sglang/srt/lora/lora_manager.py +256 -0
- sglang/srt/managers/controller_multi.py +1 -5
- sglang/srt/managers/controller_single.py +0 -5
- sglang/srt/managers/io_struct.py +16 -1
- sglang/srt/managers/policy_scheduler.py +122 -5
- sglang/srt/managers/schedule_batch.py +110 -74
- sglang/srt/managers/tokenizer_manager.py +24 -15
- sglang/srt/managers/tp_worker.py +181 -115
- sglang/srt/model_executor/cuda_graph_runner.py +60 -133
- sglang/srt/model_executor/forward_batch_info.py +35 -312
- sglang/srt/model_executor/model_runner.py +118 -141
- sglang/srt/models/baichuan.py +416 -0
- sglang/srt/models/chatglm.py +6 -8
- sglang/srt/models/commandr.py +1 -5
- sglang/srt/models/dbrx.py +1 -5
- sglang/srt/models/deepseek.py +1 -5
- sglang/srt/models/deepseek_v2.py +1 -5
- sglang/srt/models/exaone.py +8 -43
- sglang/srt/models/gemma.py +1 -5
- sglang/srt/models/gemma2.py +1 -5
- sglang/srt/models/gpt_bigcode.py +1 -5
- sglang/srt/models/grok.py +1 -5
- sglang/srt/models/internlm2.py +1 -5
- sglang/srt/models/{llama2.py → llama.py} +48 -26
- sglang/srt/models/llama_classification.py +14 -40
- sglang/srt/models/llama_embedding.py +7 -6
- sglang/srt/models/llava.py +38 -16
- sglang/srt/models/llavavid.py +7 -8
- sglang/srt/models/minicpm.py +1 -5
- sglang/srt/models/minicpm3.py +665 -0
- sglang/srt/models/mistral.py +2 -3
- sglang/srt/models/mixtral.py +6 -5
- sglang/srt/models/mixtral_quant.py +1 -5
- sglang/srt/models/qwen.py +1 -5
- sglang/srt/models/qwen2.py +1 -5
- sglang/srt/models/qwen2_moe.py +6 -5
- sglang/srt/models/stablelm.py +1 -5
- sglang/srt/models/xverse.py +375 -0
- sglang/srt/models/xverse_moe.py +445 -0
- sglang/srt/openai_api/adapter.py +65 -46
- sglang/srt/openai_api/protocol.py +11 -3
- sglang/srt/sampling/sampling_batch_info.py +67 -58
- sglang/srt/server.py +24 -14
- sglang/srt/server_args.py +130 -28
- sglang/srt/utils.py +12 -0
- sglang/test/few_shot_gsm8k.py +132 -0
- sglang/test/runners.py +114 -22
- sglang/test/test_programs.py +70 -0
- sglang/test/test_utils.py +89 -1
- sglang/utils.py +38 -4
- sglang/version.py +1 -1
- {sglang-0.2.15.dist-info → sglang-0.3.1.dist-info}/METADATA +31 -18
- sglang-0.3.1.dist-info/RECORD +129 -0
- {sglang-0.2.15.dist-info → sglang-0.3.1.dist-info}/WHEEL +1 -1
- sglang-0.2.15.dist-info/RECORD +0 -118
- {sglang-0.2.15.dist-info → sglang-0.3.1.dist-info}/LICENSE +0 -0
- {sglang-0.2.15.dist-info → sglang-0.3.1.dist-info}/top_level.txt +0 -0
sglang/srt/managers/controller_multi.py
CHANGED
@@ -71,12 +71,10 @@ class ControllerMulti:
         self,
         server_args: ServerArgs,
         port_args: PortArgs,
-        model_override_args,
     ):
         # Parse args
         self.server_args = server_args
         self.port_args = port_args
-        self.model_override_args = model_override_args
         self.load_balance_method = LoadBalanceMethod.from_str(
             server_args.load_balance_method
         )
@@ -114,7 +112,6 @@ class ControllerMulti:
                 self.server_args,
                 self.port_args,
                 pipe_controller_writer,
-                self.model_override_args,
                 True,
                 gpu_ids,
                 dp_worker_id,
@@ -189,14 +186,13 @@ def start_controller_process(
     server_args: ServerArgs,
     port_args: PortArgs,
     pipe_writer,
-    model_override_args: dict,
 ):
     """Start a controller process."""

     configure_logger(server_args)

     try:
-        controller = ControllerMulti(server_args, port_args, model_override_args)
+        controller = ControllerMulti(server_args, port_args)
     except Exception:
         pipe_writer.send(get_exception_traceback())
         raise
sglang/srt/managers/controller_single.py
CHANGED
@@ -40,7 +40,6 @@ class ControllerSingle:
         self,
         server_args: ServerArgs,
         port_args: PortArgs,
-        model_override_args: dict,
         gpu_ids: List[int],
         is_data_parallel_worker: bool,
         dp_worker_id: int,
@@ -76,7 +75,6 @@ class ControllerSingle:
                 tp_rank_range,
                 server_args,
                 port_args.nccl_ports[dp_worker_id],
-                model_override_args,
             )

         # Launch tp rank 0
@@ -85,7 +83,6 @@ class ControllerSingle:
             0,
             server_args,
             port_args.nccl_ports[dp_worker_id],
-            model_override_args,
         )
         self.tp_cpu_group = self.tp_server.model_runner.tp_group.cpu_group

@@ -126,7 +123,6 @@ def start_controller_process(
     server_args: ServerArgs,
     port_args: PortArgs,
     pipe_writer: multiprocessing.connection.Connection,
-    model_override_args: dict,
     is_data_parallel_worker: bool = False,
     gpu_ids: List[int] = None,
     dp_worker_id: int = None,
@@ -149,7 +145,6 @@ def start_controller_process(
         controller = ControllerSingle(
             server_args,
             port_args,
-            model_override_args,
             gpu_ids,
             is_data_parallel_worker,
             dp_worker_id,
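Taken together, the controller hunks above drop the `model_override_args` parameter from `ControllerMulti`, `ControllerSingle`, and both `start_controller_process` entry points. A minimal sketch of an updated call site follows; it assumes any model overrides are now carried inside `ServerArgs` before the controller is constructed (this diff only shows the argument being removed, not where the data moved):

```python
# Illustrative only: the 0.3.1 controller entry points no longer accept a
# separate model_override_args dict.
from sglang.srt.managers.controller_multi import ControllerMulti


def make_multi_controller(server_args, port_args):
    # 0.2.15: ControllerMulti(server_args, port_args, model_override_args)
    # 0.3.1:
    return ControllerMulti(server_args, port_args)
```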
sglang/srt/managers/io_struct.py
CHANGED
@@ -20,7 +20,7 @@ processes (TokenizerManager, DetokenizerManager, Controller).

 import copy
 import uuid
-from dataclasses import dataclass
+from dataclasses import dataclass
 from typing import Dict, List, Optional, Union

 from sglang.srt.managers.schedule_batch import BaseFinishReason
@@ -43,6 +43,7 @@ class GenerateReqInput:
     # Whether to return logprobs.
     return_logprob: Optional[Union[List[bool], bool]] = None
     # If return logprobs, the start location in the prompt for returning logprobs.
+    # By default, this value is "-1", which means it will only return logprobs for output tokens.
     logprob_start_len: Optional[Union[List[int], int]] = None
     # If return logprobs, the number of top logprobs to return at each position.
     top_logprobs_num: Optional[Union[List[int], int]] = None
@@ -50,6 +51,13 @@ class GenerateReqInput:
     return_text_in_logprobs: bool = False
     # Whether to stream output.
     stream: bool = False
+    # The modalities of the image data [image, multi-images, video]
+    modalities: Optional[List[str]] = None
+
+    is_single: bool = True
+
+    # LoRA related
+    lora_path: Optional[Union[List[Optional[str]], Optional[str]]] = None

     def post_init(self):
         if (self.text is None and self.input_ids is None) or (
@@ -177,6 +185,11 @@ class TokenizedGenerateReqInput:
     top_logprobs_num: int
     # Whether to stream output
     stream: bool
+    # Modalities of the input images
+    modalites: Optional[List[str]] = None
+
+    # LoRA related
+    lora_path: Optional[str] = None  # None means just use the base model


 @dataclass
@@ -190,6 +203,8 @@ class EmbeddingReqInput:
     # Dummy sampling params for compatibility
     sampling_params: Union[List[Dict], Dict] = None

+    is_single: bool = True
+
     def post_init(self):
         if (self.text is None and self.input_ids is None) or (
             self.text is not None and self.input_ids is not None
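For callers of the generate endpoint, the practical change in `GenerateReqInput` is the pair of new optional fields added above: `modalities` for describing image inputs and `lora_path` for per-request LoRA selection. A short sketch of a request populated with them; the field names come from the hunks, while the concrete values and adapter name are placeholders:

```python
# Sketch: using the new 0.3.1 request fields on GenerateReqInput.
from sglang.srt.managers.io_struct import GenerateReqInput

req = GenerateReqInput(
    text="Describe this picture.",
    sampling_params={"max_new_tokens": 64},
    modalities=["image"],        # one of: image, multi-images, video
    lora_path="my-adapter",      # None (the default) means the base model
)
req.post_init()  # validates that exactly one of text / input_ids is provided
```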
sglang/srt/managers/policy_scheduler.py
CHANGED
@@ -108,18 +108,25 @@ class PrefillAdder:
     def __init__(
         self,
         tree_cache: BasePrefixCache,
+        running_batch: ScheduleBatch,
+        new_token_ratio: float,
         rem_total_tokens: int,
         rem_input_tokens: int,
         rem_chunk_tokens: Optional[int],
         mixed_with_decode_tokens: int = 0,
     ):
         self.tree_cache = tree_cache
+        self.running_batch = running_batch
+        self.new_token_ratio = new_token_ratio
         self.rem_total_tokens = rem_total_tokens - mixed_with_decode_tokens
+        self.rem_total_tokens_ = self.rem_total_tokens
+        self.total_tokens = rem_total_tokens
         self.rem_input_tokens = rem_input_tokens - mixed_with_decode_tokens
         self.rem_chunk_tokens = rem_chunk_tokens
         if self.rem_chunk_tokens is not None:
             self.rem_chunk_tokens -= mixed_with_decode_tokens

+        self.req_states = None
         self.can_run_list = []
         self.new_inflight_req = None
         self.log_hit_tokens = 0
@@ -136,16 +143,20 @@ class PrefillAdder:
             )
         )

-    def remove_running_tokens(
-        self, running_batch: ScheduleBatch, new_token_ratio: float
-    ):
+    def remove_running_tokens(self, running_batch: ScheduleBatch):
         self.rem_total_tokens -= sum(
             [
                 min(
                     (r.sampling_params.max_new_tokens - len(r.output_ids)),
                     CLIP_MAX_NEW_TOKENS,
                 )
-                * new_token_ratio
+                * self.new_token_ratio
+                for r in running_batch.reqs
+            ]
+        )
+        self.rem_total_tokens_ -= sum(
+            [
+                r.sampling_params.max_new_tokens - len(r.output_ids)
                 for r in running_batch.reqs
             ]
         )
@@ -154,6 +165,7 @@ class PrefillAdder:
         self, prefix_len: int, extend_input_len: int, max_new_tokens: int
     ):
         self.rem_total_tokens -= extend_input_len + max_new_tokens
+        self.rem_total_tokens_ -= extend_input_len + max_new_tokens
         self.rem_input_tokens -= extend_input_len
         if self.rem_chunk_tokens is not None:
             self.rem_chunk_tokens -= extend_input_len
@@ -161,7 +173,29 @@ class PrefillAdder:
         self.log_hit_tokens += prefix_len
         self.log_input_tokens += extend_input_len

+    def add_inflight_req_ignore_eos(self, req: Req):
+        truncated = req.extend_input_len > self.rem_chunk_tokens
+        req.extend_input_len = min(req.extend_input_len, self.rem_chunk_tokens)
+        req.fill_ids = req.fill_ids[: len(req.prefix_indices) + req.extend_input_len]
+        self.can_run_list.append(req)
+
+        self._prefill_one_req(
+            0,
+            req.extend_input_len,
+            (
+                min(req.sampling_params.max_new_tokens, CLIP_MAX_NEW_TOKENS)
+                if not truncated
+                else 0
+            ),
+        )
+
+        # Return if chunked prefill not finished
+        return req if truncated else None
+
     def add_inflight_req(self, req: Req):
+        if req.sampling_params.ignore_eos:
+            return self.add_inflight_req_ignore_eos(req)
+
         truncated = req.extend_input_len > self.rem_chunk_tokens
         req.extend_input_len = min(req.extend_input_len, self.rem_chunk_tokens)
         req.fill_ids = req.fill_ids[: len(req.prefix_indices) + req.extend_input_len]
@@ -190,7 +224,90 @@ class PrefillAdder:
         delta = self.tree_cache.dec_lock_ref(last_node)
         self.rem_total_tokens += delta

+    def add_one_req_ignore_eos(self, req: Req):
+        def get_req_state(r):
+            new_token_ratio = (
+                1.0 if r.sampling_params.ignore_eos else self.new_token_ratio
+            )
+            tokens_left = r.sampling_params.max_new_tokens * new_token_ratio - len(
+                r.output_ids
+            )
+            tokens_occupied = len(r.origin_input_ids) + len(r.output_ids)
+
+            if tokens_left > 0:
+                return (tokens_left, tokens_occupied)
+
+            return None
+
+        # Quick Check
+        can_run = False
+        if (
+            req.extend_input_len + req.sampling_params.max_new_tokens
+            <= self.rem_total_tokens
+        ):
+            can_run = True
+
+        if not can_run:
+            if self.req_states is None:
+                self.req_states = []
+                if self.running_batch is not None:
+                    for r in self.running_batch.reqs:
+                        state = get_req_state(r)
+                        if state is not None:
+                            self.req_states.append(state)
+                for r in self.can_run_list:
+                    state = get_req_state(r)
+                    if state is not None:
+                        self.req_states.append(state)
+                state = get_req_state(req)
+                if state is not None:
+                    self.req_states.append(state)
+
+                self.req_states.sort(key=lambda x: x[0])
+            else:
+                state = get_req_state(req)
+                if state is not None:
+                    for i, (tokens_left, tokens_occupied) in enumerate(self.req_states):
+                        if tokens_left >= state[0]:
+                            self.req_states.insert(i, state)
+                            break
+                    else:
+                        self.req_states.append(state)
+
+            tokens_freed = 0
+            for i, (tokens_left, tokens_occupied) in enumerate(self.req_states):
+                decode_steps = (
+                    self.req_states[i + 1][0]
+                    if i + 1 < len(self.req_states)
+                    else tokens_left
+                )
+                bs = len(self.req_states) - i
+                if self.total_tokens + tokens_freed - decode_steps * bs <= 0:
+                    return False
+                tokens_freed += tokens_occupied
+
+        if req.extend_input_len <= self.rem_chunk_tokens:
+            self.can_run_list.append(req)
+            self._prefill_one_req(
+                0,
+                req.extend_input_len,
+                min(req.sampling_params.max_new_tokens, CLIP_MAX_NEW_TOKENS),
+            )
+        else:
+            # Chunked prefill
+            trunc_len = self.rem_chunk_tokens
+            req.extend_input_len = trunc_len
+            req.fill_ids = req.fill_ids[:trunc_len]
+            self.can_run_list.append(req)
+            self.new_inflight_req = req
+            self._prefill_one_req(0, trunc_len, 0)
+
+        return True
+
     def add_one_req(self, req: Req):
+        if req.sampling_params.ignore_eos and self.tree_cache.disable:
+            return self.add_one_req_ignore_eos(req)
+
         total_tokens = req.extend_input_len + min(
             req.sampling_params.max_new_tokens, CLIP_MAX_NEW_TOKENS
         )
@@ -233,4 +350,4 @@ class PrefillAdder:
             self.tree_cache.inc_lock_ref(req.last_node)
             self._prefill_one_req(prefix_len, trunc_len, 0)

-        return True
+        return True and not self.no_remaining_tokens()