sglang 0.1.18__py3-none-any.whl → 0.1.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. sglang/__init__.py +1 -1
  2. sglang/api.py +26 -0
  3. sglang/backend/runtime_endpoint.py +18 -14
  4. sglang/bench_latency.py +40 -18
  5. sglang/global_config.py +21 -16
  6. sglang/lang/chat_template.py +41 -6
  7. sglang/lang/interpreter.py +5 -1
  8. sglang/lang/ir.py +61 -25
  9. sglang/srt/constrained/__init__.py +3 -2
  10. sglang/srt/hf_transformers_utils.py +7 -3
  11. sglang/srt/layers/extend_attention.py +2 -1
  12. sglang/srt/layers/fused_moe.py +181 -167
  13. sglang/srt/layers/logits_processor.py +55 -19
  14. sglang/srt/layers/radix_attention.py +33 -59
  15. sglang/srt/layers/token_attention.py +4 -8
  16. sglang/srt/managers/controller/cuda_graph_runner.py +172 -0
  17. sglang/srt/managers/controller/infer_batch.py +244 -36
  18. sglang/srt/managers/controller/manager_single.py +1 -1
  19. sglang/srt/managers/controller/model_runner.py +69 -284
  20. sglang/srt/managers/controller/tp_worker.py +39 -20
  21. sglang/srt/managers/detokenizer_manager.py +4 -2
  22. sglang/srt/managers/io_struct.py +1 -1
  23. sglang/srt/managers/tokenizer_manager.py +14 -13
  24. sglang/srt/memory_pool.py +33 -6
  25. sglang/srt/model_config.py +6 -0
  26. sglang/srt/models/gemma2.py +436 -0
  27. sglang/srt/models/llama2.py +3 -3
  28. sglang/srt/models/llama_classification.py +10 -7
  29. sglang/srt/models/minicpm.py +373 -0
  30. sglang/srt/models/qwen2_moe.py +454 -0
  31. sglang/srt/openai_api_adapter.py +2 -2
  32. sglang/srt/openai_protocol.py +1 -1
  33. sglang/srt/server.py +18 -8
  34. sglang/srt/server_args.py +24 -20
  35. sglang/srt/utils.py +68 -35
  36. {sglang-0.1.18.dist-info → sglang-0.1.20.dist-info}/METADATA +19 -13
  37. {sglang-0.1.18.dist-info → sglang-0.1.20.dist-info}/RECORD +40 -36
  38. {sglang-0.1.18.dist-info → sglang-0.1.20.dist-info}/WHEEL +1 -1
  39. {sglang-0.1.18.dist-info → sglang-0.1.20.dist-info}/LICENSE +0 -0
  40. {sglang-0.1.18.dist-info → sglang-0.1.20.dist-info}/top_level.txt +0 -0
sglang/lang/ir.py CHANGED
@@ -23,6 +23,10 @@ class SglSamplingParams:
     frequency_penalty: float = 0.0
     presence_penalty: float = 0.0
     ignore_eos: bool = False
+    return_logprob: Optional[bool] = None
+    logprob_start_len: Optional[int] = None,
+    top_logprobs_num: Optional[int] = None,
+    return_text_in_logprobs: Optional[bool] = None,
 
     # for constrained generation, not included in to_xxx_kwargs
     dtype: Optional[str] = None
@@ -37,6 +41,11 @@ class SglSamplingParams:
             self.top_k,
             self.frequency_penalty,
             self.presence_penalty,
+            self.ignore_eos,
+            self.return_logprob,
+            self.logprob_start_len,
+            self.top_logprobs_num,
+            self.return_text_in_logprobs,
         )
 
     def to_openai_kwargs(self):
@@ -139,6 +148,10 @@ class SglFunction:
         frequency_penalty: float = 0.0,
         presence_penalty: float = 0.0,
         ignore_eos: bool = False,
+        return_logprob: Optional[bool] = None,
+        logprob_start_len: Optional[int] = None,
+        top_logprobs_num: Optional[int] = None,
+        return_text_in_logprobs: Optional[bool] = None,
         stream: bool = False,
         backend=None,
         **kwargs,
@@ -154,6 +167,10 @@ class SglFunction:
             frequency_penalty=frequency_penalty,
             presence_penalty=presence_penalty,
             ignore_eos=ignore_eos,
+            return_logprob=return_logprob,
+            logprob_start_len=logprob_start_len,
+            top_logprobs_num=top_logprobs_num,
+            return_text_in_logprobs=return_text_in_logprobs,
         )
         backend = backend or global_config.default_backend
         return run_program(self, backend, args, kwargs, default_sampling_para, stream)
@@ -170,6 +187,10 @@ class SglFunction:
         frequency_penalty: float = 0.0,
         presence_penalty: float = 0.0,
         ignore_eos: bool = False,
+        return_logprob: Optional[bool] = None,
+        logprob_start_len: Optional[int] = None,
+        top_logprobs_num: Optional[int] = None,
+        return_text_in_logprobs: Optional[bool] = None,
         backend=None,
         num_threads: Union[str, int] = "auto",
         progress_bar: bool = False,
@@ -185,8 +206,10 @@ class SglFunction:
         batch_kwargs = [
             {self.arg_names[i]: v for i, v in enumerate(arg_values)}
             for arg_values in batch_kwargs
-            if isinstance(arg_values, (list, tuple)) and
-            len(self.arg_names) - len(self.arg_defaults) <= len(arg_values) <= len(self.arg_names)
+            if isinstance(arg_values, (list, tuple))
+            and len(self.arg_names) - len(self.arg_defaults)
+            <= len(arg_values)
+            <= len(self.arg_names)
         ]
         # Ensure to raise an exception if the number of arguments mismatch
         if len(batch_kwargs) != num_programs:
@@ -201,6 +224,10 @@ class SglFunction:
             frequency_penalty=frequency_penalty,
             presence_penalty=presence_penalty,
             ignore_eos=ignore_eos,
+            return_logprob=return_logprob,
+            logprob_start_len=logprob_start_len,
+            top_logprobs_num=top_logprobs_num,
+            return_text_in_logprobs=return_text_in_logprobs,
         )
         backend = backend or global_config.default_backend
         return run_program_batch(
@@ -348,7 +375,7 @@ class SglArgument(SglExpr):
 
 
 class SglImage(SglExpr):
-    def __init__(self, path):
+    def __init__(self, path: str):
         self.path = path
 
     def __repr__(self) -> str:
@@ -356,7 +383,7 @@ class SglImage(SglExpr):
 
 
 class SglVideo(SglExpr):
-    def __init__(self, path, num_frames):
+    def __init__(self, path: str, num_frames: int):
         self.path = path
         self.num_frames = num_frames
 
@@ -367,18 +394,23 @@ class SglVideo(SglExpr):
 class SglGen(SglExpr):
     def __init__(
         self,
-        name,
-        max_new_tokens,
-        stop,
-        temperature,
-        top_p,
-        top_k,
-        frequency_penalty,
-        presence_penalty,
-        ignore_eos,
-        dtype,
-        regex,
+        name: Optional[str] = None,
+        max_new_tokens: Optional[int] = None,
+        stop: Optional[Union[str, List[str]]] = None,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        top_k: Optional[int] = None,
+        frequency_penalty: Optional[float] = None,
+        presence_penalty: Optional[float] = None,
+        ignore_eos: Optional[bool] = None,
+        return_logprob: Optional[bool] = None,
+        logprob_start_len: Optional[int] = None,
+        top_logprobs_num: Optional[int] = None,
+        return_text_in_logprobs: Optional[bool] = None,
+        dtype: Optional[type] = None,
+        regex: Optional[str] = None,
     ):
+        """Call the model to generate. See the meaning of the arguments in docs/sampling_params.md"""
         super().__init__()
         self.name = name
         self.sampling_params = SglSamplingParams(
@@ -390,6 +422,10 @@ class SglGen(SglExpr):
             frequency_penalty=frequency_penalty,
             presence_penalty=presence_penalty,
             ignore_eos=ignore_eos,
+            return_logprob=return_logprob,
+            logprob_start_len=logprob_start_len,
+            top_logprobs_num=top_logprobs_num,
+            return_text_in_logprobs=return_text_in_logprobs,
             dtype=dtype,
             regex=regex,
         )
@@ -399,7 +435,7 @@ class SglGen(SglExpr):
 
 
 class SglConstantText(SglExpr):
-    def __init__(self, value):
+    def __init__(self, value: str):
         super().__init__()
         self.value = value
 
@@ -408,7 +444,7 @@ class SglConstantText(SglExpr):
 
 
 class SglRoleBegin(SglExpr):
-    def __init__(self, role):
+    def __init__(self, role: str):
         super().__init__()
         self.role = role
 
@@ -417,7 +453,7 @@ class SglRoleBegin(SglExpr):
 
 
 class SglRoleEnd(SglExpr):
-    def __init__(self, role):
+    def __init__(self, role: str):
         super().__init__()
         self.role = role
 
@@ -426,7 +462,7 @@ class SglRoleEnd(SglExpr):
 
 
 class SglSelect(SglExpr):
-    def __init__(self, name, choices, temperature):
+    def __init__(self, name: str, choices: List[str], temperature: float):
         super().__init__()
         self.name = name
         self.choices = choices
@@ -437,7 +473,7 @@ class SglSelect(SglExpr):
 
 
 class SglFork(SglExpr):
-    def __init__(self, number, position_ids_offset=None):
+    def __init__(self, number: int, position_ids_offset=None):
         super().__init__()
         self.number = number
         self.position_ids_offset = position_ids_offset
@@ -450,7 +486,7 @@ class SglFork(SglExpr):
 
 
 class SglGetForkItem(SglExpr):
-    def __init__(self, index):
+    def __init__(self, index: int):
         super().__init__()
         self.index = index
 
@@ -459,7 +495,7 @@ class SglGetForkItem(SglExpr):
 
 
 class SglVariable(SglExpr):
-    def __init__(self, name, source):
+    def __init__(self, name: str, source):
         super().__init__()
         self.name = name
         self.source = source
@@ -469,7 +505,7 @@ class SglVariable(SglExpr):
 
 
 class SglVarScopeBegin(SglExpr):
-    def __init__(self, name):
+    def __init__(self, name: str):
         super().__init__()
         self.name = name
 
@@ -478,7 +514,7 @@ class SglVarScopeBegin(SglExpr):
 
 
 class SglVarScopeEnd(SglExpr):
-    def __init__(self, name):
+    def __init__(self, name: str):
         super().__init__()
         self.name = name
 
@@ -500,4 +536,4 @@ class SglCommitLazy(SglExpr):
         super().__init__()
 
     def __repr__(self):
-        return f"CommitLazy()"
+        return "CommitLazy()"
sglang/srt/constrained/__init__.py CHANGED
@@ -5,13 +5,14 @@ from pydantic import BaseModel
 
 try:
     from outlines.caching import cache as disk_cache
-    from outlines.fsm.guide import RegexGuide
     from outlines.caching import disable_cache
     from outlines.fsm.guide import RegexGuide
     from outlines.fsm.regex import FSMInfo, make_byte_level_fsm, make_deterministic_fsm
     from outlines.models.transformers import TransformerTokenizer
 except ImportError as e:
-    print(f'\nError: {e}. Please install a new version of outlines by `pip install "outlines>=0.0.44"`\n')
+    print(
+        f'\nError: {e}. Please install a new version of outlines by `pip install "outlines>=0.0.44"`\n'
+    )
     raise
 
 try:
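These outlines imports back sglang's regex-constrained decoding, which users reach through the `regex` parameter of `sgl.gen` visible in the ir.py signature above. A short illustrative sketch, assuming a default backend is already set as in the previous example; the prompt and pattern are invented:

```python
import sglang as sgl

@sgl.function
def extract_ip(s, text):
    s += text + "\nThe IP address mentioned above is: "
    # outlines compiles the regex into a token-level FSM (RegexGuide /
    # make_byte_level_fsm) that masks invalid tokens at each decoding step,
    # so the generated span conforms to the pattern.
    s += sgl.gen(
        "ip",
        max_new_tokens=16,
        regex=r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}",
    )

state = extract_ip.run(text="The server at 192.168.0.7 stopped responding.")
print(state["ip"])
```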
sglang/srt/hf_transformers_utils.py CHANGED
@@ -264,7 +264,9 @@ class TiktokenTokenizer:
         return self.tokenizer.decode_batch(batch)
 
     def apply_chat_template(self, messages, tokenize, add_generation_prompt):
-        ret = self.chat_template.render(messages=messages, add_generation_prompt=add_generation_prompt)
+        ret = self.chat_template.render(
+            messages=messages, add_generation_prompt=add_generation_prompt
+        )
         return self.encode(ret) if tokenize else ret
 
 
@@ -297,5 +299,7 @@ class SentencePieceTokenizer:
         return self.tokenizer.decode(batch)
 
     def apply_chat_template(self, messages, tokenize, add_generation_prompt):
-        ret = self.chat_template.render(messages=messages, add_generation_prompt=add_generation_prompt)
-        return self.encode(ret) if tokenize else ret
+        ret = self.chat_template.render(
+            messages=messages, add_generation_prompt=add_generation_prompt
+        )
+        return self.encode(ret) if tokenize else ret
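Both tokenizer wrappers implement `apply_chat_template` the same way: render a Jinja chat template over the message list, then encode the result only if requested. A standalone sketch of that flow using `jinja2` directly — the template string is an invented minimal one, not the template sglang ships:

```python
from jinja2 import Template

# Invented minimal chat template for illustration.
chat_template = Template(
    "{% for m in messages %}<|{{ m['role'] }}|>{{ m['content'] }}<|end|>\n{% endfor %}"
    "{% if add_generation_prompt %}<|assistant|>{% endif %}"
)

def apply_chat_template(messages, tokenize, add_generation_prompt, encode=None):
    # Mirrors the pattern in the diff: render first, encode only on request.
    ret = chat_template.render(
        messages=messages, add_generation_prompt=add_generation_prompt
    )
    return encode(ret) if tokenize else ret

messages = [
    {"role": "system", "content": "You are helpful."},
    {"role": "user", "content": "Hi!"},
]
print(apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
```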
sglang/srt/layers/extend_attention.py CHANGED
@@ -191,6 +191,7 @@ def extend_attention_fwd(
     b_seq_len_extend,
     max_len_in_batch,
     max_len_extend,
+    sm_scale=None,
     logit_cap=-1,
 ):
     """
@@ -213,7 +214,7 @@ def extend_attention_fwd(
     else:
         BLOCK_M, BLOCK_N = (64, 64) if Lq <= 128 else (32, 32)
 
-    sm_scale = 1.0 / (Lq**0.5)
+    sm_scale = 1.0 / (Lq**0.5) if sm_scale is None else sm_scale
     batch_size, head_num = b_seq_len.shape[0], q_extend.shape[1]
     kv_group_num = q_extend.shape[1] // k_extend.shape[1]
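The new `sm_scale` argument lets callers override the softmax scaling instead of always using 1/sqrt(head_dim); models with a non-default attention scale, such as the Gemma 2 port added in this release, are the likely motivation, though the diff does not say so explicitly. A plain PyTorch sketch of the defaulting behavior (shapes and names are illustrative; the real kernel is Triton):

```python
import math
import torch

def attention(q, k, v, sm_scale=None):
    # Same defaulting rule as the kernel wrapper: fall back to 1/sqrt(head_dim)
    # only when the caller did not supply a scale.
    head_dim = q.shape[-1]
    if sm_scale is None:
        sm_scale = 1.0 / math.sqrt(head_dim)
    scores = (q @ k.transpose(-2, -1)) * sm_scale
    probs = torch.softmax(scores, dim=-1)
    return probs @ v

q = torch.randn(1, 8, 16, 64)  # (batch, heads, seq, head_dim)
k = torch.randn(1, 8, 16, 64)
v = torch.randn(1, 8, 16, 64)

out_default = attention(q, k, v)                    # uses 1/sqrt(64)
out_custom = attention(q, k, v, sm_scale=1.0 / 12)  # a model-specific scale
```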