sglang 0.2.14__tar.gz → 0.2.14.post1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (121)
  1. {sglang-0.2.14/sglang.egg-info → sglang-0.2.14.post1}/PKG-INFO +10 -4
  2. {sglang-0.2.14 → sglang-0.2.14.post1}/README.md +9 -3
  3. {sglang-0.2.14 → sglang-0.2.14.post1}/pyproject.toml +1 -1
  4. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/constrained/fsm_cache.py +11 -2
  5. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/constrained/jump_forward.py +1 -0
  6. sglang-0.2.14.post1/sglang/srt/layers/activation.py +131 -0
  7. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/layers/layernorm.py +0 -3
  8. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/layers/logits_processor.py +4 -4
  9. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/layers/sampler.py +15 -68
  10. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/managers/schedule_batch.py +15 -20
  11. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/managers/tp_worker.py +40 -33
  12. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/model_executor/cuda_graph_runner.py +17 -31
  13. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/model_executor/forward_batch_info.py +1 -8
  14. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/model_executor/model_runner.py +5 -11
  15. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/models/chatglm.py +12 -4
  16. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/models/commandr.py +1 -5
  17. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/models/dbrx.py +1 -5
  18. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/models/deepseek.py +1 -5
  19. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/models/deepseek_v2.py +1 -5
  20. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/models/gemma.py +1 -5
  21. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/models/gemma2.py +1 -5
  22. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/models/gpt_bigcode.py +2 -6
  23. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/models/grok.py +1 -5
  24. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/models/internlm2.py +1 -5
  25. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/models/llama2.py +3 -7
  26. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/models/llama_classification.py +2 -2
  27. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/models/minicpm.py +1 -5
  28. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/models/mixtral.py +1 -5
  29. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/models/mixtral_quant.py +1 -5
  30. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/models/qwen.py +2 -5
  31. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/models/qwen2.py +2 -6
  32. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/models/qwen2_moe.py +14 -5
  33. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/models/stablelm.py +1 -5
  34. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/openai_api/adapter.py +85 -4
  35. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/openai_api/protocol.py +2 -0
  36. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/sampling/sampling_batch_info.py +1 -74
  37. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/sampling/sampling_params.py +4 -0
  38. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/server.py +8 -1
  39. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/test/runners.py +1 -1
  40. sglang-0.2.14.post1/sglang/version.py +1 -0
  41. {sglang-0.2.14 → sglang-0.2.14.post1/sglang.egg-info}/PKG-INFO +10 -4
  42. sglang-0.2.14/sglang/srt/layers/activation.py +0 -55
  43. sglang-0.2.14/sglang/version.py +0 -1
  44. {sglang-0.2.14 → sglang-0.2.14.post1}/LICENSE +0 -0
  45. {sglang-0.2.14 → sglang-0.2.14.post1}/setup.cfg +0 -0
  46. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/__init__.py +0 -0
  47. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/api.py +0 -0
  48. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/bench_latency.py +0 -0
  49. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/bench_serving.py +0 -0
  50. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/check_env.py +0 -0
  51. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/global_config.py +0 -0
  52. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/lang/__init__.py +0 -0
  53. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/lang/backend/__init__.py +0 -0
  54. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/lang/backend/anthropic.py +0 -0
  55. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/lang/backend/base_backend.py +0 -0
  56. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/lang/backend/litellm.py +0 -0
  57. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/lang/backend/openai.py +0 -0
  58. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/lang/backend/runtime_endpoint.py +0 -0
  59. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/lang/backend/vertexai.py +0 -0
  60. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/lang/chat_template.py +0 -0
  61. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/lang/choices.py +0 -0
  62. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/lang/compiler.py +0 -0
  63. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/lang/interpreter.py +0 -0
  64. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/lang/ir.py +0 -0
  65. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/lang/tracer.py +0 -0
  66. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/launch_server.py +0 -0
  67. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/constrained/__init__.py +0 -0
  68. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/constrained/base_tool_cache.py +0 -0
  69. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/conversation.py +0 -0
  70. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/hf_transformers_utils.py +0 -0
  71. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/layers/decode_attention.py +0 -0
  72. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/layers/extend_attention.py +0 -0
  73. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/layers/fused_moe/__init__.py +0 -0
  74. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/layers/fused_moe/fused_moe.py +0 -0
  75. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/layers/fused_moe/layer.py +0 -0
  76. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/layers/pooler.py +0 -0
  77. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/layers/prefill_attention.py +0 -0
  78. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/layers/radix_attention.py +0 -0
  79. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/managers/controller_multi.py +0 -0
  80. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/managers/controller_single.py +0 -0
  81. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/managers/detokenizer_manager.py +0 -0
  82. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/managers/io_struct.py +0 -0
  83. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/managers/policy_scheduler.py +0 -0
  84. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/managers/tokenizer_manager.py +0 -0
  85. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/mem_cache/base_prefix_cache.py +0 -0
  86. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/mem_cache/chunk_cache.py +0 -0
  87. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/mem_cache/flush_cache.py +0 -0
  88. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/mem_cache/memory_pool.py +0 -0
  89. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/mem_cache/radix_cache.py +0 -0
  90. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/mm_utils.py +0 -0
  91. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/model_config.py +0 -0
  92. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/models/llama_embedding.py +0 -0
  93. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/models/llava.py +0 -0
  94. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/models/llavavid.py +0 -0
  95. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/models/mistral.py +0 -0
  96. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/models/yivl.py +0 -0
  97. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/sampling/penaltylib/__init__.py +0 -0
  98. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/sampling/penaltylib/orchestrator.py +0 -0
  99. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/sampling/penaltylib/penalizers/frequency_penalty.py +0 -0
  100. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/sampling/penaltylib/penalizers/min_new_tokens.py +0 -0
  101. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/sampling/penaltylib/penalizers/presence_penalty.py +0 -0
  102. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/sampling/penaltylib/penalizers/repetition_penalty.py +0 -0
  103. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/server_args.py +0 -0
  104. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/utils.py +0 -0
  105. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/test/run_eval.py +0 -0
  106. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/test/simple_eval_common.py +0 -0
  107. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/test/simple_eval_gpqa.py +0 -0
  108. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/test/simple_eval_humaneval.py +0 -0
  109. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/test/simple_eval_math.py +0 -0
  110. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/test/simple_eval_mgsm.py +0 -0
  111. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/test/simple_eval_mmlu.py +0 -0
  112. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/test/srt/sampling/penaltylib/utils.py +0 -0
  113. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/test/test_activation.py +0 -0
  114. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/test/test_layernorm.py +0 -0
  115. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/test/test_programs.py +0 -0
  116. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/test/test_utils.py +0 -0
  117. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang/utils.py +0 -0
  118. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang.egg-info/SOURCES.txt +0 -0
  119. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang.egg-info/dependency_links.txt +0 -0
  120. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang.egg-info/requires.txt +0 -0
  121. {sglang-0.2.14 → sglang-0.2.14.post1}/sglang.egg-info/top_level.txt +0 -0
{sglang-0.2.14/sglang.egg-info → sglang-0.2.14.post1}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: sglang
- Version: 0.2.14
+ Version: 0.2.14.post1
  Summary: SGLang is yet another fast serving framework for large language models and vision language models.
  License: Apache License
  Version 2.0, January 2004
@@ -312,7 +312,7 @@ pip install flashinfer -i https://flashinfer.ai/whl/cu121/torch2.4/
  ### Method 2: From source
  ```
  # Use the last release branch
- git clone -b v0.2.14 https://github.com/sgl-project/sglang.git
+ git clone -b v0.2.14.post1 https://github.com/sgl-project/sglang.git
  cd sglang
  
  pip install --upgrade pip
@@ -339,6 +339,7 @@ docker run --gpus all \
  ### Method 4: Using docker compose
  
  <details>
+ <summary>More</summary>
  
  > This method is recommended if you plan to serve it as a service.
  > A better approach is to use the [k8s-sglang-service.yaml](./docker/k8s-sglang-service.yaml).
@@ -350,6 +351,7 @@ docker run --gpus all \
  ### Method 5: Run on Kubernetes or Clouds with SkyPilot
  
  <details>
+ <summary>More</summary>
  
  To deploy on Kubernetes or 12+ clouds, you can use [SkyPilot](https://github.com/skypilot-org/skypilot).
  
@@ -389,7 +391,7 @@ sky status --endpoint 30000 sglang
  
  
  ### Common Notes
- - [FlashInfer](https://github.com/flashinfer-ai/flashinfer) is currently one of the dependencies that must be installed for SGLang. If you are using NVIDIA GPU devices below sm80, such as T4, you can't use SGLang for the time being. We expect to resolve this issue soon, so please stay tuned. If you encounter any FlashInfer-related issues on sm80+ devices (e.g., A100, L40S, H100), consider using Triton's kernel by `--disable-flashinfer --disable-flashinfer-sampling` and raise a issue.
+ - [FlashInfer](https://github.com/flashinfer-ai/flashinfer) is currently one of the dependencies that must be installed for SGLang. It only supports sm75 and above. If you encounter any FlashInfer-related issues on sm75+ devices (e.g., T4, A10, A100, L4, L40S, H100), consider using Triton's kernel by `--disable-flashinfer --disable-flashinfer-sampling` and raise an issue.
  - If you only need to use the OpenAI backend, you can avoid installing other dependencies by using `pip install "sglang[openai]"`.
  
  ## Backend: SGLang Runtime (SRT)
@@ -518,6 +520,7 @@ Instructions for supporting a new model are [here](https://github.com/sgl-projec
  
  #### Use Models From ModelScope
  <details>
+ <summary>More</summary>
  
  To use a model from [ModelScope](https://www.modelscope.cn), set the environment variable SGLANG_USE_MODELSCOPE.
  ```
@@ -532,6 +535,7 @@ SGLANG_USE_MODELSCOPE=true python -m sglang.launch_server --model-path qwen/Qwen
  
  #### Run Llama 3.1 405B
  <details>
+ <summary>More</summary>
  
  ```bash
  # Run 405B (fp8) on a single node
@@ -549,7 +553,9 @@ GLOO_SOCKET_IFNAME=eth0 python3 -m sglang.launch_server --model-path meta-llama/
  
  ### Benchmark Performance
  
- - Benchmark a single static batch by running the following command without launching a server. The arguments are the same as for `launch_server.py`. Note that this is not a dynamic batching server, so it may run out of memory for a batch size that a real server can handle. A real server truncates the prefill into several batches, while this unit test does not. For accurate large batch testing, consider using `sglang.bench_serving`.
+ - Benchmark a single static batch by running the following command without launching a server. The arguments are the same as for `launch_server.py`.
+ Note that this is not a dynamic batching server, so it may run out of memory for a batch size that a real server can handle.
+ A real server truncates the prefill into several batches, while this unit test does not. For accurate large batch testing, please use `sglang.bench_serving` instead.
  ```
  python -m sglang.bench_latency --model-path meta-llama/Meta-Llama-3-8B-Instruct --batch 32 --input-len 256 --output-len 32
  ```
{sglang-0.2.14 → sglang-0.2.14.post1}/README.md
@@ -56,7 +56,7 @@ pip install flashinfer -i https://flashinfer.ai/whl/cu121/torch2.4/
  ### Method 2: From source
  ```
  # Use the last release branch
- git clone -b v0.2.14 https://github.com/sgl-project/sglang.git
+ git clone -b v0.2.14.post1 https://github.com/sgl-project/sglang.git
  cd sglang
  
  pip install --upgrade pip
@@ -83,6 +83,7 @@ docker run --gpus all \
  ### Method 4: Using docker compose
  
  <details>
+ <summary>More</summary>
  
  > This method is recommended if you plan to serve it as a service.
  > A better approach is to use the [k8s-sglang-service.yaml](./docker/k8s-sglang-service.yaml).
@@ -94,6 +95,7 @@ docker run --gpus all \
  ### Method 5: Run on Kubernetes or Clouds with SkyPilot
  
  <details>
+ <summary>More</summary>
  
  To deploy on Kubernetes or 12+ clouds, you can use [SkyPilot](https://github.com/skypilot-org/skypilot).
  
@@ -133,7 +135,7 @@ sky status --endpoint 30000 sglang
  
  
  ### Common Notes
- - [FlashInfer](https://github.com/flashinfer-ai/flashinfer) is currently one of the dependencies that must be installed for SGLang. If you are using NVIDIA GPU devices below sm80, such as T4, you can't use SGLang for the time being. We expect to resolve this issue soon, so please stay tuned. If you encounter any FlashInfer-related issues on sm80+ devices (e.g., A100, L40S, H100), consider using Triton's kernel by `--disable-flashinfer --disable-flashinfer-sampling` and raise a issue.
+ - [FlashInfer](https://github.com/flashinfer-ai/flashinfer) is currently one of the dependencies that must be installed for SGLang. It only supports sm75 and above. If you encounter any FlashInfer-related issues on sm75+ devices (e.g., T4, A10, A100, L4, L40S, H100), consider using Triton's kernel by `--disable-flashinfer --disable-flashinfer-sampling` and raise an issue.
  - If you only need to use the OpenAI backend, you can avoid installing other dependencies by using `pip install "sglang[openai]"`.
  
  ## Backend: SGLang Runtime (SRT)
@@ -262,6 +264,7 @@ Instructions for supporting a new model are [here](https://github.com/sgl-projec
  
  #### Use Models From ModelScope
  <details>
+ <summary>More</summary>
  
  To use a model from [ModelScope](https://www.modelscope.cn), set the environment variable SGLANG_USE_MODELSCOPE.
  ```
@@ -276,6 +279,7 @@ SGLANG_USE_MODELSCOPE=true python -m sglang.launch_server --model-path qwen/Qwen
  
  #### Run Llama 3.1 405B
  <details>
+ <summary>More</summary>
  
  ```bash
  # Run 405B (fp8) on a single node
@@ -293,7 +297,9 @@ GLOO_SOCKET_IFNAME=eth0 python3 -m sglang.launch_server --model-path meta-llama/
  
  ### Benchmark Performance
  
- - Benchmark a single static batch by running the following command without launching a server. The arguments are the same as for `launch_server.py`. Note that this is not a dynamic batching server, so it may run out of memory for a batch size that a real server can handle. A real server truncates the prefill into several batches, while this unit test does not. For accurate large batch testing, consider using `sglang.bench_serving`.
+ - Benchmark a single static batch by running the following command without launching a server. The arguments are the same as for `launch_server.py`.
+ Note that this is not a dynamic batching server, so it may run out of memory for a batch size that a real server can handle.
+ A real server truncates the prefill into several batches, while this unit test does not. For accurate large batch testing, please use `sglang.bench_serving` instead.
  ```
  python -m sglang.bench_latency --model-path meta-llama/Meta-Llama-3-8B-Instruct --batch 32 --input-len 256 --output-len 32
  ```
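Both the PKG-INFO and README hunks above rewrite the FlashInfer entry in Common Notes: the supported floor moves from sm80 to sm75, and the escape hatch is the pair of server flags named in the new text. As a sketch of how those flags combine with the launch commands already shown in this README (model path reused from the benchmark example above, not a new recommendation), the Triton fallback would be invoked like:

```
# Sketch: fall back to Triton kernels if FlashInfer misbehaves on an sm75+ GPU
python -m sglang.launch_server --model-path meta-llama/Meta-Llama-3-8B-Instruct \
  --disable-flashinfer --disable-flashinfer-sampling
```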
{sglang-0.2.14 → sglang-0.2.14.post1}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
  
  [project]
  name = "sglang"
- version = "0.2.14"
+ version = "0.2.14.post1"
  description = "SGLang is yet another fast serving framework for large language models and vision language models."
  readme = "README.md"
  requires-python = ">=3.8"
{sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/constrained/fsm_cache.py
@@ -15,6 +15,8 @@ limitations under the License.
  
  """Cache for the compressed finite state machine."""
  
+ from outlines.fsm.json_schema import build_regex_from_schema
+ 
  from sglang.srt.constrained import RegexGuide, TransformerTokenizer
  from sglang.srt.constrained.base_tool_cache import BaseToolCache
  
@@ -26,9 +28,12 @@ class FSMCache(BaseToolCache):
          tokenizer_args_dict,
          enable=True,
          skip_tokenizer_init=False,
+         json_schema_mode=False,
      ):
          super().__init__(enable=enable)
  
+         self.json_schema_mode = json_schema_mode
+ 
          if (
              skip_tokenizer_init
              or tokenizer_path.endswith(".json")
@@ -72,5 +77,9 @@ class FSMCache(BaseToolCache):
              tokenizer_path, **tokenizer_args_dict
          )
  
-     def init_value(self, regex):
-         return RegexGuide(regex, self.outlines_tokenizer)
+     def init_value(self, value):
+         if self.json_schema_mode:
+             regex = build_regex_from_schema(value)
+             return RegexGuide(regex, self.outlines_tokenizer), regex
+         else:
+             return RegexGuide(value, self.outlines_tokenizer)
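The new `json_schema_mode` flag lets an `FSMCache` be keyed by JSON schemas instead of raw regexes: `init_value` first lowers the schema to a regex via outlines' `build_regex_from_schema`, then compiles the `RegexGuide`, returning the derived regex alongside the guide (presumably so the jump-forward path, which gains the same import below, can reuse it). A minimal sketch of that lowering step on its own, assuming only the `outlines` dependency the hunk already imports:

```python
# Sketch: what init_value() does in json_schema_mode, outside the cache.
from outlines.fsm.json_schema import build_regex_from_schema

schema = '{"type": "object", "properties": {"name": {"type": "string"}}}'
regex = build_regex_from_schema(schema)  # regex string accepted by RegexGuide
print(regex[:80])  # inspect the generated pattern
```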
{sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/constrained/jump_forward.py
@@ -23,6 +23,7 @@ from collections import defaultdict
  
  import interegular
  import outlines.caching
+ from outlines.fsm.json_schema import build_regex_from_schema
  
  from sglang.srt.constrained import (
      FSMInfo,
sglang-0.2.14.post1/sglang/srt/layers/activation.py (new file)
@@ -0,0 +1,131 @@
+ """
+ Copyright 2023-2024 SGLang Team
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+     http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ """
+ 
+ """Fused operators for activation layers."""
+ 
+ from typing import Optional
+ 
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from flashinfer.activation import gelu_tanh_and_mul, silu_and_mul
+ from vllm.distributed import (
+     divide,
+     get_tensor_model_parallel_rank,
+     get_tensor_model_parallel_world_size,
+ )
+ from vllm.model_executor.custom_op import CustomOp
+ from vllm.model_executor.layers.quantization import QuantizationConfig
+ from vllm.model_executor.utils import set_weight_attrs
+ 
+ 
+ class SiluAndMul(CustomOp):
+     def forward_native(self, x: torch.Tensor) -> torch.Tensor:
+         d = x.shape[-1] // 2
+         return F.silu(x[..., :d]) * x[..., d:]
+ 
+     def forward_cuda(self, x: torch.Tensor) -> torch.Tensor:
+         d = x.shape[-1] // 2
+         output_shape = x.shape[:-1] + (d,)
+         out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+         silu_and_mul(x, out)
+         return out
+ 
+ 
+ class GeluAndMul(CustomOp):
+     def __init__(self, **kwargs):
+         super().__init__()
+ 
+     def forward_native(self, x: torch.Tensor) -> torch.Tensor:
+         d = x.shape[-1] // 2
+         return F.gelu(x[..., :d], approximate="tanh") * x[..., d:]
+ 
+     def forward_cuda(self, x: torch.Tensor) -> torch.Tensor:
+         d = x.shape[-1] // 2
+         output_shape = x.shape[:-1] + (d,)
+         out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
+         gelu_tanh_and_mul(x, out)
+         return out
+ 
+ 
+ class ScaledActivation(nn.Module):
+     """An activation function with post-scale parameters.
+ 
+     This is used for some quantization methods like AWQ.
+     """
+ 
+     def __init__(
+         self,
+         act_module: nn.Module,
+         intermediate_size: int,
+         input_is_parallel: bool = True,
+         params_dtype: Optional[torch.dtype] = None,
+     ):
+         super().__init__()
+         self.act = act_module
+         self.input_is_parallel = input_is_parallel
+         if input_is_parallel:
+             tp_size = get_tensor_model_parallel_world_size()
+             intermediate_size_per_partition = divide(intermediate_size, tp_size)
+         else:
+             intermediate_size_per_partition = intermediate_size
+         if params_dtype is None:
+             params_dtype = torch.get_default_dtype()
+         self.scales = nn.Parameter(
+             torch.empty(intermediate_size_per_partition, dtype=params_dtype)
+         )
+         set_weight_attrs(self.scales, {"weight_loader": self.weight_loader})
+ 
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         return self.act(x) / self.scales
+ 
+     def weight_loader(self, param: nn.Parameter, loaded_weight: torch.Tensor):
+         param_data = param.data
+         if self.input_is_parallel:
+             tp_rank = get_tensor_model_parallel_rank()
+             shard_size = param_data.shape[0]
+             start_idx = tp_rank * shard_size
+             loaded_weight = loaded_weight.narrow(0, start_idx, shard_size)
+         assert param_data.shape == loaded_weight.shape
+         param_data.copy_(loaded_weight)
+ 
+ 
+ _ACTIVATION_REGISTRY = {
+     "gelu": nn.GELU(),
+     "gelu_pytorch_tanh": nn.GELU(approximate="tanh"),
+ }
+ 
+ 
+ def get_act_fn(
+     act_fn_name: str,
+     quant_config: Optional[QuantizationConfig] = None,
+     intermediate_size: Optional[int] = None,
+     input_is_parallel: bool = True,
+     params_dtype: Optional[torch.dtype] = None,
+ ) -> nn.Module:
+     """Get an activation function by name."""
+     act_fn_name = act_fn_name.lower()
+     if act_fn_name not in _ACTIVATION_REGISTRY:
+         raise ValueError(f"Activation function {act_fn_name!r} is not supported.")
+ 
+     act_fn = _ACTIVATION_REGISTRY[act_fn_name]
+     if quant_config is not None and act_fn_name in quant_config.get_scaled_act_names():
+         if intermediate_size is None:
+             raise ValueError(
+                 "intermediate_size must be specified for scaled "
+                 "activation functions."
+             )
+         return ScaledActivation(
+             act_fn, intermediate_size, input_is_parallel, params_dtype
+         )
+     return act_fn
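The new `activation.py` largely mirrors vLLM's activation layers, with the CUDA paths routed through FlashInfer's fused `silu_and_mul` / `gelu_tanh_and_mul` kernels. A minimal usage sketch, exercising only the `forward_native` paths so it runs without FlashInfer (shapes are illustrative):

```python
import torch

from sglang.srt.layers.activation import GeluAndMul, SiluAndMul, get_act_fn

# Gated MLPs feed the concatenated [gate, up] projection; the layer splits
# the last dim in half and returns act(gate) * up.
x = torch.randn(4, 2 * 1024)
y = SiluAndMul().forward_native(x)   # shape: (4, 1024)
z = GeluAndMul().forward_native(x)   # tanh-approximated GELU variant

# Registry lookup; with a quant_config whose scaled-act list matches,
# get_act_fn would return a ScaledActivation wrapper instead.
act = get_act_fn("gelu_pytorch_tanh")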
{sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/layers/layernorm.py
@@ -32,15 +32,12 @@ class RMSNorm(CustomOp):
          super().__init__()
          self.weight = nn.Parameter(torch.ones(hidden_size))
          self.variance_epsilon = eps
-         self.is_lower_sm80 = torch.cuda.get_device_capability()[0] < 8
  
      def forward_cuda(
          self,
          x: torch.Tensor,
          residual: Optional[torch.Tensor] = None,
      ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
-         if self.is_lower_sm80:
-             return self.forward_native(x, residual)
  
          if residual is not None:
              fused_add_rmsnorm(x, residual, self.weight.data, self.variance_epsilon)
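Context for this hunk: 0.2.14 skipped the fused kernel on pre-sm80 GPUs by bouncing to `forward_native`; post1 drops that guard, consistent with the sm75 FlashInfer support noted in the README change above. For reference, the math the removed fallback computed is plain RMSNorm, roughly as follows (a paraphrase, not the exact `forward_native` body):

```python
import torch

def rmsnorm_reference(x: torch.Tensor, weight: torch.Tensor, eps: float) -> torch.Tensor:
    # y = x / sqrt(mean(x^2) + eps) * weight, accumulated in fp32 for stability
    variance = x.float().pow(2).mean(dim=-1, keepdim=True)
    return (x.float() * torch.rsqrt(variance + eps)).to(x.dtype) * weight
```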
{sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/layers/logits_processor.py
@@ -29,7 +29,7 @@ from sglang.srt.model_executor.forward_batch_info import ForwardMode, InputMetad
  
  
  @dataclasses.dataclass
- class LogitsProcessorOutput:
+ class LogitProcessorOutput:
      # The logits of the next tokens. shape: [#seq, vocab_size]
      next_token_logits: torch.Tensor
      # The logprobs of the next tokens. shape: [#seq, vocab_size]
@@ -185,7 +185,7 @@ class LogitsProcessor(nn.Module):
  
          # Return only last_logits if logprob is not requested
          if not logits_metadata.return_logprob:
-             return LogitsProcessorOutput(
+             return LogitProcessorOutput(
                  next_token_logits=last_logits,
                  next_token_logprobs=None,
                  normalized_prompt_logprobs=None,
@@ -209,7 +209,7 @@ class LogitsProcessor(nn.Module):
          else:
              output_top_logprobs = None
  
-         return LogitsProcessorOutput(
+         return LogitProcessorOutput(
              next_token_logits=last_logits,
              next_token_logprobs=last_logprobs,
              normalized_prompt_logprobs=None,
@@ -278,7 +278,7 @@ class LogitsProcessor(nn.Module):
          # Remove the last token logprob for the prefill tokens.
          input_token_logprobs = input_token_logprobs[:-1]
  
-         return LogitsProcessorOutput(
+         return LogitProcessorOutput(
              next_token_logits=last_logits,
              next_token_logprobs=last_logprobs,
              normalized_prompt_logprobs=normalized_prompt_logprobs,
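This rename restores the pre-0.2.14 class name as part of reverting the sampler refactor (the `sampler.py` hunks below drop the matching import). Downstream code reads the output fields; a hedged sketch, where the call itself is hypothetical and only the two fields shown in the hunks are assumed:

```python
# Hypothetical call site; field names and shapes are taken from the hunks above.
output = logits_processor(...)            # returns LogitProcessorOutput
logits = output.next_token_logits         # [#seq, vocab_size]
logprobs = output.next_token_logprobs     # [#seq, vocab_size], or None
```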
{sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/layers/sampler.py
@@ -1,6 +1,4 @@
- import dataclasses
  import logging
- from typing import Union
  
  import torch
  from flashinfer.sampling import (
@@ -11,8 +9,6 @@ from flashinfer.sampling import (
  )
  from vllm.model_executor.custom_op import CustomOp
  
- from sglang.srt.layers.logits_processor import LogitsProcessorOutput
- 
  # TODO: move this dict to another place
  from sglang.srt.managers.schedule_batch import global_server_args_dict
  from sglang.srt.sampling.sampling_batch_info import SamplingBatchInfo
@@ -20,71 +16,30 @@ from sglang.srt.sampling.sampling_batch_info import SamplingBatchInfo
  logger = logging.getLogger(__name__)
  
  
- @dataclasses.dataclass
- class SampleOutput:
-     success: torch.Tensor
-     probs: torch.Tensor
-     batch_next_token_ids: torch.Tensor
- 
- 
  class Sampler(CustomOp):
      def __init__(self):
          super().__init__()
  
-     def _apply_penalties(self, logits: torch.Tensor, sampling_info: SamplingBatchInfo):
-         # min-token, presence, frequency
-         if sampling_info.linear_penalties is not None:
-             logits += sampling_info.linear_penalties
- 
-         # repetition
-         if sampling_info.scaling_penalties is not None:
-             logits = torch.where(
-                 logits > 0,
-                 logits / sampling_info.scaling_penalties,
-                 logits * sampling_info.scaling_penalties,
-             )
- 
-         return logits
- 
-     def _get_probs(
-         self,
-         logits: torch.Tensor,
-         sampling_info: SamplingBatchInfo,
-         is_torch_compile: bool = False,
-     ):
+     def forward_cuda(self, logits: torch.Tensor, sampling_info: SamplingBatchInfo):
          # Post process logits
          logits = logits.contiguous()
          logits.div_(sampling_info.temperatures)
-         if is_torch_compile:
-             # FIXME: Temporary workaround for unknown bugs in torch.compile
-             logits.add_(0)
- 
          if sampling_info.logit_bias is not None:
              logits.add_(sampling_info.logit_bias)
  
          if sampling_info.vocab_mask is not None:
              logits = logits.masked_fill(~sampling_info.vocab_mask, float("-inf"))
  
-         logits = self._apply_penalties(logits, sampling_info)
+         logits = sampling_info.penalizer_orchestrator.apply(logits)
  
-         return torch.softmax(logits, dim=-1)
- 
-     def forward_cuda(
-         self,
-         logits: Union[torch.Tensor, LogitsProcessorOutput],
-         sampling_info: SamplingBatchInfo,
-     ):
-         if isinstance(logits, LogitsProcessorOutput):
-             logits = logits.next_token_logits
- 
-         probs = self._get_probs(logits, sampling_info)
+         probs = torch.softmax(logits, dim=-1)
  
          if not global_server_args_dict["disable_flashinfer_sampling"]:
              max_top_k_round, batch_size = 32, probs.shape[0]
              uniform_samples = torch.rand(
                  (max_top_k_round, batch_size), device=probs.device
              )
-             if sampling_info.need_min_p_sampling:
+             if sampling_info.min_ps.any():
                  probs = top_k_renorm_prob(probs, sampling_info.top_ks)
                  probs = top_p_renorm_prob(probs, sampling_info.top_ps)
                  batch_next_token_ids, success = min_p_sampling_from_probs(
@@ -100,23 +55,18 @@ class Sampler(CustomOp):
                  probs, sampling_info.top_ks, sampling_info.top_ps, sampling_info.min_ps
              )
  
-         return SampleOutput(success, probs, batch_next_token_ids)
- 
-     def forward_native(
-         self,
-         logits: Union[torch.Tensor, LogitsProcessorOutput],
-         sampling_info: SamplingBatchInfo,
-     ):
-         if isinstance(logits, LogitsProcessorOutput):
-             logits = logits.next_token_logits
- 
-         probs = self._get_probs(logits, sampling_info, is_torch_compile=True)
+         if not torch.all(success):
+             logging.warning("Sampling failed, fallback to top_k=1 strategy")
+             probs = probs.masked_fill(torch.isnan(probs), 0.0)
+             argmax_ids = torch.argmax(probs, dim=-1)
+             batch_next_token_ids = torch.where(
+                 success, batch_next_token_ids, argmax_ids
+             )
  
-         batch_next_token_ids, success = top_k_top_p_min_p_sampling_from_probs_torch(
-             probs, sampling_info.top_ks, sampling_info.top_ps, sampling_info.min_ps
-         )
+         return batch_next_token_ids
  
-         return SampleOutput(success, probs, batch_next_token_ids)
+     def forward_native():
+         raise NotImplementedError("Native forward is not implemented yet.")
  
  
  def top_k_top_p_min_p_sampling_from_probs_torch(
@@ -137,10 +87,7 @@ def top_k_top_p_min_p_sampling_from_probs_torch(
      probs_sort[probs_sort < min_p_thresholds.view(-1, 1)] = 0.0
      probs_sort.div_(probs_sort.max(dim=-1, keepdim=True)[0])
      try:
-         # FIXME: torch.multiomial does not support num_samples = 1
-         sampled_index = torch.multinomial(probs_sort, num_samples=2, replacement=True)[
-             :, :1
-         ]
+         sampled_index = torch.multinomial(probs_sort, num_samples=1)
      except RuntimeError as e:
          logger.warning(f"Sampling error: {e}")
          batch_next_token_ids = torch.zeros(
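Two behavioral notes on the sampler hunks: the failure fallback to argmax moves from `ScheduleBatch.check_sample_results` back into `forward_cuda` itself, and the `num_samples=2` workaround (flagged by the removed FIXME) is replaced with a direct single-sample draw. A self-contained sketch of the two multinomial forms the last hunk swaps between, both drawing one token id per row:

```python
import torch

torch.manual_seed(0)
probs_sort = torch.tensor([[0.7, 0.3, 0.0], [0.5, 0.5, 0.0]])

# 0.2.14: draw two samples with replacement, keep only the first column
old = torch.multinomial(probs_sort, num_samples=2, replacement=True)[:, :1]
# 0.2.14.post1: draw a single sample directly
new = torch.multinomial(probs_sort, num_samples=1)
assert old.shape == new.shape == (2, 1)
```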
{sglang-0.2.14 → sglang-0.2.14.post1}/sglang/srt/managers/schedule_batch.py
@@ -1,5 +1,3 @@
- from __future__ import annotations
- 
  """
  Copyright 2023-2024 SGLang Team
  Licensed under the Apache License, Version 2.0 (the "License");
@@ -19,7 +17,7 @@ limitations under the License.
  
  import logging
  from dataclasses import dataclass
- from typing import TYPE_CHECKING, List, Optional, Union
+ from typing import List, Optional, Union
  
  import torch
  
@@ -31,10 +29,6 @@ from sglang.srt.mem_cache.chunk_cache import ChunkCache
  from sglang.srt.mem_cache.memory_pool import BaseTokenToKVPool, ReqToTokenPool
  from sglang.srt.sampling.sampling_batch_info import SamplingBatchInfo
  
- if TYPE_CHECKING:
-     from sglang.srt.layers.sampler import SampleOutput
- 
- 
  INIT_INCREMENTAL_DETOKENIZATION_OFFSET = 5
  
  # Put some global args for easy access
@@ -268,7 +262,14 @@ class Req:
  
          all_text = self.origin_input_text + self.decoded_text + jump_forward_str
          all_ids = self.tokenizer.encode(all_text)
+         if not all_ids:
+             logger.warning("Encoded all_text resulted in empty all_ids")
+             return False
+ 
          prompt_tokens = len(self.origin_input_ids_unpadded)
+         if prompt_tokens > len(all_ids):
+             logger.warning("prompt_tokens is larger than encoded all_ids")
+             return False
  
          if all_ids[prompt_tokens - 1] != self.origin_input_ids_unpadded[-1]:
              # TODO(lsyin): fix token fusion
@@ -677,17 +678,11 @@ class ScheduleBatch:
          self.top_logprobs_nums.extend(other.top_logprobs_nums)
          self.return_logprob = any(req.return_logprob for req in self.reqs)
  
-     def check_sample_results(self, sample_output: SampleOutput):
-         if not torch.all(sample_output.success):
-             probs = sample_output.probs
-             batch_next_token_ids = sample_output.batch_next_token_ids
-             logging.warning("Sampling failed, fallback to top_k=1 strategy")
-             probs = probs.masked_fill(torch.isnan(probs), 0.0)
-             argmax_ids = torch.argmax(probs, dim=-1)
-             batch_next_token_ids = torch.where(
-                 sample_output.success, batch_next_token_ids, argmax_ids
-             )
-             sample_output.probs = probs
-             sample_output.batch_next_token_ids = batch_next_token_ids
+     def sample(self, logits: torch.Tensor):
+         from sglang.srt.layers.sampler import Sampler
+ 
+         sampler = Sampler()
+ 
+         batch_next_token_ids = sampler(logits, self.sampling_info)
  
-         return sample_output.batch_next_token_ids
+         return batch_next_token_ids
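Net effect of these hunks: post1 reverts the `SampleOutput` indirection, so callers sample through the batch again and the failure fallback lives inside `Sampler.forward_cuda` (see the sampler diff above). A hedged sketch of the resulting call site; names other than `sample` and `next_token_logits` are placeholders, and the real caller is `tp_worker.py`, whose hunks are not included in this listing:

```python
# Hypothetical caller, sketching the post1 flow
logits_output = model_runner.forward(batch)            # a LogitProcessorOutput
next_token_ids = batch.sample(logits_output.next_token_logits)
```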