sglang 0.5.3rc2__py3-none-any.whl → 0.5.4__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to their respective public registries. It is provided for informational purposes only.
Files changed (408)
  1. sglang/bench_one_batch.py +47 -28
  2. sglang/bench_one_batch_server.py +41 -25
  3. sglang/bench_serving.py +330 -156
  4. sglang/check_env.py +1 -1
  5. sglang/compile_deep_gemm.py +6 -2
  6. sglang/global_config.py +1 -25
  7. sglang/lang/api.py +6 -0
  8. sglang/lang/interpreter.py +1 -0
  9. sglang/lang/ir.py +13 -0
  10. sglang/launch_server.py +8 -15
  11. sglang/profiler.py +18 -1
  12. sglang/srt/_custom_ops.py +1 -1
  13. sglang/srt/batch_invariant_ops/batch_invariant_ops.py +4 -6
  14. sglang/srt/checkpoint_engine/checkpoint_engine_worker.py +142 -0
  15. sglang/srt/compilation/backend.py +437 -0
  16. sglang/srt/compilation/compilation_config.py +20 -0
  17. sglang/srt/compilation/compilation_counter.py +47 -0
  18. sglang/srt/compilation/compile.py +210 -0
  19. sglang/srt/compilation/compiler_interface.py +503 -0
  20. sglang/srt/compilation/cuda_piecewise_backend.py +228 -0
  21. sglang/srt/compilation/fix_functionalization.py +134 -0
  22. sglang/srt/compilation/fx_utils.py +83 -0
  23. sglang/srt/compilation/inductor_pass.py +140 -0
  24. sglang/srt/compilation/pass_manager.py +66 -0
  25. sglang/srt/compilation/piecewise_context_manager.py +40 -0
  26. sglang/srt/compilation/weak_ref_tensor_jit.py +16 -0
  27. sglang/srt/configs/__init__.py +4 -0
  28. sglang/srt/configs/deepseek_ocr.py +262 -0
  29. sglang/srt/configs/deepseekvl2.py +194 -96
  30. sglang/srt/configs/dots_vlm.py +2 -7
  31. sglang/srt/configs/falcon_h1.py +13 -64
  32. sglang/srt/configs/load_config.py +25 -2
  33. sglang/srt/configs/mamba_utils.py +117 -0
  34. sglang/srt/configs/model_config.py +134 -23
  35. sglang/srt/configs/modelopt_config.py +30 -0
  36. sglang/srt/configs/nemotron_h.py +286 -0
  37. sglang/srt/configs/olmo3.py +105 -0
  38. sglang/srt/configs/points_v15_chat.py +29 -0
  39. sglang/srt/configs/qwen3_next.py +11 -47
  40. sglang/srt/configs/qwen3_omni.py +613 -0
  41. sglang/srt/configs/qwen3_vl.py +0 -10
  42. sglang/srt/connector/remote_instance.py +1 -1
  43. sglang/srt/constrained/base_grammar_backend.py +5 -1
  44. sglang/srt/constrained/llguidance_backend.py +5 -0
  45. sglang/srt/constrained/outlines_backend.py +1 -1
  46. sglang/srt/constrained/reasoner_grammar_backend.py +9 -6
  47. sglang/srt/constrained/utils.py +12 -0
  48. sglang/srt/constrained/xgrammar_backend.py +20 -11
  49. sglang/srt/disaggregation/ascend/transfer_engine.py +1 -1
  50. sglang/srt/disaggregation/base/conn.py +17 -4
  51. sglang/srt/disaggregation/common/conn.py +4 -2
  52. sglang/srt/disaggregation/decode.py +123 -31
  53. sglang/srt/disaggregation/decode_kvcache_offload_manager.py +1 -1
  54. sglang/srt/disaggregation/fake/conn.py +11 -3
  55. sglang/srt/disaggregation/mooncake/conn.py +157 -19
  56. sglang/srt/disaggregation/nixl/conn.py +69 -24
  57. sglang/srt/disaggregation/prefill.py +96 -270
  58. sglang/srt/distributed/device_communicators/all_reduce_utils.py +4 -4
  59. sglang/srt/distributed/device_communicators/custom_all_reduce.py +6 -6
  60. sglang/srt/distributed/device_communicators/pymscclpp.py +2 -2
  61. sglang/srt/distributed/device_communicators/pynccl.py +24 -12
  62. sglang/srt/distributed/device_communicators/pynccl_allocator.py +2 -2
  63. sglang/srt/distributed/device_communicators/symm_mem.py +1 -1
  64. sglang/srt/distributed/naive_distributed.py +5 -4
  65. sglang/srt/distributed/parallel_state.py +70 -19
  66. sglang/srt/elastic_ep/elastic_ep.py +74 -0
  67. sglang/srt/entrypoints/context.py +3 -2
  68. sglang/srt/entrypoints/engine.py +66 -66
  69. sglang/srt/entrypoints/grpc_server.py +431 -234
  70. sglang/srt/entrypoints/harmony_utils.py +2 -2
  71. sglang/srt/entrypoints/http_server.py +120 -8
  72. sglang/srt/entrypoints/http_server_engine.py +1 -7
  73. sglang/srt/entrypoints/openai/protocol.py +225 -37
  74. sglang/srt/entrypoints/openai/serving_base.py +49 -2
  75. sglang/srt/entrypoints/openai/serving_chat.py +29 -74
  76. sglang/srt/entrypoints/openai/serving_classify.py +204 -0
  77. sglang/srt/entrypoints/openai/serving_completions.py +15 -1
  78. sglang/srt/entrypoints/openai/serving_responses.py +5 -2
  79. sglang/srt/entrypoints/openai/serving_tokenize.py +144 -0
  80. sglang/srt/environ.py +42 -4
  81. sglang/srt/eplb/eplb_algorithms/__init__.py +18 -1
  82. sglang/srt/eplb/eplb_algorithms/deepseek.py +0 -2
  83. sglang/srt/eplb/eplb_algorithms/elasticity_aware.py +87 -0
  84. sglang/srt/eplb/expert_distribution.py +3 -4
  85. sglang/srt/eplb/expert_location_dispatch.py +2 -2
  86. sglang/srt/eplb/expert_location_updater.py +2 -2
  87. sglang/srt/function_call/base_format_detector.py +17 -18
  88. sglang/srt/function_call/function_call_parser.py +18 -14
  89. sglang/srt/function_call/glm4_moe_detector.py +1 -5
  90. sglang/srt/function_call/gpt_oss_detector.py +1 -1
  91. sglang/srt/function_call/json_array_parser.py +0 -2
  92. sglang/srt/function_call/utils.py +2 -2
  93. sglang/srt/grpc/compile_proto.py +3 -3
  94. sglang/srt/{entrypoints → grpc}/grpc_request_manager.py +112 -52
  95. sglang/srt/grpc/health_servicer.py +189 -0
  96. sglang/srt/grpc/scheduler_launcher.py +181 -0
  97. sglang/srt/grpc/sglang_scheduler_pb2.py +78 -70
  98. sglang/srt/grpc/sglang_scheduler_pb2.pyi +66 -10
  99. sglang/srt/grpc/sglang_scheduler_pb2_grpc.py +89 -1
  100. sglang/srt/layers/activation.py +4 -1
  101. sglang/srt/layers/attention/aiter_backend.py +3 -3
  102. sglang/srt/layers/attention/ascend_backend.py +17 -1
  103. sglang/srt/layers/attention/attention_registry.py +43 -23
  104. sglang/srt/layers/attention/base_attn_backend.py +20 -1
  105. sglang/srt/layers/attention/double_sparsity_backend.py +2 -2
  106. sglang/srt/layers/attention/fla/chunk.py +0 -1
  107. sglang/srt/layers/attention/fla/chunk_o.py +1 -1
  108. sglang/srt/layers/attention/fla/index.py +0 -2
  109. sglang/srt/layers/attention/fla/layernorm_gated.py +50 -32
  110. sglang/srt/layers/attention/fla/utils.py +0 -3
  111. sglang/srt/layers/attention/fla/wy_fast.py +0 -2
  112. sglang/srt/layers/attention/flashattention_backend.py +12 -8
  113. sglang/srt/layers/attention/flashinfer_backend.py +248 -21
  114. sglang/srt/layers/attention/flashinfer_mla_backend.py +20 -18
  115. sglang/srt/layers/attention/flashmla_backend.py +2 -2
  116. sglang/srt/layers/attention/hybrid_attn_backend.py +1 -1
  117. sglang/srt/layers/attention/hybrid_linear_attn_backend.py +165 -62
  118. sglang/srt/layers/attention/intel_amx_backend.py +1 -1
  119. sglang/srt/layers/attention/mamba/causal_conv1d.py +1 -1
  120. sglang/srt/layers/attention/mamba/causal_conv1d_triton.py +9 -5
  121. sglang/srt/layers/attention/mamba/mamba.py +189 -241
  122. sglang/srt/layers/attention/mamba/mamba2_metadata.py +211 -0
  123. sglang/srt/layers/attention/mamba/mixer2_rms_norm_gated.py +120 -0
  124. sglang/srt/layers/attention/mamba/ops/ssd_bmm.py +0 -50
  125. sglang/srt/layers/attention/mamba/ops/ssd_chunk_scan.py +0 -60
  126. sglang/srt/layers/attention/mamba/ops/ssd_chunk_state.py +0 -111
  127. sglang/srt/layers/attention/mamba/ops/ssd_combined.py +0 -1
  128. sglang/srt/layers/attention/mamba/ops/ssd_state_passing.py +0 -11
  129. sglang/srt/layers/attention/npu_ops/mla_preprocess.py +1 -1
  130. sglang/srt/layers/attention/nsa/nsa_indexer.py +40 -83
  131. sglang/srt/layers/attention/nsa/triton_kernel.py +136 -0
  132. sglang/srt/layers/attention/nsa/utils.py +0 -1
  133. sglang/srt/layers/attention/nsa_backend.py +404 -90
  134. sglang/srt/layers/attention/triton_backend.py +208 -34
  135. sglang/srt/layers/attention/triton_ops/double_sparsity_attention.py +2 -2
  136. sglang/srt/layers/attention/triton_ops/extend_attention.py +539 -44
  137. sglang/srt/layers/attention/trtllm_mha_backend.py +2 -2
  138. sglang/srt/layers/attention/trtllm_mla_backend.py +361 -30
  139. sglang/srt/layers/attention/utils.py +11 -7
  140. sglang/srt/layers/attention/vision.py +3 -3
  141. sglang/srt/layers/attention/xpu_backend.py +1028 -0
  142. sglang/srt/layers/communicator.py +11 -7
  143. sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/compile_utils.py +4 -8
  144. sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/configurer.py +4 -3
  145. sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/entrypoint.py +3 -3
  146. sglang/srt/layers/dp_attention.py +17 -0
  147. sglang/srt/layers/layernorm.py +45 -15
  148. sglang/srt/layers/linear.py +9 -1
  149. sglang/srt/layers/logits_processor.py +147 -17
  150. sglang/srt/layers/modelopt_utils.py +11 -0
  151. sglang/srt/layers/moe/cutlass_moe.py +0 -2
  152. sglang/srt/layers/moe/cutlass_w4a8_moe.py +213 -21
  153. sglang/srt/layers/moe/ep_moe/kernels.py +35 -457
  154. sglang/srt/layers/moe/ep_moe/layer.py +119 -397
  155. sglang/srt/layers/moe/flashinfer_cutedsl_moe.py +1 -1
  156. sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=128,N=192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
  157. sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=256,N=256,device_name=NVIDIA_B200.json +146 -0
  158. sglang/srt/layers/moe/fused_moe_triton/fused_moe_triton_config.py +11 -3
  159. sglang/srt/layers/moe/fused_moe_triton/layer.py +76 -70
  160. sglang/srt/layers/moe/fused_moe_triton/triton_kernels_moe.py +18 -42
  161. sglang/srt/layers/moe/moe_runner/deep_gemm.py +304 -0
  162. sglang/srt/layers/moe/moe_runner/runner.py +3 -0
  163. sglang/srt/layers/moe/moe_runner/triton.py +3 -1
  164. sglang/srt/layers/moe/rocm_moe_utils.py +0 -1
  165. sglang/srt/layers/moe/router.py +51 -15
  166. sglang/srt/layers/moe/token_dispatcher/__init__.py +10 -0
  167. sglang/srt/layers/moe/token_dispatcher/base.py +1 -1
  168. sglang/srt/layers/moe/token_dispatcher/deepep.py +110 -97
  169. sglang/srt/layers/moe/token_dispatcher/mooncake.py +386 -0
  170. sglang/srt/layers/moe/token_dispatcher/standard.py +46 -0
  171. sglang/srt/layers/moe/topk.py +3 -2
  172. sglang/srt/layers/moe/utils.py +17 -1
  173. sglang/srt/layers/quantization/__init__.py +2 -53
  174. sglang/srt/layers/quantization/awq.py +183 -6
  175. sglang/srt/layers/quantization/awq_triton.py +29 -0
  176. sglang/srt/layers/quantization/base_config.py +20 -1
  177. sglang/srt/layers/quantization/compressed_tensors/__init__.py +7 -0
  178. sglang/srt/layers/quantization/compressed_tensors/compressed_tensors.py +20 -49
  179. sglang/srt/layers/quantization/compressed_tensors/compressed_tensors_moe.py +421 -70
  180. sglang/srt/layers/quantization/compressed_tensors/schemes/__init__.py +3 -0
  181. sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +4 -22
  182. sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +339 -0
  183. sglang/srt/layers/quantization/fp8.py +84 -18
  184. sglang/srt/layers/quantization/fp8_kernel.py +55 -10
  185. sglang/srt/layers/quantization/fp8_utils.py +42 -14
  186. sglang/srt/layers/quantization/fpgemm_fp8.py +2 -3
  187. sglang/srt/layers/quantization/gptq.py +0 -1
  188. sglang/srt/layers/quantization/int8_kernel.py +18 -2
  189. sglang/srt/layers/quantization/marlin_utils.py +12 -0
  190. sglang/srt/layers/quantization/modelopt_quant.py +125 -100
  191. sglang/srt/layers/quantization/mxfp4.py +5 -30
  192. sglang/srt/layers/quantization/petit.py +1 -1
  193. sglang/srt/layers/quantization/quark/quark.py +3 -1
  194. sglang/srt/layers/quantization/quark/quark_moe.py +3 -3
  195. sglang/srt/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +0 -7
  196. sglang/srt/layers/quantization/unquant.py +1 -4
  197. sglang/srt/layers/quantization/utils.py +0 -1
  198. sglang/srt/layers/quantization/w4afp8.py +51 -20
  199. sglang/srt/layers/quantization/w8a8_int8.py +30 -24
  200. sglang/srt/layers/radix_attention.py +59 -9
  201. sglang/srt/layers/rotary_embedding.py +673 -16
  202. sglang/srt/layers/sampler.py +36 -16
  203. sglang/srt/layers/sparse_pooler.py +98 -0
  204. sglang/srt/layers/utils.py +0 -1
  205. sglang/srt/layers/vocab_parallel_embedding.py +4 -1
  206. sglang/srt/lora/backend/triton_backend.py +0 -1
  207. sglang/srt/lora/eviction_policy.py +139 -0
  208. sglang/srt/lora/lora_manager.py +24 -9
  209. sglang/srt/lora/lora_registry.py +1 -1
  210. sglang/srt/lora/mem_pool.py +40 -16
  211. sglang/srt/lora/triton_ops/chunked_sgmv_expand.py +1 -1
  212. sglang/srt/lora/triton_ops/chunked_sgmv_shrink.py +4 -2
  213. sglang/srt/managers/cache_controller.py +48 -17
  214. sglang/srt/managers/data_parallel_controller.py +146 -42
  215. sglang/srt/managers/detokenizer_manager.py +40 -13
  216. sglang/srt/managers/io_struct.py +66 -16
  217. sglang/srt/managers/mm_utils.py +20 -18
  218. sglang/srt/managers/multi_tokenizer_mixin.py +66 -81
  219. sglang/srt/managers/overlap_utils.py +96 -19
  220. sglang/srt/managers/schedule_batch.py +241 -511
  221. sglang/srt/managers/schedule_policy.py +15 -2
  222. sglang/srt/managers/scheduler.py +399 -499
  223. sglang/srt/managers/scheduler_metrics_mixin.py +55 -8
  224. sglang/srt/managers/scheduler_output_processor_mixin.py +317 -111
  225. sglang/srt/managers/scheduler_pp_mixin.py +341 -0
  226. sglang/srt/managers/scheduler_profiler_mixin.py +57 -10
  227. sglang/srt/managers/scheduler_runtime_checker_mixin.py +217 -0
  228. sglang/srt/managers/scheduler_update_weights_mixin.py +33 -14
  229. sglang/srt/managers/tokenizer_communicator_mixin.py +71 -55
  230. sglang/srt/managers/tokenizer_manager.py +378 -90
  231. sglang/srt/managers/tp_worker.py +212 -161
  232. sglang/srt/managers/utils.py +78 -2
  233. sglang/srt/mem_cache/allocator.py +7 -2
  234. sglang/srt/mem_cache/allocator_ascend.py +2 -2
  235. sglang/srt/mem_cache/base_prefix_cache.py +2 -2
  236. sglang/srt/mem_cache/chunk_cache.py +13 -2
  237. sglang/srt/mem_cache/common.py +480 -0
  238. sglang/srt/mem_cache/evict_policy.py +16 -1
  239. sglang/srt/mem_cache/hicache_storage.py +4 -1
  240. sglang/srt/mem_cache/hiradix_cache.py +16 -3
  241. sglang/srt/mem_cache/mamba_radix_cache.py +993 -0
  242. sglang/srt/mem_cache/memory_pool.py +435 -219
  243. sglang/srt/mem_cache/memory_pool_host.py +0 -1
  244. sglang/srt/mem_cache/multimodal_cache.py +0 -1
  245. sglang/srt/mem_cache/radix_cache.py +53 -19
  246. sglang/srt/mem_cache/radix_cache_cpp.py +19 -14
  247. sglang/srt/mem_cache/storage/aibrix_kvcache/aibrix_kvcache_storage.py +8 -2
  248. sglang/srt/mem_cache/storage/aibrix_kvcache/unit_test.py +1 -13
  249. sglang/srt/mem_cache/storage/backend_factory.py +2 -2
  250. sglang/srt/mem_cache/storage/eic/eic_storage.py +5 -6
  251. sglang/srt/mem_cache/storage/hf3fs/hf3fs_client.py +0 -1
  252. sglang/srt/mem_cache/storage/hf3fs/storage_hf3fs.py +9 -3
  253. sglang/srt/mem_cache/storage/lmcache/lmc_radix_cache.py +5 -3
  254. sglang/srt/mem_cache/storage/mooncake_store/mooncake_store.py +101 -17
  255. sglang/srt/mem_cache/storage/nixl/hicache_nixl.py +38 -9
  256. sglang/srt/mem_cache/storage/nixl/nixl_utils.py +1 -1
  257. sglang/srt/mem_cache/storage/nixl/test_hicache_nixl_storage.py +17 -2
  258. sglang/srt/mem_cache/swa_radix_cache.py +92 -26
  259. sglang/srt/metrics/collector.py +31 -0
  260. sglang/srt/metrics/func_timer.py +1 -1
  261. sglang/srt/model_executor/cuda_graph_runner.py +43 -5
  262. sglang/srt/model_executor/forward_batch_info.py +28 -23
  263. sglang/srt/model_executor/model_runner.py +379 -139
  264. sglang/srt/model_executor/npu_graph_runner.py +2 -3
  265. sglang/srt/model_executor/piecewise_cuda_graph_runner.py +539 -0
  266. sglang/srt/model_loader/__init__.py +1 -1
  267. sglang/srt/model_loader/loader.py +424 -27
  268. sglang/srt/model_loader/utils.py +0 -1
  269. sglang/srt/model_loader/weight_utils.py +47 -28
  270. sglang/srt/models/apertus.py +2 -3
  271. sglang/srt/models/arcee.py +2 -2
  272. sglang/srt/models/bailing_moe.py +13 -52
  273. sglang/srt/models/bailing_moe_nextn.py +3 -4
  274. sglang/srt/models/bert.py +1 -1
  275. sglang/srt/models/deepseek_nextn.py +19 -3
  276. sglang/srt/models/deepseek_ocr.py +1516 -0
  277. sglang/srt/models/deepseek_v2.py +273 -98
  278. sglang/srt/models/dots_ocr.py +0 -2
  279. sglang/srt/models/dots_vlm.py +0 -1
  280. sglang/srt/models/dots_vlm_vit.py +1 -1
  281. sglang/srt/models/falcon_h1.py +13 -19
  282. sglang/srt/models/gemma3_mm.py +16 -0
  283. sglang/srt/models/gemma3n_mm.py +1 -2
  284. sglang/srt/models/glm4_moe.py +14 -37
  285. sglang/srt/models/glm4_moe_nextn.py +2 -2
  286. sglang/srt/models/glm4v.py +2 -1
  287. sglang/srt/models/glm4v_moe.py +5 -5
  288. sglang/srt/models/gpt_oss.py +5 -5
  289. sglang/srt/models/grok.py +10 -23
  290. sglang/srt/models/hunyuan.py +2 -7
  291. sglang/srt/models/interns1.py +0 -1
  292. sglang/srt/models/kimi_vl.py +1 -7
  293. sglang/srt/models/kimi_vl_moonvit.py +3 -1
  294. sglang/srt/models/llama.py +2 -2
  295. sglang/srt/models/llama_eagle3.py +1 -1
  296. sglang/srt/models/longcat_flash.py +5 -22
  297. sglang/srt/models/longcat_flash_nextn.py +3 -14
  298. sglang/srt/models/mimo.py +2 -13
  299. sglang/srt/models/mimo_mtp.py +1 -2
  300. sglang/srt/models/minicpmo.py +7 -5
  301. sglang/srt/models/mixtral.py +1 -4
  302. sglang/srt/models/mllama.py +1 -1
  303. sglang/srt/models/mllama4.py +13 -3
  304. sglang/srt/models/nemotron_h.py +511 -0
  305. sglang/srt/models/olmo2.py +31 -4
  306. sglang/srt/models/opt.py +5 -5
  307. sglang/srt/models/phi.py +1 -1
  308. sglang/srt/models/phi4mm.py +1 -1
  309. sglang/srt/models/phimoe.py +0 -1
  310. sglang/srt/models/pixtral.py +0 -3
  311. sglang/srt/models/points_v15_chat.py +186 -0
  312. sglang/srt/models/qwen.py +0 -1
  313. sglang/srt/models/qwen2_5_vl.py +3 -3
  314. sglang/srt/models/qwen2_audio.py +2 -15
  315. sglang/srt/models/qwen2_moe.py +15 -12
  316. sglang/srt/models/qwen2_vl.py +5 -2
  317. sglang/srt/models/qwen3_moe.py +19 -35
  318. sglang/srt/models/qwen3_next.py +7 -12
  319. sglang/srt/models/qwen3_next_mtp.py +3 -4
  320. sglang/srt/models/qwen3_omni_moe.py +661 -0
  321. sglang/srt/models/qwen3_vl.py +37 -33
  322. sglang/srt/models/qwen3_vl_moe.py +57 -185
  323. sglang/srt/models/roberta.py +55 -3
  324. sglang/srt/models/sarashina2_vision.py +0 -1
  325. sglang/srt/models/step3_vl.py +3 -5
  326. sglang/srt/models/utils.py +11 -1
  327. sglang/srt/multimodal/processors/base_processor.py +6 -2
  328. sglang/srt/multimodal/processors/deepseek_ocr.py +37 -0
  329. sglang/srt/multimodal/processors/deepseek_vl_v2.py +0 -3
  330. sglang/srt/multimodal/processors/dots_vlm.py +0 -1
  331. sglang/srt/multimodal/processors/glm4v.py +1 -5
  332. sglang/srt/multimodal/processors/internvl.py +0 -2
  333. sglang/srt/multimodal/processors/janus_pro.py +0 -1
  334. sglang/srt/multimodal/processors/mllama4.py +0 -8
  335. sglang/srt/multimodal/processors/phi4mm.py +0 -1
  336. sglang/srt/multimodal/processors/points_v15_chat.py +52 -0
  337. sglang/srt/multimodal/processors/qwen_vl.py +75 -16
  338. sglang/srt/multimodal/processors/step3_vl.py +1 -1
  339. sglang/srt/parser/conversation.py +41 -0
  340. sglang/srt/parser/reasoning_parser.py +0 -1
  341. sglang/srt/sampling/custom_logit_processor.py +77 -2
  342. sglang/srt/sampling/sampling_batch_info.py +17 -22
  343. sglang/srt/sampling/sampling_params.py +70 -2
  344. sglang/srt/server_args.py +577 -73
  345. sglang/srt/server_args_config_parser.py +1 -1
  346. sglang/srt/single_batch_overlap.py +38 -28
  347. sglang/srt/speculative/base_spec_worker.py +34 -0
  348. sglang/srt/speculative/draft_utils.py +226 -0
  349. sglang/srt/speculative/eagle_draft_cuda_graph_runner.py +24 -7
  350. sglang/srt/speculative/eagle_draft_extend_cuda_graph_runner.py +23 -2
  351. sglang/srt/speculative/eagle_info.py +57 -18
  352. sglang/srt/speculative/eagle_info_v2.py +458 -0
  353. sglang/srt/speculative/eagle_utils.py +138 -0
  354. sglang/srt/speculative/eagle_worker.py +83 -280
  355. sglang/srt/speculative/eagle_worker_v2.py +702 -0
  356. sglang/srt/speculative/{ngram_utils.py → ngram_info.py} +14 -9
  357. sglang/srt/speculative/ngram_worker.py +12 -11
  358. sglang/srt/speculative/spec_info.py +2 -0
  359. sglang/srt/speculative/spec_utils.py +38 -3
  360. sglang/srt/speculative/standalone_worker.py +4 -14
  361. sglang/srt/tokenizer/tiktoken_tokenizer.py +2 -2
  362. sglang/srt/two_batch_overlap.py +28 -14
  363. sglang/srt/utils/__init__.py +1 -1
  364. sglang/srt/{bench_utils.py → utils/bench_utils.py} +4 -2
  365. sglang/srt/utils/common.py +192 -47
  366. sglang/srt/utils/hf_transformers_utils.py +40 -17
  367. sglang/srt/{host_shared_memory.py → utils/host_shared_memory.py} +0 -1
  368. sglang/srt/{offloader.py → utils/offloader.py} +4 -4
  369. sglang/srt/utils/profile_merger.py +199 -0
  370. sglang/test/attention/test_flashattn_backend.py +1 -1
  371. sglang/test/attention/test_flashattn_mla_backend.py +0 -1
  372. sglang/test/attention/test_prefix_chunk_info.py +0 -2
  373. sglang/test/attention/test_trtllm_mla_backend.py +221 -53
  374. sglang/test/few_shot_gsm8k_engine.py +2 -4
  375. sglang/test/kit_matched_stop.py +157 -0
  376. sglang/test/longbench_v2/__init__.py +1 -0
  377. sglang/test/longbench_v2/test_longbench_v2_eval.py +238 -0
  378. sglang/test/longbench_v2/validate_longbench_v2.py +337 -0
  379. sglang/test/longbench_v2/validate_longbench_v2_standalone.py +306 -0
  380. sglang/test/run_eval.py +41 -0
  381. sglang/test/runners.py +2 -0
  382. sglang/test/send_one.py +42 -7
  383. sglang/test/simple_eval_common.py +3 -0
  384. sglang/test/simple_eval_gpqa.py +0 -1
  385. sglang/test/simple_eval_humaneval.py +0 -3
  386. sglang/test/simple_eval_longbench_v2.py +344 -0
  387. sglang/test/test_block_fp8.py +1 -2
  388. sglang/test/test_block_fp8_deep_gemm_blackwell.py +0 -1
  389. sglang/test/test_cutlass_moe.py +1 -2
  390. sglang/test/test_cutlass_w4a8_moe.py +10 -20
  391. sglang/test/test_deterministic.py +232 -99
  392. sglang/test/test_deterministic_utils.py +73 -0
  393. sglang/test/test_disaggregation_utils.py +81 -0
  394. sglang/test/test_marlin_moe.py +0 -1
  395. sglang/test/test_utils.py +85 -20
  396. sglang/version.py +1 -1
  397. {sglang-0.5.3rc2.dist-info → sglang-0.5.4.dist-info}/METADATA +45 -33
  398. {sglang-0.5.3rc2.dist-info → sglang-0.5.4.dist-info}/RECORD +404 -345
  399. sglang/srt/layers/attention/mamba/mamba_utils.py +0 -81
  400. sglang/srt/managers/tp_worker_overlap_thread.py +0 -311
  401. sglang/srt/speculative/build_eagle_tree.py +0 -427
  402. sglang/test/test_block_fp8_ep.py +0 -358
  403. /sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/__init__.py +0 -0
  404. /sglang/srt/{aio_rwlock.py → utils/aio_rwlock.py} +0 -0
  405. /sglang/srt/{torch_memory_saver_adapter.py → utils/torch_memory_saver_adapter.py} +0 -0
  406. {sglang-0.5.3rc2.dist-info → sglang-0.5.4.dist-info}/WHEEL +0 -0
  407. {sglang-0.5.3rc2.dist-info → sglang-0.5.4.dist-info}/licenses/LICENSE +0 -0
  408. {sglang-0.5.3rc2.dist-info → sglang-0.5.4.dist-info}/top_level.txt +0 -0
sglang/srt/compilation/compiler_interface.py (new file, +503 -0)
@@ -0,0 +1,503 @@
+ # Adapted from https://github.com/vllm-project/vllm/blob/v0.10.0/vllm/compilation/compiler_interface.py
+
+ import contextlib
+ import copy
+ import hashlib
+ import os
+ from contextlib import ExitStack
+ from typing import Any, Callable, Optional
+ from unittest.mock import patch
+
+ import torch
+ import torch._inductor.compile_fx
+ import torch.fx as fx
+
+ from sglang.srt.compilation.compilation_counter import compilation_counter
+ from sglang.srt.compilation.inductor_pass import pass_context
+
+
+ class CompilerInterface:
+     """
+     The interface for a compiler that can be used by vLLM.
+     """
+
+     # The name of the compiler, e.g. inductor.
+     # This is a class-level attribute.
+     name: str
+
+     def initialize_cache(
+         self, cache_dir: str, disable_cache: bool = False, prefix: str = ""
+     ):
+         """
+         when the vLLM process uses `cache_dir` as the cache directory,
+         the compiler should initialize itself with the cache directory,
+         e.g. by re-directing its own cache directory to a sub-directory.
+
+         prefix can be used in combination with cache_dir to figure out the base
+         cache directory, e.g. there're multiple parts of model being compiled,
+         but we want to share the same cache directory for all of them.
+
+         e.g.
+         cache_dir = "/path/to/dir/backbone", prefix = "backbone"
+         cache_dir = "/path/to/dir/eagle_head", prefix = "eagle_head"
+         """
+         pass
+
+     def compute_hash(self) -> str:
+         """
+         Gather all the relevant information from the vLLM config,
+         to compute a hash so that we can cache the compiled model.
+
+         See [`VllmConfig.compute_hash`][vllm.config.VllmConfig.compute_hash]
+         to check what information
+         is already considered by default. This function should only
+         consider the information that is specific to the compiler.
+         """
+         return ""
+
+     def compile(
+         self,
+         graph: fx.GraphModule,
+         example_inputs: list[Any],
+         compiler_config: dict[str, Any],
+         runtime_shape: Optional[int] = None,
+         key: Optional[str] = None,
+     ) -> tuple[Optional[Callable], Optional[Any]]:
+         """
+         Compile the graph with the given example inputs and compiler config,
+         with a runtime shape. If the `runtime_shape` is None, it means
+         the `example_inputs` have a dynamic shape. Otherwise, the
+         `runtime_shape` specifies the shape of the inputs. Right now we only
+         support one variable shape for all inputs, which is the batchsize
+         (number of tokens) during inference.
+
+         Dynamo will make sure `graph(*example_inputs)` is valid.
+
+         The function should return a compiled callable function, as well as
+         a handle that can be used to directly load the compiled function.
+
+         The handle should be a plain Python object, preferably a string or a
+         file path for readability.
+
+         If the compiler doesn't support caching, it should return None for the
+         handle. If the compiler fails to compile the graph, it should return
+         None for the compiled function as well.
+
+         `key` is required for StandaloneInductorAdapter, it specifies where to
+         save the compiled artifact. The compiled artifact gets saved to
+         `cache_dir/key`.
+         """
+         return None, None
+
+     def load(
+         self,
+         handle: Any,
+         graph: fx.GraphModule,
+         example_inputs: list[Any],
+         graph_index: int,
+         runtime_shape: Optional[int] = None,
+     ) -> Callable:
+         """
+         Load the compiled function from the handle.
+         Raises an error if the handle is invalid.
+
+         The handle is the second return value of the `compile` function.
+         """
+         raise NotImplementedError("caching is not supported")
+
+
+ def get_inductor_factors() -> list[Any]:
+     factors: list[Any] = []
+     # summarize system state
+     from torch._inductor.codecache import CacheBase
+
+     system_factors = CacheBase.get_system()
+     factors.append(system_factors)
+
+     # summarize pytorch state
+     from torch._inductor.codecache import torch_key
+
+     torch_factors = torch_key()
+     factors.append(torch_factors)
+     return factors
+
+
+ class AlwaysHitShapeEnv:
+     """
+     Why do we need this class:
+
+     For normal `torch.compile` usage, every compilation will have
+     one Dynamo bytecode compilation and one Inductor compilation.
+     The Inductor compilation happens under the context of the
+     Dynamo bytecode compilation, and that context is used to
+     determine the dynamic shape information, etc.
+
+     For our use case, we only run Dynamo bytecode compilation once,
+     and run Inductor compilation multiple times with different shapes
+     plus a general shape. The compilation for specific shapes happens
+     outside of the context of the Dynamo bytecode compilation. At that
+     time, we don't have shape environment to provide to Inductor, and
+     it will fail the Inductor code cache lookup.
+
+     By providing a dummy shape environment that always hits, we can
+     make the Inductor code cache lookup always hit, and we can
+     compile the graph for different shapes as needed.
+
+     The following dummy methods are obtained by trial-and-error
+     until it works.
+     """
+
+     def __init__(self) -> None:
+         self.guards: list[Any] = []
+
+     def evaluate_guards_expression(self, *args, **kwargs):
+         return True
+
+     def get_pruned_guards(self, *args, **kwargs):
+         return []
+
+     def produce_guards_expression(self, *args, **kwargs):
+         return ""
+
+
+ class InductorAdaptor(CompilerInterface):
+     """
+     The adaptor for the Inductor compiler, version 2.5, 2.6, 2.7.
+     """
+
+     name = "inductor"
+
+     def compute_hash(self) -> str:
+         factors = get_inductor_factors()
+         hash_str = hashlib.md5(
+             str(factors).encode(), usedforsecurity=False
+         ).hexdigest()[:10]
+         return hash_str
+
+     def initialize_cache(
+         self, cache_dir: str, disable_cache: bool = False, prefix: str = ""
+     ):
+         self.cache_dir = cache_dir
+         self.prefix = prefix
+         self.base_cache_dir = cache_dir[: -len(prefix)] if prefix else cache_dir
+         if disable_cache:
+             return
+         # redirect the cache directory to a sub-directory
+         # set flags so that Inductor and Triton store their cache
+         # in the cache_dir, then users only need to copy the cache_dir
+         # to another machine to reuse the cache.
+         inductor_cache = os.path.join(self.base_cache_dir, "inductor_cache")
+         os.makedirs(inductor_cache, exist_ok=True)
+         os.environ["TORCHINDUCTOR_CACHE_DIR"] = inductor_cache
+         triton_cache = os.path.join(self.base_cache_dir, "triton_cache")
+         os.makedirs(triton_cache, exist_ok=True)
+         os.environ["TRITON_CACHE_DIR"] = triton_cache
+
+     def compile(
+         self,
+         graph: fx.GraphModule,
+         example_inputs: list[Any],
+         compiler_config: dict[str, Any],
+         runtime_shape: Optional[int] = None,
+         key: Optional[str] = None,
+     ) -> tuple[Optional[Callable], Optional[Any]]:
+         compilation_counter.num_inductor_compiles += 1
+         from torch._inductor.compile_fx import compile_fx
+
+         current_config = {}
+         if compiler_config is not None:
+             current_config.update(compiler_config)
+
+         # disable remote cache
+         current_config["fx_graph_cache"] = True
+         current_config["fx_graph_remote_cache"] = False
+
+         set_inductor_config(current_config, runtime_shape)
+
+         # inductor can inplace modify the graph, so we need to copy it
+         # see https://github.com/pytorch/pytorch/issues/138980
+         graph = copy.deepcopy(graph)
+
+         # it's the first time we compile this graph
+         # the assumption is that we don't have nested Inductor compilation.
+         # compiled_fx_graph_hash will only be called once, and we can hook
+         # it to get the hash of the compiled graph directly.
+
+         hash_str, file_path = None, None
+         from torch._inductor.codecache import FxGraphCache, compiled_fx_graph_hash
+
+         if torch.__version__.startswith("2.5"):
+             original_load = FxGraphCache.load
+             original_load_name = "torch._inductor.codecache.FxGraphCache.load"
+
+             def hijack_load(*args, **kwargs):
+                 inductor_compiled_graph = original_load(*args, **kwargs)
+                 nonlocal file_path
+                 compiled_fn = inductor_compiled_graph.current_callable
+                 file_path = compiled_fn.__code__.co_filename # noqa
+                 if not file_path.startswith(self.base_cache_dir):
+                     # hooked in the align_inputs_from_check_idxs function
+                     # in torch/_inductor/utils.py
+                     for cell in compiled_fn.__closure__:
+                         if not callable(cell.cell_contents):
+                             continue
+                         if cell.cell_contents.__code__.co_filename.startswith(
+                             self.base_cache_dir
+                         ):
+                             # this is the real file path compiled from Inductor
+                             file_path = cell.cell_contents.__code__.co_filename
+                             break
+                 return inductor_compiled_graph
+
+             hijacked_compile_fx_inner = (
+                 torch._inductor.compile_fx.compile_fx_inner
+             ) # noqa
+         elif torch.__version__ >= "2.6":
+             # function renamed in 2.6
+             original_load_name = None
+
+             def hijacked_compile_fx_inner(*args, **kwargs):
+                 output = torch._inductor.compile_fx.compile_fx_inner(*args, **kwargs)
+                 nonlocal hash_str
+                 inductor_compiled_graph = output
+                 if inductor_compiled_graph is not None:
+                     nonlocal file_path
+                     compiled_fn = inductor_compiled_graph.current_callable
+                     file_path = compiled_fn.__code__.co_filename # noqa
+                     if not file_path.startswith(self.base_cache_dir):
+                         # hooked in the align_inputs_from_check_idxs function
+                         # in torch/_inductor/utils.py
+                         for cell in compiled_fn.__closure__:
+                             if not callable(cell.cell_contents):
+                                 continue
+                             code = cell.cell_contents.__code__
+                             if code.co_filename.startswith(self.base_cache_dir):
+                                 # this is the real file path
+                                 # compiled from Inductor
+                                 file_path = code.co_filename
+                                 break
+                     hash_str = inductor_compiled_graph._fx_graph_cache_key
+                 return output
+
+         def hijack_compiled_fx_graph_hash(*args, **kwargs):
+             out = compiled_fx_graph_hash(*args, **kwargs)
+             nonlocal hash_str
+             hash_str = out[0]
+             return out
+
+         def _check_can_cache(*args, **kwargs):
+             # no error means it can be cached.
+             # Inductor refuses to cache the graph outside of Dynamo
+             # tracing context, and also disables caching for graphs
+             # with high-order ops.
+             # For vLLM, in either case, we want to cache the graph.
+             # see https://github.com/pytorch/pytorch/blob/9f5ebf3fc609105a74eab4ccc24932d6353ff566/torch/_inductor/codecache.py#L1221 # noqa
+             return
+
+         def _get_shape_env() -> AlwaysHitShapeEnv:
+             return AlwaysHitShapeEnv()
+
+         with ExitStack() as stack:
+             # hijack to get the compiled graph itself
+             if original_load_name is not None:
+                 stack.enter_context(patch(original_load_name, hijack_load))
+
+             # for hijacking the hash of the compiled graph
+             stack.enter_context(
+                 patch(
+                     "torch._inductor.codecache.compiled_fx_graph_hash",
+                     hijack_compiled_fx_graph_hash,
+                 )
+             )
+
+             # for providing a dummy shape environment
+             stack.enter_context(
+                 patch(
+                     "torch._inductor.codecache.FxGraphCache._get_shape_env",
+                     _get_shape_env,
+                 )
+             )
+
+             from torch._functorch._aot_autograd.autograd_cache import AOTAutogradCache
+
+             # torch 2.8+ on main uses _get_shape_env in AOTAutogradCache
+             if hasattr(AOTAutogradCache, "_get_shape_env"):
+                 stack.enter_context(
+                     patch(
+                         "torch._functorch._aot_autograd.autograd_cache.AOTAutogradCache._get_shape_env",
+                         _get_shape_env,
+                     )
+                 )
+
+             # for forcing the graph to be cached
+             stack.enter_context(
+                 patch(
+                     "torch._inductor.codecache.FxGraphCache._check_can_cache",
+                     _check_can_cache,
+                 )
+             )
+
+             # Dynamo metrics context, see method for more details.
+             stack.enter_context(self.metrics_context())
+
+             # Disable remote caching. When these are on, on remote cache-hit,
+             # the monkey-patched functions never actually get called.
+             # vLLM today assumes and requires the monkey-patched functions to
+             # get hit.
+             # TODO(zou3519): we're going to replace this all with
+             # standalone_compile sometime.
+
+             stack.enter_context(
+                 torch._inductor.config.patch(fx_graph_remote_cache=False)
+             )
+             # InductorAdaptor (unfortunately) requires AOTAutogradCache
+             # to be turned off to run. It will fail to acquire the hash_str
+             # and error if not.
+             # StandaloneInductorAdaptor (PyTorch 2.8+) fixes this problem.
+             stack.enter_context(
+                 torch._functorch.config.patch(enable_autograd_cache=False)
+             )
+             stack.enter_context(
+                 torch._functorch.config.patch(enable_remote_autograd_cache=False)
+             )
+
+             with pass_context(runtime_shape):
+                 compiled_graph = compile_fx(
+                     graph,
+                     example_inputs,
+                     inner_compile=hijacked_compile_fx_inner,
+                     config_patches=current_config,
+                 )
+             return compiled_graph, (hash_str, file_path)
+
+     def load(
+         self,
+         handle: Any,
+         graph: fx.GraphModule,
+         example_inputs: list[Any],
+         graph_index: int,
+         runtime_shape: Optional[int] = None,
+     ) -> Callable:
+         assert isinstance(handle, tuple)
+         assert isinstance(handle[0], str)
+         assert isinstance(handle[1], str)
+         hash_str = handle[0]
+
+         from torch._functorch._aot_autograd.autograd_cache import AOTAutogradCache
+         from torch._inductor.codecache import FxGraphCache
+
+         with ExitStack() as exit_stack:
+             exit_stack.enter_context(
+                 patch(
+                     "torch._inductor.codecache.FxGraphCache._get_shape_env",
+                     lambda *args, **kwargs: AlwaysHitShapeEnv(),
+                 )
+             )
+             # torch 2.8+ on main uses _get_shape_env in AOTAutogradCache
+             if hasattr(AOTAutogradCache, "_get_shape_env"):
+                 exit_stack.enter_context(
+                     patch(
+                         "torch._functorch._aot_autograd.autograd_cache.AOTAutogradCache._get_shape_env",
+                         lambda *args, **kwargs: AlwaysHitShapeEnv(),
+                     )
+                 )
+
+             # Dynamo metrics context, see method for more details.
+             exit_stack.enter_context(self.metrics_context())
+
+             if torch.__version__.startswith("2.5"):
+                 inductor_compiled_graph = FxGraphCache._lookup_graph(
+                     hash_str, example_inputs, True, False
+                 )
+                 assert inductor_compiled_graph is not None, (
+                     "Inductor cache lookup failed. Please remove"
+                     f"the cache directory and try again." # noqa
+                 )
+             elif torch.__version__ >= "2.6":
+                 from torch._inductor.output_code import CompiledFxGraphConstantsWithGm
+
+                 constants = CompiledFxGraphConstantsWithGm(graph)
+                 inductor_compiled_graph, _ = FxGraphCache._lookup_graph(
+                     hash_str, example_inputs, True, None, constants
+                 )
+                 assert inductor_compiled_graph is not None, (
+                     "Inductor cache lookup failed. Please remove"
+                     f"the cache directory and try again." # noqa
+                 )
+
+         # Inductor calling convention (function signature):
+         # f(list) -> tuple
+         # Dynamo calling convention (function signature):
+         # f(*args) -> Any
+
+         # need to know if the graph returns a tuple
+         from torch._inductor.compile_fx import graph_returns_tuple
+
+         returns_tuple = graph_returns_tuple(graph)
+
+         # this is the callable we return to Dynamo to run
+         def compiled_graph(*args):
+             # convert args to list
+             list_args = list(args)
+             graph_output = inductor_compiled_graph(list_args)
+             # unpack the tuple if needed
+             if returns_tuple:
+                 return graph_output
+             else:
+                 return graph_output[0]
+
+         return compiled_graph
+
+     def metrics_context(self) -> contextlib.AbstractContextManager:
+         """
+         This method returns the Dynamo metrics context (if it exists,
+         otherwise a null context). It is used by various compile components.
+         Present in torch>=2.6, it's used inside FxGraphCache in
+         torch==2.6 (but not after). It might also be used in various other
+         torch.compile internal functions.
+
+         Because it is re-entrant, we always set it (even if entering via Dynamo
+         and the context was already entered). We might want to revisit if it
+         should be set at a different level of compilation.
+
+         This is likely a bug in PyTorch: public APIs should not rely on
+         manually setting up internal contexts. But we also rely on non-public
+         APIs which might not provide these guarantees.
+         """
+         import torch._dynamo.utils
+
+         return torch._dynamo.utils.get_metrics_context()
+
+
+ def set_inductor_config(config, runtime_shape):
+     if isinstance(runtime_shape, int):
+         # for a specific batchsize, tuning triton kernel parameters
+         # can be beneficial
+         config["max_autotune"] = True
+         config["coordinate_descent_tuning"] = True
+
+
+ class EagerAdapter(CompilerInterface):
+     name = "eager"
+
+     def compile(
+         self,
+         graph: fx.GraphModule,
+         example_inputs: list[Any],
+         compiler_config: dict[str, Any],
+         runtime_shape: Optional[int] = None,
+         key: Optional[str] = None,
+         num_graphs: int = 1,
+     ) -> tuple[Optional[Callable], Optional[Any]]:
+         return graph, None
+
+     def load(
+         self,
+         handle: Any,
+         graph: fx.GraphModule,
+         example_inputs: list[Any],
+         graph_index: int,
+         runtime_shape: Optional[int] = None,
+         num_graphs: int = 1,
+     ) -> Callable:
+         raise NotImplementedError("eager compilation is not supported")
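
The file above is easiest to read as a two-phase contract: `compile` returns a (callable, handle) pair, where the handle for `InductorAdaptor` is the `(hash_str, file_path)` tuple, and `load` turns that handle back into a callable on a later run that shares the same cache directory. The driver below is an illustrative sketch, not part of this diff: the toy function, cache path, and use of `fx.symbolic_trace` as a stand-in for a Dynamo-produced graph are assumptions, since in sglang the graphs actually arrive via the compilation backend in `sglang/srt/compilation/backend.py`.

import torch
import torch.fx as fx

from sglang.srt.compilation.compiler_interface import InductorAdaptor


def toy(x: torch.Tensor) -> torch.Tensor:
    return torch.relu(x) + 1


# Stand-in for a Dynamo-produced graph; sglang's backend supplies the real one.
gm = fx.symbolic_trace(toy)
example_inputs = [torch.randn(8)]

adaptor = InductorAdaptor()
# cache_dir ends with prefix, so base_cache_dir ("/tmp/sgl_cache/") can be
# shared with other compiled parts of the model (e.g. an eagle_head).
adaptor.initialize_cache(cache_dir="/tmp/sgl_cache/backbone", prefix="backbone")

# Phase 1: compile for a fixed batch size; the handle is (hash_str, file_path).
compiled, handle = adaptor.compile(
    gm, example_inputs, compiler_config={}, runtime_shape=8
)

# Phase 2, e.g. a later process reusing the same cache_dir: rebuild the
# callable from the handle without recompiling. load() returns a function
# with the Dynamo calling convention, f(*args).
if handle is not None and all(isinstance(h, str) for h in handle):
    reloaded = adaptor.load(
        handle, gm, example_inputs, graph_index=0, runtime_shape=8
    )
    out = reloaded(*example_inputs)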