sglang 0.5.3rc0__py3-none-any.whl → 0.5.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sglang/bench_one_batch.py +54 -37
- sglang/bench_one_batch_server.py +340 -34
- sglang/bench_serving.py +340 -159
- sglang/check_env.py +1 -1
- sglang/compile_deep_gemm.py +6 -2
- sglang/global_config.py +1 -25
- sglang/lang/api.py +6 -0
- sglang/lang/backend/runtime_endpoint.py +1 -1
- sglang/lang/interpreter.py +1 -0
- sglang/lang/ir.py +13 -0
- sglang/launch_server.py +9 -2
- sglang/profiler.py +20 -3
- sglang/srt/_custom_ops.py +1 -1
- sglang/srt/batch_invariant_ops/__init__.py +27 -0
- sglang/srt/batch_invariant_ops/batch_invariant_ops.py +547 -0
- sglang/srt/checkpoint_engine/checkpoint_engine_worker.py +142 -0
- sglang/srt/compilation/backend.py +437 -0
- sglang/srt/compilation/compilation_config.py +20 -0
- sglang/srt/compilation/compilation_counter.py +47 -0
- sglang/srt/compilation/compile.py +210 -0
- sglang/srt/compilation/compiler_interface.py +503 -0
- sglang/srt/compilation/cuda_piecewise_backend.py +228 -0
- sglang/srt/compilation/fix_functionalization.py +134 -0
- sglang/srt/compilation/fx_utils.py +83 -0
- sglang/srt/compilation/inductor_pass.py +140 -0
- sglang/srt/compilation/pass_manager.py +66 -0
- sglang/srt/compilation/piecewise_context_manager.py +40 -0
- sglang/srt/compilation/weak_ref_tensor_jit.py +16 -0
- sglang/srt/configs/__init__.py +8 -0
- sglang/srt/configs/deepseek_ocr.py +262 -0
- sglang/srt/configs/deepseekvl2.py +194 -96
- sglang/srt/configs/dots_ocr.py +64 -0
- sglang/srt/configs/dots_vlm.py +2 -7
- sglang/srt/configs/falcon_h1.py +309 -0
- sglang/srt/configs/load_config.py +33 -2
- sglang/srt/configs/mamba_utils.py +117 -0
- sglang/srt/configs/model_config.py +284 -118
- sglang/srt/configs/modelopt_config.py +30 -0
- sglang/srt/configs/nemotron_h.py +286 -0
- sglang/srt/configs/olmo3.py +105 -0
- sglang/srt/configs/points_v15_chat.py +29 -0
- sglang/srt/configs/qwen3_next.py +11 -47
- sglang/srt/configs/qwen3_omni.py +613 -0
- sglang/srt/configs/qwen3_vl.py +576 -0
- sglang/srt/connector/remote_instance.py +1 -1
- sglang/srt/constrained/base_grammar_backend.py +6 -1
- sglang/srt/constrained/llguidance_backend.py +5 -0
- sglang/srt/constrained/outlines_backend.py +1 -1
- sglang/srt/constrained/outlines_jump_forward.py +1 -1
- sglang/srt/constrained/reasoner_grammar_backend.py +9 -6
- sglang/srt/constrained/utils.py +12 -0
- sglang/srt/constrained/xgrammar_backend.py +26 -15
- sglang/srt/debug_utils/dumper.py +10 -3
- sglang/srt/disaggregation/ascend/conn.py +2 -2
- sglang/srt/disaggregation/ascend/transfer_engine.py +48 -10
- sglang/srt/disaggregation/base/conn.py +17 -4
- sglang/srt/disaggregation/common/conn.py +268 -98
- sglang/srt/disaggregation/decode.py +172 -39
- sglang/srt/disaggregation/decode_kvcache_offload_manager.py +185 -0
- sglang/srt/disaggregation/decode_schedule_batch_mixin.py +25 -16
- sglang/srt/disaggregation/fake/conn.py +11 -3
- sglang/srt/disaggregation/mooncake/conn.py +203 -555
- sglang/srt/disaggregation/nixl/conn.py +217 -63
- sglang/srt/disaggregation/prefill.py +113 -270
- sglang/srt/disaggregation/utils.py +36 -5
- sglang/srt/distributed/device_communicators/all_reduce_utils.py +16 -0
- sglang/srt/distributed/device_communicators/custom_all_reduce.py +6 -6
- sglang/srt/distributed/device_communicators/pymscclpp.py +2 -2
- sglang/srt/distributed/device_communicators/pynccl.py +24 -12
- sglang/srt/distributed/device_communicators/pynccl_allocator.py +2 -2
- sglang/srt/distributed/device_communicators/shm_broadcast.py +4 -2
- sglang/srt/distributed/device_communicators/symm_mem.py +164 -0
- sglang/srt/distributed/naive_distributed.py +5 -4
- sglang/srt/distributed/parallel_state.py +203 -97
- sglang/srt/elastic_ep/elastic_ep.py +74 -0
- sglang/srt/entrypoints/context.py +3 -2
- sglang/srt/entrypoints/engine.py +85 -65
- sglang/srt/entrypoints/grpc_server.py +632 -305
- sglang/srt/entrypoints/harmony_utils.py +2 -2
- sglang/srt/entrypoints/http_server.py +169 -17
- sglang/srt/entrypoints/http_server_engine.py +1 -7
- sglang/srt/entrypoints/openai/protocol.py +327 -34
- sglang/srt/entrypoints/openai/serving_base.py +74 -8
- sglang/srt/entrypoints/openai/serving_chat.py +202 -118
- sglang/srt/entrypoints/openai/serving_classify.py +204 -0
- sglang/srt/entrypoints/openai/serving_completions.py +20 -4
- sglang/srt/entrypoints/openai/serving_embedding.py +1 -0
- sglang/srt/entrypoints/openai/serving_responses.py +47 -2
- sglang/srt/entrypoints/openai/serving_tokenize.py +144 -0
- sglang/srt/environ.py +323 -0
- sglang/srt/eplb/eplb_algorithms/__init__.py +18 -1
- sglang/srt/eplb/eplb_algorithms/deepseek.py +0 -2
- sglang/srt/eplb/eplb_algorithms/elasticity_aware.py +87 -0
- sglang/srt/eplb/expert_distribution.py +3 -4
- sglang/srt/eplb/expert_location.py +30 -5
- sglang/srt/eplb/expert_location_dispatch.py +2 -2
- sglang/srt/eplb/expert_location_updater.py +2 -2
- sglang/srt/function_call/base_format_detector.py +17 -18
- sglang/srt/function_call/function_call_parser.py +21 -16
- sglang/srt/function_call/glm4_moe_detector.py +4 -8
- sglang/srt/function_call/gpt_oss_detector.py +24 -1
- sglang/srt/function_call/json_array_parser.py +61 -0
- sglang/srt/function_call/kimik2_detector.py +17 -4
- sglang/srt/function_call/utils.py +98 -7
- sglang/srt/grpc/compile_proto.py +245 -0
- sglang/srt/grpc/grpc_request_manager.py +915 -0
- sglang/srt/grpc/health_servicer.py +189 -0
- sglang/srt/grpc/scheduler_launcher.py +181 -0
- sglang/srt/grpc/sglang_scheduler_pb2.py +81 -68
- sglang/srt/grpc/sglang_scheduler_pb2.pyi +124 -61
- sglang/srt/grpc/sglang_scheduler_pb2_grpc.py +92 -1
- sglang/srt/layers/activation.py +11 -7
- sglang/srt/layers/attention/aiter_backend.py +17 -18
- sglang/srt/layers/attention/ascend_backend.py +125 -10
- sglang/srt/layers/attention/attention_registry.py +226 -0
- sglang/srt/layers/attention/base_attn_backend.py +32 -4
- sglang/srt/layers/attention/cutlass_mla_backend.py +3 -3
- sglang/srt/layers/attention/double_sparsity_backend.py +2 -2
- sglang/srt/layers/attention/dual_chunk_flashattention_backend.py +1 -1
- sglang/srt/layers/attention/fla/chunk.py +0 -1
- sglang/srt/layers/attention/fla/chunk_o.py +1 -1
- sglang/srt/layers/attention/fla/chunk_scaled_dot_kkt.py +2 -2
- sglang/srt/layers/attention/fla/fused_recurrent.py +4 -4
- sglang/srt/layers/attention/fla/fused_sigmoid_gating_recurrent.py +2 -2
- sglang/srt/layers/attention/fla/index.py +0 -2
- sglang/srt/layers/attention/fla/layernorm_gated.py +50 -32
- sglang/srt/layers/attention/fla/utils.py +0 -3
- sglang/srt/layers/attention/fla/wy_fast.py +0 -2
- sglang/srt/layers/attention/flashattention_backend.py +52 -15
- sglang/srt/layers/attention/flashinfer_backend.py +357 -212
- sglang/srt/layers/attention/flashinfer_mla_backend.py +31 -33
- sglang/srt/layers/attention/flashmla_backend.py +9 -7
- sglang/srt/layers/attention/hybrid_attn_backend.py +12 -4
- sglang/srt/layers/attention/hybrid_linear_attn_backend.py +236 -133
- sglang/srt/layers/attention/intel_amx_backend.py +1 -1
- sglang/srt/layers/attention/mamba/causal_conv1d.py +2 -1
- sglang/srt/layers/attention/mamba/causal_conv1d_triton.py +24 -103
- sglang/srt/layers/attention/mamba/mamba.py +514 -1
- sglang/srt/layers/attention/mamba/mamba2_metadata.py +211 -0
- sglang/srt/layers/attention/mamba/mixer2_rms_norm_gated.py +120 -0
- sglang/srt/layers/attention/mamba/ops/__init__.py +2 -0
- sglang/srt/layers/attention/mamba/ops/layernorm_gated.py +172 -0
- sglang/srt/layers/attention/mamba/ops/mamba_ssm.py +442 -0
- sglang/srt/layers/attention/mamba/ops/ssd_bmm.py +214 -0
- sglang/srt/layers/attention/mamba/ops/ssd_chunk_scan.py +562 -0
- sglang/srt/layers/attention/mamba/ops/ssd_chunk_state.py +646 -0
- sglang/srt/layers/attention/mamba/ops/ssd_combined.py +261 -0
- sglang/srt/layers/attention/mamba/ops/ssd_state_passing.py +264 -0
- sglang/srt/layers/attention/npu_ops/mla_preprocess.py +393 -0
- sglang/srt/layers/attention/nsa/dequant_k_cache.py +163 -0
- sglang/srt/layers/attention/nsa/index_buf_accessor.py +354 -0
- sglang/srt/layers/attention/nsa/nsa_indexer.py +718 -0
- sglang/srt/layers/attention/nsa/quant_k_cache.py +255 -0
- sglang/srt/layers/attention/nsa/tilelang_kernel.py +785 -0
- sglang/srt/layers/attention/nsa/transform_index.py +144 -0
- sglang/srt/layers/attention/nsa/triton_kernel.py +136 -0
- sglang/srt/layers/attention/nsa/utils.py +23 -0
- sglang/srt/layers/attention/nsa_backend.py +1201 -0
- sglang/srt/layers/attention/tbo_backend.py +6 -6
- sglang/srt/layers/attention/torch_flex_backend.py +325 -0
- sglang/srt/layers/attention/triton_backend.py +249 -42
- sglang/srt/layers/attention/triton_ops/double_sparsity_attention.py +2 -2
- sglang/srt/layers/attention/triton_ops/extend_attention.py +539 -44
- sglang/srt/layers/attention/trtllm_mha_backend.py +7 -9
- sglang/srt/layers/attention/trtllm_mla_backend.py +523 -48
- sglang/srt/layers/attention/utils.py +11 -7
- sglang/srt/layers/attention/vision.py +61 -3
- sglang/srt/layers/attention/wave_backend.py +4 -4
- sglang/srt/layers/attention/xpu_backend.py +1028 -0
- sglang/srt/layers/communicator.py +19 -7
- sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/compile_utils.py +4 -8
- sglang/srt/layers/deep_gemm_wrapper/configurer.py +25 -0
- sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/entrypoint.py +3 -3
- sglang/srt/layers/dp_attention.py +28 -1
- sglang/srt/layers/elementwise.py +3 -1
- sglang/srt/layers/layernorm.py +47 -15
- sglang/srt/layers/linear.py +30 -5
- sglang/srt/layers/logits_processor.py +161 -18
- sglang/srt/layers/modelopt_utils.py +11 -0
- sglang/srt/layers/moe/cutlass_moe.py +0 -2
- sglang/srt/layers/moe/cutlass_w4a8_moe.py +213 -21
- sglang/srt/layers/moe/ep_moe/kernels.py +36 -458
- sglang/srt/layers/moe/ep_moe/layer.py +243 -448
- sglang/srt/layers/moe/flashinfer_cutedsl_moe.py +52 -25
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=128,N=192,device_name=NVIDIA_H200,dtype=fp8_w8a8.json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=256,N=256,device_name=NVIDIA_B200.json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=256,N=256,device_name=NVIDIA_H800,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=512,N=128,device_name=NVIDIA_H800,dtype=fp8_w8a8,block_shape=[128, 128].json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/configs/triton_3_4_0/E=512,N=256,device_name=NVIDIA_B200.json +146 -0
- sglang/srt/layers/moe/fused_moe_triton/fused_moe_triton_config.py +17 -5
- sglang/srt/layers/moe/fused_moe_triton/layer.py +86 -81
- sglang/srt/layers/moe/fused_moe_triton/triton_kernels_moe.py +18 -42
- sglang/srt/layers/moe/moe_runner/deep_gemm.py +304 -0
- sglang/srt/layers/moe/moe_runner/runner.py +3 -0
- sglang/srt/layers/moe/moe_runner/triton.py +3 -1
- sglang/srt/layers/moe/rocm_moe_utils.py +0 -1
- sglang/srt/layers/moe/router.py +51 -15
- sglang/srt/layers/moe/token_dispatcher/__init__.py +10 -0
- sglang/srt/layers/moe/token_dispatcher/base.py +1 -1
- sglang/srt/layers/moe/token_dispatcher/deepep.py +177 -106
- sglang/srt/layers/moe/token_dispatcher/mooncake.py +386 -0
- sglang/srt/layers/moe/token_dispatcher/standard.py +46 -0
- sglang/srt/layers/moe/topk.py +3 -2
- sglang/srt/layers/moe/utils.py +27 -1
- sglang/srt/layers/parameter.py +23 -6
- sglang/srt/layers/quantization/__init__.py +2 -53
- sglang/srt/layers/quantization/awq.py +183 -6
- sglang/srt/layers/quantization/awq_triton.py +29 -0
- sglang/srt/layers/quantization/base_config.py +20 -1
- sglang/srt/layers/quantization/compressed_tensors/__init__.py +7 -0
- sglang/srt/layers/quantization/compressed_tensors/compressed_tensors.py +21 -49
- sglang/srt/layers/quantization/compressed_tensors/compressed_tensors_moe.py +421 -70
- sglang/srt/layers/quantization/compressed_tensors/schemes/__init__.py +5 -0
- sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a16_fp8.py +4 -22
- sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py +173 -0
- sglang/srt/layers/quantization/compressed_tensors/schemes/compressed_tensors_wNa16.py +339 -0
- sglang/srt/layers/quantization/fp8.py +86 -20
- sglang/srt/layers/quantization/fp8_kernel.py +55 -10
- sglang/srt/layers/quantization/fp8_utils.py +43 -15
- sglang/srt/layers/quantization/fpgemm_fp8.py +2 -3
- sglang/srt/layers/quantization/gptq.py +0 -1
- sglang/srt/layers/quantization/int8_kernel.py +18 -2
- sglang/srt/layers/quantization/marlin_utils.py +12 -0
- sglang/srt/layers/quantization/modelopt_quant.py +141 -81
- sglang/srt/layers/quantization/mxfp4.py +17 -34
- sglang/srt/layers/quantization/petit.py +1 -1
- sglang/srt/layers/quantization/quark/quark.py +3 -1
- sglang/srt/layers/quantization/quark/quark_moe.py +18 -5
- sglang/srt/layers/quantization/quark/schemes/quark_w4a4_mxfp4.py +0 -7
- sglang/srt/layers/quantization/unquant.py +1 -4
- sglang/srt/layers/quantization/utils.py +0 -1
- sglang/srt/layers/quantization/w4afp8.py +51 -24
- sglang/srt/layers/quantization/w8a8_int8.py +45 -27
- sglang/srt/layers/radix_attention.py +59 -9
- sglang/srt/layers/rotary_embedding.py +750 -46
- sglang/srt/layers/sampler.py +84 -16
- sglang/srt/layers/sparse_pooler.py +98 -0
- sglang/srt/layers/utils.py +23 -1
- sglang/srt/layers/vocab_parallel_embedding.py +4 -1
- sglang/srt/lora/backend/base_backend.py +3 -3
- sglang/srt/lora/backend/chunked_backend.py +348 -0
- sglang/srt/lora/backend/triton_backend.py +9 -4
- sglang/srt/lora/eviction_policy.py +139 -0
- sglang/srt/lora/lora.py +7 -5
- sglang/srt/lora/lora_manager.py +33 -7
- sglang/srt/lora/lora_registry.py +1 -1
- sglang/srt/lora/mem_pool.py +41 -17
- sglang/srt/lora/triton_ops/__init__.py +4 -0
- sglang/srt/lora/triton_ops/chunked_sgmv_expand.py +214 -0
- sglang/srt/lora/triton_ops/chunked_sgmv_shrink.py +176 -0
- sglang/srt/lora/utils.py +7 -5
- sglang/srt/managers/cache_controller.py +83 -152
- sglang/srt/managers/data_parallel_controller.py +156 -87
- sglang/srt/managers/detokenizer_manager.py +51 -24
- sglang/srt/managers/io_struct.py +223 -129
- sglang/srt/managers/mm_utils.py +49 -10
- sglang/srt/managers/multi_tokenizer_mixin.py +83 -98
- sglang/srt/managers/multimodal_processor.py +1 -2
- sglang/srt/managers/overlap_utils.py +130 -0
- sglang/srt/managers/schedule_batch.py +340 -529
- sglang/srt/managers/schedule_policy.py +158 -18
- sglang/srt/managers/scheduler.py +665 -620
- sglang/srt/managers/scheduler_input_blocker.py +1 -1
- sglang/srt/managers/scheduler_metrics_mixin.py +150 -131
- sglang/srt/managers/scheduler_output_processor_mixin.py +337 -122
- sglang/srt/managers/scheduler_pp_mixin.py +341 -0
- sglang/srt/managers/scheduler_profiler_mixin.py +62 -15
- sglang/srt/managers/scheduler_runtime_checker_mixin.py +217 -0
- sglang/srt/managers/scheduler_update_weights_mixin.py +40 -14
- sglang/srt/managers/tokenizer_communicator_mixin.py +141 -19
- sglang/srt/managers/tokenizer_manager.py +462 -226
- sglang/srt/managers/tp_worker.py +217 -156
- sglang/srt/managers/utils.py +79 -47
- sglang/srt/mem_cache/allocator.py +21 -22
- sglang/srt/mem_cache/allocator_ascend.py +42 -28
- sglang/srt/mem_cache/base_prefix_cache.py +3 -3
- sglang/srt/mem_cache/chunk_cache.py +20 -2
- sglang/srt/mem_cache/common.py +480 -0
- sglang/srt/mem_cache/evict_policy.py +38 -0
- sglang/srt/mem_cache/hicache_storage.py +44 -2
- sglang/srt/mem_cache/hiradix_cache.py +134 -34
- sglang/srt/mem_cache/mamba_radix_cache.py +993 -0
- sglang/srt/mem_cache/memory_pool.py +602 -208
- sglang/srt/mem_cache/memory_pool_host.py +134 -183
- sglang/srt/mem_cache/multimodal_cache.py +0 -1
- sglang/srt/mem_cache/radix_cache.py +263 -78
- sglang/srt/mem_cache/radix_cache_cpp.py +29 -21
- sglang/srt/mem_cache/storage/__init__.py +10 -0
- sglang/srt/mem_cache/storage/aibrix_kvcache/aibrix_kvcache_storage.py +157 -0
- sglang/srt/mem_cache/storage/aibrix_kvcache/unit_test.py +97 -0
- sglang/srt/mem_cache/storage/backend_factory.py +223 -0
- sglang/srt/mem_cache/storage/eic/eic_storage.py +777 -0
- sglang/srt/mem_cache/storage/eic/test_unit.py +115 -0
- sglang/srt/mem_cache/storage/hf3fs/hf3fs_client.py +0 -1
- sglang/srt/mem_cache/storage/hf3fs/storage_hf3fs.py +180 -59
- sglang/srt/mem_cache/storage/lmcache/lmc_radix_cache.py +15 -9
- sglang/srt/mem_cache/storage/mooncake_store/mooncake_store.py +217 -26
- sglang/srt/mem_cache/storage/nixl/hicache_nixl.py +38 -9
- sglang/srt/mem_cache/storage/nixl/nixl_utils.py +1 -1
- sglang/srt/mem_cache/storage/nixl/test_hicache_nixl_storage.py +17 -2
- sglang/srt/mem_cache/swa_radix_cache.py +115 -58
- sglang/srt/metrics/collector.py +113 -120
- sglang/srt/metrics/func_timer.py +3 -8
- sglang/srt/metrics/utils.py +8 -1
- sglang/srt/model_executor/cpu_graph_runner.py +2 -2
- sglang/srt/model_executor/cuda_graph_runner.py +81 -36
- sglang/srt/model_executor/forward_batch_info.py +40 -50
- sglang/srt/model_executor/model_runner.py +507 -319
- sglang/srt/model_executor/npu_graph_runner.py +11 -5
- sglang/srt/model_executor/piecewise_cuda_graph_runner.py +539 -0
- sglang/srt/model_loader/__init__.py +1 -1
- sglang/srt/model_loader/loader.py +438 -37
- sglang/srt/model_loader/utils.py +0 -1
- sglang/srt/model_loader/weight_utils.py +200 -27
- sglang/srt/models/apertus.py +2 -3
- sglang/srt/models/arcee.py +2 -2
- sglang/srt/models/bailing_moe.py +40 -56
- sglang/srt/models/bailing_moe_nextn.py +3 -4
- sglang/srt/models/bert.py +1 -1
- sglang/srt/models/deepseek_nextn.py +25 -4
- sglang/srt/models/deepseek_ocr.py +1516 -0
- sglang/srt/models/deepseek_v2.py +793 -235
- sglang/srt/models/dots_ocr.py +171 -0
- sglang/srt/models/dots_vlm.py +0 -1
- sglang/srt/models/dots_vlm_vit.py +1 -1
- sglang/srt/models/falcon_h1.py +570 -0
- sglang/srt/models/gemma3_causal.py +0 -2
- sglang/srt/models/gemma3_mm.py +17 -1
- sglang/srt/models/gemma3n_mm.py +2 -3
- sglang/srt/models/glm4_moe.py +17 -40
- sglang/srt/models/glm4_moe_nextn.py +4 -4
- sglang/srt/models/glm4v.py +3 -2
- sglang/srt/models/glm4v_moe.py +6 -6
- sglang/srt/models/gpt_oss.py +12 -35
- sglang/srt/models/grok.py +10 -23
- sglang/srt/models/hunyuan.py +2 -7
- sglang/srt/models/interns1.py +0 -1
- sglang/srt/models/kimi_vl.py +1 -7
- sglang/srt/models/kimi_vl_moonvit.py +4 -2
- sglang/srt/models/llama.py +6 -2
- sglang/srt/models/llama_eagle3.py +1 -1
- sglang/srt/models/longcat_flash.py +6 -23
- sglang/srt/models/longcat_flash_nextn.py +4 -15
- sglang/srt/models/mimo.py +2 -13
- sglang/srt/models/mimo_mtp.py +1 -2
- sglang/srt/models/minicpmo.py +7 -5
- sglang/srt/models/mixtral.py +1 -4
- sglang/srt/models/mllama.py +1 -1
- sglang/srt/models/mllama4.py +27 -6
- sglang/srt/models/nemotron_h.py +511 -0
- sglang/srt/models/olmo2.py +31 -4
- sglang/srt/models/opt.py +5 -5
- sglang/srt/models/phi.py +1 -1
- sglang/srt/models/phi4mm.py +1 -1
- sglang/srt/models/phimoe.py +0 -1
- sglang/srt/models/pixtral.py +0 -3
- sglang/srt/models/points_v15_chat.py +186 -0
- sglang/srt/models/qwen.py +0 -1
- sglang/srt/models/qwen2.py +0 -7
- sglang/srt/models/qwen2_5_vl.py +5 -5
- sglang/srt/models/qwen2_audio.py +2 -15
- sglang/srt/models/qwen2_moe.py +70 -4
- sglang/srt/models/qwen2_vl.py +6 -3
- sglang/srt/models/qwen3.py +18 -3
- sglang/srt/models/qwen3_moe.py +50 -38
- sglang/srt/models/qwen3_next.py +43 -21
- sglang/srt/models/qwen3_next_mtp.py +3 -4
- sglang/srt/models/qwen3_omni_moe.py +661 -0
- sglang/srt/models/qwen3_vl.py +791 -0
- sglang/srt/models/qwen3_vl_moe.py +343 -0
- sglang/srt/models/registry.py +15 -3
- sglang/srt/models/roberta.py +55 -3
- sglang/srt/models/sarashina2_vision.py +268 -0
- sglang/srt/models/solar.py +505 -0
- sglang/srt/models/starcoder2.py +357 -0
- sglang/srt/models/step3_vl.py +3 -5
- sglang/srt/models/torch_native_llama.py +9 -2
- sglang/srt/models/utils.py +61 -0
- sglang/srt/multimodal/processors/base_processor.py +21 -9
- sglang/srt/multimodal/processors/deepseek_ocr.py +37 -0
- sglang/srt/multimodal/processors/deepseek_vl_v2.py +0 -3
- sglang/srt/multimodal/processors/dots_vlm.py +2 -4
- sglang/srt/multimodal/processors/glm4v.py +1 -5
- sglang/srt/multimodal/processors/internvl.py +20 -10
- sglang/srt/multimodal/processors/janus_pro.py +0 -1
- sglang/srt/multimodal/processors/mllama4.py +0 -8
- sglang/srt/multimodal/processors/phi4mm.py +0 -1
- sglang/srt/multimodal/processors/points_v15_chat.py +52 -0
- sglang/srt/multimodal/processors/qwen_vl.py +83 -17
- sglang/srt/multimodal/processors/sarashina2_vision.py +81 -0
- sglang/srt/multimodal/processors/step3_vl.py +1 -1
- sglang/srt/parser/conversation.py +41 -0
- sglang/srt/parser/jinja_template_utils.py +6 -0
- sglang/srt/parser/reasoning_parser.py +0 -1
- sglang/srt/sampling/custom_logit_processor.py +77 -2
- sglang/srt/sampling/sampling_batch_info.py +36 -23
- sglang/srt/sampling/sampling_params.py +75 -0
- sglang/srt/server_args.py +1300 -338
- sglang/srt/server_args_config_parser.py +146 -0
- sglang/srt/single_batch_overlap.py +161 -0
- sglang/srt/speculative/base_spec_worker.py +34 -0
- sglang/srt/speculative/cpp_ngram/ngram.cpp +374 -0
- sglang/srt/speculative/cpp_ngram/ngram.h +110 -0
- sglang/srt/speculative/cpp_ngram/ngram_cache.py +138 -0
- sglang/srt/speculative/cpp_ngram/ngram_cache_binding.cpp +43 -0
- sglang/srt/speculative/cpp_ngram/param.h +125 -0
- sglang/srt/speculative/cpp_ngram/queue.h +71 -0
- sglang/srt/speculative/draft_utils.py +226 -0
- sglang/srt/speculative/eagle_draft_cuda_graph_runner.py +26 -8
- sglang/srt/speculative/eagle_draft_extend_cuda_graph_runner.py +26 -3
- sglang/srt/speculative/eagle_info.py +786 -0
- sglang/srt/speculative/eagle_info_v2.py +458 -0
- sglang/srt/speculative/eagle_utils.py +113 -1270
- sglang/srt/speculative/eagle_worker.py +120 -285
- sglang/srt/speculative/eagle_worker_v2.py +702 -0
- sglang/srt/speculative/ngram_info.py +433 -0
- sglang/srt/speculative/ngram_worker.py +246 -0
- sglang/srt/speculative/spec_info.py +49 -0
- sglang/srt/speculative/spec_utils.py +641 -0
- sglang/srt/speculative/standalone_worker.py +4 -14
- sglang/srt/tokenizer/tiktoken_tokenizer.py +2 -2
- sglang/srt/tracing/trace.py +32 -6
- sglang/srt/two_batch_overlap.py +35 -18
- sglang/srt/utils/__init__.py +2 -0
- sglang/srt/{bench_utils.py → utils/bench_utils.py} +4 -2
- sglang/srt/{utils.py → utils/common.py} +583 -113
- sglang/srt/{hf_transformers_utils.py → utils/hf_transformers_utils.py} +86 -19
- sglang/srt/{host_shared_memory.py → utils/host_shared_memory.py} +0 -1
- sglang/srt/{offloader.py → utils/offloader.py} +4 -4
- sglang/srt/{patch_torch.py → utils/patch_torch.py} +8 -0
- sglang/srt/utils/profile_merger.py +199 -0
- sglang/srt/utils/rpd_utils.py +452 -0
- sglang/srt/utils/slow_rank_detector.py +71 -0
- sglang/srt/{torch_memory_saver_adapter.py → utils/torch_memory_saver_adapter.py} +5 -7
- sglang/srt/warmup.py +8 -4
- sglang/srt/weight_sync/utils.py +1 -1
- sglang/test/attention/test_flashattn_backend.py +1 -1
- sglang/test/attention/test_flashattn_mla_backend.py +0 -1
- sglang/test/attention/test_prefix_chunk_info.py +0 -2
- sglang/test/attention/test_trtllm_mla_backend.py +221 -53
- sglang/test/few_shot_gsm8k_engine.py +2 -4
- sglang/test/get_logits_ut.py +57 -0
- sglang/test/kit_matched_stop.py +157 -0
- sglang/test/longbench_v2/__init__.py +1 -0
- sglang/test/longbench_v2/test_longbench_v2_eval.py +238 -0
- sglang/test/longbench_v2/validate_longbench_v2.py +337 -0
- sglang/test/longbench_v2/validate_longbench_v2_standalone.py +306 -0
- sglang/test/run_eval.py +120 -11
- sglang/test/runners.py +3 -1
- sglang/test/send_one.py +42 -7
- sglang/test/simple_eval_common.py +8 -2
- sglang/test/simple_eval_gpqa.py +0 -1
- sglang/test/simple_eval_humaneval.py +0 -3
- sglang/test/simple_eval_longbench_v2.py +344 -0
- sglang/test/simple_eval_mmmu_vlm.py +441 -0
- sglang/test/test_block_fp8.py +3 -4
- sglang/test/test_block_fp8_deep_gemm_blackwell.py +0 -1
- sglang/test/test_cutlass_moe.py +1 -2
- sglang/test/test_cutlass_w4a8_moe.py +10 -20
- sglang/test/test_deterministic.py +430 -0
- sglang/test/test_deterministic_utils.py +73 -0
- sglang/test/test_disaggregation_utils.py +93 -1
- sglang/test/test_marlin_moe.py +0 -1
- sglang/test/test_programs.py +1 -1
- sglang/test/test_utils.py +432 -16
- sglang/utils.py +10 -1
- sglang/version.py +1 -1
- {sglang-0.5.3rc0.dist-info → sglang-0.5.4.dist-info}/METADATA +64 -43
- {sglang-0.5.3rc0.dist-info → sglang-0.5.4.dist-info}/RECORD +476 -346
- sglang/srt/entrypoints/grpc_request_manager.py +0 -580
- sglang/srt/layers/quantization/deep_gemm_wrapper/configurer.py +0 -32
- sglang/srt/managers/tp_worker_overlap_thread.py +0 -319
- sglang/srt/mem_cache/lora_radix_cache.py +0 -421
- sglang/srt/speculative/build_eagle_tree.py +0 -427
- sglang/test/test_block_fp8_ep.py +0 -358
- /sglang/srt/layers/{quantization/deep_gemm_wrapper → deep_gemm_wrapper}/__init__.py +0 -0
- /sglang/srt/{remote_instance_weight_loader_utils.py → model_loader/remote_instance_weight_loader_utils.py} +0 -0
- /sglang/srt/{aio_rwlock.py → utils/aio_rwlock.py} +0 -0
- /sglang/srt/{poll_based_barrier.py → utils/poll_based_barrier.py} +0 -0
- {sglang-0.5.3rc0.dist-info → sglang-0.5.4.dist-info}/WHEEL +0 -0
- {sglang-0.5.3rc0.dist-info → sglang-0.5.4.dist-info}/licenses/LICENSE +0 -0
- {sglang-0.5.3rc0.dist-info → sglang-0.5.4.dist-info}/top_level.txt +0 -0
sglang/test/simple_eval_humaneval.py

@@ -11,8 +11,6 @@ import re
 from concurrent.futures import ThreadPoolExecutor, as_completed
 from typing import Dict, List, Optional
 
-import tqdm
-
 try:
     from human_eval.data import read_problems
     from human_eval.evaluation import estimate_pass_at_k
@@ -41,7 +39,6 @@ def evaluate_functional_correctness(
     Evaluates the functional correctness of generated samples, and writes
     results to f"{sample_file}_results.jsonl.gz"
     """
-    import copy
 
     # Check the generated samples against test suites.
     with ThreadPoolExecutor(max_workers=n_workers) as executor:
sglang/test/simple_eval_longbench_v2.py

@@ -0,0 +1,344 @@
+# Adapted from https://github.com/openai/simple-evals/
+
+"""
+LongBench v2: Towards Deeper Understanding and Reasoning on Realistic Long-Context Multitasks
+Yushi Bai, Shangqing Tu, Jiajie Zhang, Hao Peng, Xiaozhi Wang, Xin Lv, Shulin Cao, Jiazheng Xu, Lei Hou, Yuxiao Dong, Jie Tang, Juanzi Li
+https://arxiv.org/abs/2412.15204
+"""
+
+import csv
+import json
+import os
+import re
+from typing import Any, Dict, List, Optional
+
+from transformers import AutoTokenizer
+
+from sglang.test import simple_eval_common as common
+from sglang.test.simple_eval_common import (
+    ANSWER_PATTERN_MULTICHOICE,
+    HTML_JINJA,
+    Eval,
+    EvalResult,
+    SamplerBase,
+    SingleEvalResult,
+)
+
+# LongBench-v2 task categories
+TASK_CATEGORIES = {
+    "single_document_qa",
+    "multi_document_qa",
+    "long_in_context_learning",
+    "long_dialogue_history",
+    "code_repo_understanding",
+    "long_structured_data",
+}
+
+DEFAULT_DATASET = "THUDM/LongBench-v2"
+DEFAULT_DATASET_SPLIT = "train"
+
+
+def format_longbench_v2_question(row: dict) -> str:
+    """Format a LongBench-v2 question using the official template."""
+    context = row.get("context", "")
+    question = row.get("question", "")
+
+    # Handle both standard format (A, B, C, D) and alternative format (choices list)
+    if "choices" in row:
+        choices = row["choices"]
+        choice_A = choices[0] if len(choices) > 0 else ""
+        choice_B = choices[1] if len(choices) > 1 else ""
+        choice_C = choices[2] if len(choices) > 2 else ""
+        choice_D = choices[3] if len(choices) > 3 else ""
+    else:
+        choice_A = row.get("A", row.get("choice_A", ""))
+        choice_B = row.get("B", row.get("choice_B", ""))
+        choice_C = row.get("C", row.get("choice_C", ""))
+        choice_D = row.get("D", row.get("choice_D", ""))
+
+    # Official LongBench-v2 template
+    prompt = f"""
+Please read the following text and answer the question below.
+<text>
+{context.strip()}
+</text>
+
+What is the correct answer to this question: {question.strip()}
+Choices:
+(A) {choice_A.strip()}
+(B) {choice_B.strip()}
+(C) {choice_C.strip()}
+(D) {choice_D.strip()}
+
+Format your response as follows: "The correct answer is (insert answer here)"."""
+
+    return prompt
+
+
+def extract_longbench_v2_answer(response: str) -> Optional[str]:
+    """Extract answer from model response using official LongBench-v2 method."""
+    response = response.replace("*", "")
+
+    # First try: "The correct answer is (A)"
+    match = re.search(r"The correct answer is \(([A-D])\)", response, re.IGNORECASE)
+    if match:
+        return match.group(1).upper()
+
+    # Second try: "The correct answer is A"
+    match = re.search(r"The correct answer is ([A-D])", response, re.IGNORECASE)
+    if match:
+        return match.group(1).upper()
+
+    # Fallback: Standard SGLang multichoice pattern
+    match = re.search(ANSWER_PATTERN_MULTICHOICE, response)
+    if match:
+        return match.group(1).upper()
+
+    # Generic fallback when model says "answer is A"
+    match = re.search(r"answer\s+is\s*\(?([A-D])\)?", response, re.IGNORECASE)
+    if match:
+        return match.group(1).upper()
+
+    return None
+
+
+class LongBenchV2Eval(Eval):
+    """
+    Evaluation utility for LongBench-v2 dataset.
+
+    LongBench-v2 is designed to assess the ability of LLMs to handle long-context problems
+    requiring deep understanding and reasoning across real-world multitasks.
+    """
+
+    def __init__(
+        self,
+        model: str = None,
+        data_source: str = DEFAULT_DATASET,
+        num_examples: Optional[int] = None,
+        num_threads: int = 1,
+        n_repeats: int = 1,
+        categories: Optional[List[str]] = None,
+        max_context_length: Optional[int] = None,
+        min_context_length: Optional[int] = None,
+    ):
+        """
+        Initialize LongBench-v2 evaluation.
+
+        Args:
+            data_source: HuggingFace dataset name, local file path (CSV/JSON)
+            num_examples: Number of examples to evaluate (None for all)
+            num_threads: Number of threads for parallel processing
+            n_repeats: Number of times to repeat evaluation for error bars
+            categories: List of task categories to include (None for all)
+            max_context_length: Maximum context length in characters
+            min_context_length: Minimum context length in characters
+        """
+        self.tokenizer = AutoTokenizer.from_pretrained(model, trust_remote_code=True)
+        self.min_context_length = min_context_length
+        self.max_context_length = max_context_length
+        # Load dataset based on data source type
+        examples = self._load_dataset(data_source)
+
+        # Apply filtering
+        if categories:
+            examples = [ex for ex in examples if ex.get("category") in categories]
+
+        # Sample examples if specified
+        if num_examples:
+            assert n_repeats == 1, "n_repeats only supported when not sampling examples"
+            examples = examples[: min(num_examples, len(examples))]
+
+        # Repeat examples for multiple runs
+        examples = examples * n_repeats
+
+        if not examples:
+            raise ValueError(
+                "No examples available for LongBench-v2 evaluation after filtering"
+            )
+
+        self.examples = examples
+        self.n_repeats = n_repeats
+        self.num_threads = num_threads
+
+        print(f"Loaded {len(self.examples)} examples from LongBench-v2")
+        if categories:
+            print(f"Filtered to categories: {categories}")
+        if min_context_length or max_context_length:
+            print(
+                f"Context length filter: {min_context_length}-{max_context_length} characters"
+            )
+
+    def _load_dataset(self, data_source: str) -> List[Dict[str, Any]]:
+        """Load dataset from HuggingFace hub or local files."""
+
+        if not data_source:
+            data_source = DEFAULT_DATASET
+
+        if os.path.exists(data_source):
+            raw_examples = self._load_local_file(data_source)
+        else:
+            raw_examples = self._load_hf_dataset(data_source)
+
+        return [self._normalize_example(example) for example in raw_examples]
+
+    def _load_local_file(self, path: str) -> List[Dict[str, Any]]:
+        """Load examples from a local CSV/JSON/JSONL file."""
+
+        suffix = os.path.splitext(path)[1].lower()
+        if suffix in {".json", ".jsonl"}:
+            with open(path, "r", encoding="utf-8") as fh:
+                if suffix == ".jsonl":
+                    data = [json.loads(line) for line in fh if line.strip()]
+                else:
+                    data = json.load(fh)
+        elif suffix == ".csv":
+            with open(path, "r", encoding="utf-8") as fh:
+                reader = csv.DictReader(fh)
+                data = list(reader)
+        else:
+            # Try JSON, then CSV as fallback
+            try:
+                with open(path, "r", encoding="utf-8") as fh:
+                    data = json.load(fh)
+            except json.JSONDecodeError:
+                with open(path, "r", encoding="utf-8") as fh:
+                    reader = csv.DictReader(fh)
+                    data = list(reader)
+
+        if isinstance(data, dict):
+            data = data.get("data", [])
+
+        if not isinstance(data, list):
+            raise ValueError("Expected list of examples from local file")
+
+        return data
+
+    def _load_hf_dataset(self, identifier: str) -> List[Dict[str, Any]]:
+        """Load the dataset from HuggingFace Hub."""
+
+        parts = identifier.split(":", maxsplit=1)
+        dataset_name = parts[0]
+        split = parts[1] if len(parts) == 2 else DEFAULT_DATASET_SPLIT
+
+        try:
+            from datasets import load_dataset  # type: ignore
+        except ImportError as exc:
+            raise ImportError(
+                "Please install the 'datasets' package to load LongBench-v2 from HuggingFace: pip install datasets"
+            ) from exc
+
+        dataset = load_dataset(dataset_name, split=split)
+        return [dict(row) for row in dataset]
+
+    def _normalize_example(self, example: Dict[str, Any]) -> Dict[str, Any]:
+        """Ensure each example exposes the expected keys."""
+
+        normalized = dict(example)
+
+        for letter in ["A", "B", "C", "D"]:
+            choice_key = f"choice_{letter}"
+            if letter not in normalized and choice_key in normalized:
+                normalized[letter] = normalized[choice_key]
+
+        if "category" not in normalized and "domain" in normalized:
+            normalized["category"] = normalized["domain"]
+
+        answer = normalized.get("answer")
+        if isinstance(answer, str):
+            normalized["answer"] = answer.strip().upper()
+        elif isinstance(answer, int) and 0 <= answer < 4:
+            normalized["answer"] = ["A", "B", "C", "D"][answer]
+
+        return normalized
+
+    def _check_context_length(
+        self,
+        formatted_question: str,
+        tokenizer: AutoTokenizer,
+        min_length: Optional[int],
+        max_length: Optional[int],
+    ) -> bool:
+        """Filter examples by context length measured in characters."""
+        input_ids = tokenizer.encode(formatted_question)
+        context_length = len(input_ids)
+
+        if min_length is not None and context_length < min_length:
+            return False
+        if max_length is not None and context_length > max_length:
+            return False
+
+        return True
+
+    def __call__(self, sampler: SamplerBase) -> EvalResult:
+        """Run the evaluation."""
+
+        def fn(row: dict):
+            # Format the question using official template
+            formatted_question = format_longbench_v2_question(row)
+
+            if self.min_context_length or self.max_context_length:
+                if not self._check_context_length(
+                    formatted_question,
+                    self.tokenizer,
+                    self.min_context_length,
+                    self.max_context_length,
+                ):
+                    # Skip this example
+                    return None
+
+            prompt_messages = [
+                sampler._pack_message(content=formatted_question, role="user")
+            ]
+
+            # Get model response
+            response_text = sampler(prompt_messages)
+            if response_text is None:
+                response_text = ""
+
+            # Extract answer using official method
+            extracted_answer = extract_longbench_v2_answer(response_text)
+
+            # Get correct answer
+            correct_answer = row.get("answer", "")
+            if isinstance(correct_answer, str):
+                correct_answer = correct_answer.strip().upper()
+            elif isinstance(correct_answer, int) and 0 <= correct_answer < 4:
+                correct_answer = ["A", "B", "C", "D"][correct_answer]
+
+            # Calculate score
+            score = 1.0 if extracted_answer == correct_answer else 0.0
+
+            # Generate HTML report
+            html = common.jinja_env.from_string(HTML_JINJA).render(
+                prompt_messages=prompt_messages,
+                next_message=dict(content=response_text, role="assistant"),
+                score=score,
+                correct_answer=correct_answer,
+                extracted_answer=extracted_answer,
+            )
+
+            # Build conversation
+            convo = prompt_messages + [dict(content=response_text, role="assistant")]
+
+            # Prepare metrics
+            metrics = {"chars": len(response_text)}
+
+            # Add category-specific metrics
+            category = row.get("category", row.get("domain", "unknown"))
+            if category in TASK_CATEGORIES:
+                metrics[category] = score
+
+            difficulty = row.get("difficulty")
+            if isinstance(difficulty, str) and difficulty:
+                metrics[f"difficulty_{difficulty.lower()}"] = score
+
+            return SingleEvalResult(
+                html=html,
+                score=score,
+                convo=convo,
+                metrics=metrics,
+            )
+
+        # Run evaluation with progress tracking
+        results = common.map_with_progress(fn, self.examples, self.num_threads)
+        return common.aggregate_results(results)
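
For orientation, a minimal sketch of how the new LongBench-v2 eval might be driven against a running sglang server. The sampler class, its arguments, the endpoint URL, and the model name are assumptions based on how the other simple-evals tasks are typically wired up through sglang/test/run_eval.py; they are not taken from this diff.

# Hedged usage sketch (not from this diff): drive LongBenchV2Eval with an
# OpenAI-compatible sampler against a locally launched sglang server.
from sglang.test.simple_eval_common import ChatCompletionSampler  # assumed helper
from sglang.test.simple_eval_longbench_v2 import LongBenchV2Eval

# Assumed: an sglang server is already running and exposes /v1 on port 30000.
sampler = ChatCompletionSampler(
    model="meta-llama/Llama-3.1-8B-Instruct",  # hypothetical served model
    base_url="http://127.0.0.1:30000/v1",      # assumed local endpoint
)

eval_obj = LongBenchV2Eval(
    model="meta-llama/Llama-3.1-8B-Instruct",  # used only to load the tokenizer
    num_examples=8,                            # small smoke-test subset
    num_threads=4,
    categories=["single_document_qa"],
)

result = eval_obj(sampler)  # returns an EvalResult from simple_eval_common
print(result.score, result.metrics)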