evalscope 1.0.0__py3-none-any.whl → 1.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- evalscope/api/benchmark/__init__.py +9 -1
- evalscope/api/benchmark/adapters/__init__.py +4 -0
- evalscope/api/benchmark/adapters/agent_adapter.py +8 -0
- evalscope/api/benchmark/adapters/default_data_adapter.py +75 -4
- evalscope/api/benchmark/adapters/image_edit_adapter.py +82 -0
- evalscope/api/benchmark/adapters/multi_choice_adapter.py +5 -2
- evalscope/api/benchmark/adapters/ner_adapter.py +212 -0
- evalscope/api/benchmark/adapters/text2image_adapter.py +12 -10
- evalscope/api/benchmark/adapters/vision_language_adapter.py +8 -0
- evalscope/api/benchmark/benchmark.py +85 -2
- evalscope/api/benchmark/meta.py +10 -1
- evalscope/api/dataset/dataset.py +27 -6
- evalscope/api/dataset/loader.py +8 -3
- evalscope/api/evaluator/cache.py +31 -4
- evalscope/api/evaluator/evaluator.py +5 -0
- evalscope/api/evaluator/state.py +17 -1
- evalscope/api/messages/__init__.py +1 -0
- evalscope/api/messages/chat_message.py +52 -2
- evalscope/api/metric/__init__.py +1 -1
- evalscope/api/metric/metric.py +6 -1
- evalscope/api/metric/scorer.py +15 -7
- evalscope/api/mixin/__init__.py +1 -1
- evalscope/api/mixin/llm_judge_mixin.py +2 -0
- evalscope/api/mixin/sandbox_mixin.py +182 -0
- evalscope/api/model/generate_config.py +10 -6
- evalscope/api/model/model.py +5 -2
- evalscope/api/tool/tool_info.py +1 -1
- evalscope/app/app.py +3 -0
- evalscope/app/ui/multi_model.py +6 -1
- evalscope/app/ui/single_model.py +11 -5
- evalscope/app/utils/data_utils.py +8 -7
- evalscope/app/utils/env_utils.py +12 -0
- evalscope/app/utils/text_utils.py +14 -12
- evalscope/app/utils/visualization.py +2 -2
- evalscope/arguments.py +8 -4
- evalscope/backend/opencompass/backend_manager.py +0 -2
- evalscope/backend/rag_eval/utils/embedding.py +9 -1
- evalscope/benchmarks/aa_lcr/aa_lcr_adapter.py +205 -0
- evalscope/benchmarks/ai2d/ai2d_adapter.py +54 -0
- evalscope/benchmarks/aime/aime24_adapter.py +5 -0
- evalscope/benchmarks/aime/aime25_adapter.py +136 -1
- evalscope/benchmarks/aime/grader.py +307 -0
- evalscope/benchmarks/aime/math_normalize.py +189 -0
- evalscope/benchmarks/amc/amc_adapter.py +51 -0
- evalscope/benchmarks/arena_hard/arena_hard_adapter.py +1 -0
- evalscope/benchmarks/bbh/bbh_adapter.py +43 -17
- evalscope/benchmarks/bfcl/{bfcl_adapter.py → v3/bfcl_v3_adapter.py} +131 -19
- evalscope/benchmarks/bfcl/{generation.py → v3/generation.py} +9 -9
- evalscope/benchmarks/bfcl/v3/utils.py +23 -0
- evalscope/benchmarks/bfcl/v4/__init__.py +0 -0
- evalscope/benchmarks/bfcl/v4/bfcl_v4_adapter.py +229 -0
- evalscope/benchmarks/bfcl/v4/utils.py +410 -0
- evalscope/benchmarks/biomix_qa/__init__.py +0 -0
- evalscope/benchmarks/biomix_qa/biomix_qa_adapter.py +36 -0
- evalscope/benchmarks/blink/__init__.py +0 -0
- evalscope/benchmarks/blink/blink_adapter.py +61 -0
- evalscope/benchmarks/ceval/ceval_adapter.py +1 -2
- evalscope/benchmarks/chartqa/__init__.py +0 -0
- evalscope/benchmarks/chartqa/chartqa_adapter.py +80 -0
- evalscope/benchmarks/chartqa/utils.py +38 -0
- evalscope/benchmarks/coin_flip/__init__.py +0 -0
- evalscope/benchmarks/coin_flip/coin_flip_adapter.py +128 -0
- evalscope/benchmarks/commonsense_qa/__init__.py +0 -0
- evalscope/benchmarks/commonsense_qa/commonsense_qa_adapter.py +32 -0
- evalscope/benchmarks/competition_math/competition_math_adapter.py +5 -0
- evalscope/benchmarks/data_collection/data_collection_adapter.py +24 -19
- evalscope/benchmarks/docvqa/__init__.py +0 -0
- evalscope/benchmarks/docvqa/docvqa_adapter.py +67 -0
- evalscope/benchmarks/drivelology/__init__.py +0 -0
- evalscope/benchmarks/drivelology/drivelology_binary_adapter.py +170 -0
- evalscope/benchmarks/drivelology/drivelology_multilabel_adapter.py +254 -0
- evalscope/benchmarks/drivelology/drivelology_selection_adapter.py +49 -0
- evalscope/benchmarks/drivelology/drivelology_writing_adapter.py +218 -0
- evalscope/benchmarks/drop/drop_adapter.py +15 -44
- evalscope/benchmarks/drop/utils.py +97 -0
- evalscope/benchmarks/frames/frames_adapter.py +2 -1
- evalscope/benchmarks/general_arena/general_arena_adapter.py +7 -2
- evalscope/benchmarks/general_arena/utils.py +2 -1
- evalscope/benchmarks/general_mcq/general_mcq_adapter.py +1 -1
- evalscope/benchmarks/general_qa/general_qa_adapter.py +1 -1
- evalscope/benchmarks/gsm8k/gsm8k_adapter.py +25 -9
- evalscope/benchmarks/hallusion_bench/__init__.py +0 -0
- evalscope/benchmarks/hallusion_bench/hallusion_bench_adapter.py +159 -0
- evalscope/benchmarks/halu_eval/__init__.py +0 -0
- evalscope/benchmarks/halu_eval/halu_eval_adapter.py +128 -0
- evalscope/benchmarks/halu_eval/halu_eval_instructions.py +84 -0
- evalscope/benchmarks/healthbench/__init__.py +0 -0
- evalscope/benchmarks/healthbench/healthbench_adapter.py +282 -0
- evalscope/benchmarks/healthbench/utils.py +102 -0
- evalscope/benchmarks/hle/hle_adapter.py +3 -2
- evalscope/benchmarks/humaneval/humaneval_adapter.py +24 -52
- evalscope/benchmarks/humaneval/utils.py +235 -0
- evalscope/benchmarks/ifeval/instructions_util.py +2 -3
- evalscope/benchmarks/image_edit/__init__.py +0 -0
- evalscope/benchmarks/image_edit/gedit/__init__.py +0 -0
- evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +138 -0
- evalscope/benchmarks/image_edit/gedit/utils.py +372 -0
- evalscope/benchmarks/image_edit/gedit/vie_prompts.py +406 -0
- evalscope/benchmarks/infovqa/__init__.py +0 -0
- evalscope/benchmarks/infovqa/infovqa_adapter.py +66 -0
- evalscope/benchmarks/live_code_bench/evaluate_utils.py +13 -6
- evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +66 -54
- evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0
- evalscope/benchmarks/logi_qa/__int__.py +0 -0
- evalscope/benchmarks/logi_qa/logi_qa_adapter.py +41 -0
- evalscope/benchmarks/math_500/math_500_adapter.py +5 -1
- evalscope/benchmarks/math_qa/__init__.py +0 -0
- evalscope/benchmarks/math_qa/math_qa_adapter.py +35 -0
- evalscope/benchmarks/math_verse/__init__.py +0 -0
- evalscope/benchmarks/math_verse/math_verse_adapter.py +105 -0
- evalscope/benchmarks/math_vision/__init__.py +0 -0
- evalscope/benchmarks/math_vision/math_vision_adapter.py +116 -0
- evalscope/benchmarks/math_vista/__init__.py +0 -0
- evalscope/benchmarks/math_vista/math_vista_adapter.py +114 -0
- evalscope/benchmarks/med_mcqa/__init__.py +0 -0
- evalscope/benchmarks/med_mcqa/med_mcqa_adapter.py +32 -0
- evalscope/benchmarks/minerva_math/__init__.py +0 -0
- evalscope/benchmarks/minerva_math/minerva_math_adapter.py +53 -0
- evalscope/benchmarks/mm_bench/__init__.py +0 -0
- evalscope/benchmarks/mm_bench/mm_bench_adapter.py +99 -0
- evalscope/benchmarks/mm_star/__init__.py +0 -0
- evalscope/benchmarks/mm_star/mm_star_adapter.py +73 -0
- evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +1 -1
- evalscope/benchmarks/mmmu/__init__.py +0 -0
- evalscope/benchmarks/mmmu/mmmu_adapter.py +159 -0
- evalscope/benchmarks/mmmu_pro/__init__.py +0 -0
- evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +124 -0
- evalscope/benchmarks/mri_mcqa/__init__.py +0 -0
- evalscope/benchmarks/mri_mcqa/mri_mcqa_adapter.py +34 -0
- evalscope/benchmarks/multi_if/__init__.py +0 -0
- evalscope/benchmarks/multi_if/ifeval.py +3354 -0
- evalscope/benchmarks/multi_if/metrics.py +120 -0
- evalscope/benchmarks/multi_if/multi_if_adapter.py +161 -0
- evalscope/benchmarks/music_trivia/__init__.py +0 -0
- evalscope/benchmarks/music_trivia/music_trivia_adapter.py +36 -0
- evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +7 -6
- evalscope/benchmarks/ner/__init__.py +0 -0
- evalscope/benchmarks/ner/broad_twitter_corpus_adapter.py +52 -0
- evalscope/benchmarks/ner/conll2003_adapter.py +48 -0
- evalscope/benchmarks/ner/copious_adapter.py +85 -0
- evalscope/benchmarks/ner/cross_ner_adapter.py +120 -0
- evalscope/benchmarks/ner/cross_ner_entities/__init__.py +0 -0
- evalscope/benchmarks/ner/cross_ner_entities/ai.py +54 -0
- evalscope/benchmarks/ner/cross_ner_entities/literature.py +36 -0
- evalscope/benchmarks/ner/cross_ner_entities/music.py +39 -0
- evalscope/benchmarks/ner/cross_ner_entities/politics.py +37 -0
- evalscope/benchmarks/ner/cross_ner_entities/science.py +58 -0
- evalscope/benchmarks/ner/genia_ner_adapter.py +66 -0
- evalscope/benchmarks/ner/harvey_ner_adapter.py +58 -0
- evalscope/benchmarks/ner/mit_movie_trivia_adapter.py +74 -0
- evalscope/benchmarks/ner/mit_restaurant_adapter.py +66 -0
- evalscope/benchmarks/ner/ontonotes5_adapter.py +87 -0
- evalscope/benchmarks/ner/wnut2017_adapter.py +61 -0
- evalscope/benchmarks/ocr_bench/__init__.py +0 -0
- evalscope/benchmarks/ocr_bench/ocr_bench/__init__.py +0 -0
- evalscope/benchmarks/ocr_bench/ocr_bench/ocr_bench_adapter.py +101 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/IoUscore_metric.py +87 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/TEDS_metric.py +963 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/__init__.py +0 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/ocr_bench_v2_adapter.py +161 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/page_ocr_metric.py +50 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/parallel.py +46 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/__init__.py +0 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/readme.txt +26 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/rrc_evaluation_funcs_1_1.py +537 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/script.py +481 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_metric.py +179 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/utils.py +433 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/vqa_metric.py +254 -0
- evalscope/benchmarks/olympiad_bench/__init__.py +0 -0
- evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +163 -0
- evalscope/benchmarks/olympiad_bench/utils.py +565 -0
- evalscope/benchmarks/omni_bench/__init__.py +0 -0
- evalscope/benchmarks/omni_bench/omni_bench_adapter.py +86 -0
- evalscope/benchmarks/omnidoc_bench/__init__.py +0 -0
- evalscope/benchmarks/omnidoc_bench/end2end_eval.py +349 -0
- evalscope/benchmarks/omnidoc_bench/metrics.py +547 -0
- evalscope/benchmarks/omnidoc_bench/omnidoc_bench_adapter.py +135 -0
- evalscope/benchmarks/omnidoc_bench/utils.py +1937 -0
- evalscope/benchmarks/piqa/__init__.py +0 -0
- evalscope/benchmarks/piqa/piqa_adapter.py +32 -0
- evalscope/benchmarks/poly_math/__init__.py +0 -0
- evalscope/benchmarks/poly_math/poly_math_adapter.py +132 -0
- evalscope/benchmarks/poly_math/utils/instruction.py +105 -0
- evalscope/benchmarks/pope/__init__.py +0 -0
- evalscope/benchmarks/pope/pope_adapter.py +112 -0
- evalscope/benchmarks/process_bench/process_bench_adapter.py +1 -0
- evalscope/benchmarks/pumed_qa/__init__.py +0 -0
- evalscope/benchmarks/pumed_qa/pubmed_qa_adapter.py +175 -0
- evalscope/benchmarks/qasc/__init__.py +0 -0
- evalscope/benchmarks/qasc/qasc_adapter.py +35 -0
- evalscope/benchmarks/real_world_qa/__init__.py +0 -0
- evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +64 -0
- evalscope/benchmarks/sciq/__init__.py +0 -0
- evalscope/benchmarks/sciq/sciq_adapter.py +36 -0
- evalscope/benchmarks/seed_bench_2_plus/__init__.py +0 -0
- evalscope/benchmarks/seed_bench_2_plus/seed_bench_2_plus_adapter.py +72 -0
- evalscope/benchmarks/simple_qa/simple_qa_adapter.py +1 -1
- evalscope/benchmarks/simple_vqa/__init__.py +0 -0
- evalscope/benchmarks/simple_vqa/simple_vqa_adapter.py +169 -0
- evalscope/benchmarks/siqa/__init__.py +0 -0
- evalscope/benchmarks/siqa/siqa_adapter.py +39 -0
- evalscope/benchmarks/tau_bench/tau2_bench/__init__.py +0 -0
- evalscope/benchmarks/tau_bench/tau2_bench/generation.py +158 -0
- evalscope/benchmarks/tau_bench/tau2_bench/tau2_bench_adapter.py +146 -0
- evalscope/benchmarks/tau_bench/tau_bench/__init__.py +0 -0
- evalscope/benchmarks/tau_bench/{generation.py → tau_bench/generation.py} +1 -1
- evalscope/benchmarks/tau_bench/{tau_bench_adapter.py → tau_bench/tau_bench_adapter.py} +29 -29
- evalscope/benchmarks/text2image/__init__.py +0 -0
- evalscope/benchmarks/{aigc/t2i → text2image}/evalmuse_adapter.py +3 -1
- evalscope/benchmarks/{aigc/t2i → text2image}/genai_bench_adapter.py +2 -2
- evalscope/benchmarks/{aigc/t2i → text2image}/general_t2i_adapter.py +1 -1
- evalscope/benchmarks/{aigc/t2i → text2image}/hpdv2_adapter.py +7 -2
- evalscope/benchmarks/{aigc/t2i → text2image}/tifa_adapter.py +1 -0
- evalscope/benchmarks/tool_bench/tool_bench_adapter.py +3 -3
- evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +1 -2
- evalscope/benchmarks/visu_logic/__init__.py +0 -0
- evalscope/benchmarks/visu_logic/visu_logic_adapter.py +75 -0
- evalscope/benchmarks/wmt/__init__.py +0 -0
- evalscope/benchmarks/wmt/wmt24_adapter.py +294 -0
- evalscope/benchmarks/zerobench/__init__.py +0 -0
- evalscope/benchmarks/zerobench/zerobench_adapter.py +64 -0
- evalscope/cli/start_app.py +7 -1
- evalscope/cli/start_perf.py +7 -1
- evalscope/config.py +103 -18
- evalscope/constants.py +18 -0
- evalscope/evaluator/evaluator.py +138 -82
- evalscope/metrics/bert_score/__init__.py +0 -0
- evalscope/metrics/bert_score/scorer.py +338 -0
- evalscope/metrics/bert_score/utils.py +697 -0
- evalscope/metrics/llm_judge.py +19 -7
- evalscope/metrics/math_parser.py +14 -0
- evalscope/metrics/metric.py +317 -13
- evalscope/metrics/metrics.py +37 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +2 -6
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +2 -6
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +2 -6
- evalscope/models/image_edit_model.py +125 -0
- evalscope/models/model_apis.py +22 -0
- evalscope/models/openai_compatible.py +21 -0
- evalscope/models/text2image_model.py +2 -2
- evalscope/models/utils/openai.py +16 -6
- evalscope/perf/arguments.py +26 -4
- evalscope/perf/benchmark.py +76 -89
- evalscope/perf/http_client.py +31 -16
- evalscope/perf/main.py +15 -2
- evalscope/perf/plugin/api/base.py +9 -7
- evalscope/perf/plugin/api/custom_api.py +13 -58
- evalscope/perf/plugin/api/default_api.py +188 -79
- evalscope/perf/plugin/api/openai_api.py +85 -20
- evalscope/perf/plugin/datasets/base.py +21 -0
- evalscope/perf/plugin/datasets/custom.py +2 -3
- evalscope/perf/plugin/datasets/flickr8k.py +2 -2
- evalscope/perf/plugin/datasets/kontext_bench.py +2 -2
- evalscope/perf/plugin/datasets/line_by_line.py +2 -3
- evalscope/perf/plugin/datasets/longalpaca.py +2 -3
- evalscope/perf/plugin/datasets/openqa.py +2 -4
- evalscope/perf/plugin/datasets/random_dataset.py +1 -3
- evalscope/perf/plugin/datasets/random_vl_dataset.py +2 -2
- evalscope/perf/utils/benchmark_util.py +43 -27
- evalscope/perf/utils/db_util.py +14 -19
- evalscope/perf/utils/local_server.py +3 -44
- evalscope/perf/utils/log_utils.py +21 -6
- evalscope/report/__init__.py +13 -3
- evalscope/report/combinator.py +91 -20
- evalscope/report/generator.py +8 -87
- evalscope/report/report.py +8 -4
- evalscope/run.py +13 -5
- evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -4
- evalscope/utils/argument_utils.py +1 -1
- evalscope/utils/chat_service.py +1 -1
- evalscope/utils/function_utils.py +249 -12
- evalscope/utils/import_utils.py +73 -1
- evalscope/utils/io_utils.py +132 -7
- evalscope/utils/json_schema.py +25 -2
- evalscope/utils/logger.py +69 -18
- evalscope/utils/model_utils.py +4 -3
- evalscope/utils/multi_choices.py +39 -7
- evalscope/utils/ner.py +377 -0
- evalscope/version.py +2 -2
- {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/METADATA +252 -408
- {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/RECORD +290 -154
- {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/WHEEL +1 -1
- {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/top_level.txt +0 -1
- evalscope/api/mixin/dataset_mixin.py +0 -105
- evalscope/benchmarks/aigc/i2i/general_i2i_adapter.py +0 -44
- tests/__init__.py +0 -1
- tests/aigc/__init__.py +0 -1
- tests/aigc/test_t2i.py +0 -142
- tests/benchmark/__init__.py +0 -1
- tests/benchmark/test_eval.py +0 -386
- tests/cli/__init__.py +0 -1
- tests/cli/test_all.py +0 -229
- tests/cli/test_collection.py +0 -96
- tests/cli/test_custom.py +0 -268
- tests/perf/__init__.py +0 -1
- tests/perf/test_perf.py +0 -176
- tests/rag/test_clip_benchmark.py +0 -90
- tests/rag/test_mteb.py +0 -213
- tests/rag/test_ragas.py +0 -128
- tests/swift/__init__.py +0 -1
- tests/swift/test_run_swift_eval.py +0 -146
- tests/swift/test_run_swift_vlm_eval.py +0 -128
- tests/swift/test_run_swift_vlm_jugde_eval.py +0 -157
- tests/test_run_all.py +0 -12
- tests/utils.py +0 -13
- tests/vlm/__init__.py +0 -1
- tests/vlm/test_vlmeval.py +0 -102
- /evalscope/benchmarks/{aigc → aa_lcr}/__init__.py +0 -0
- /evalscope/benchmarks/{aigc/i2i → ai2d}/__init__.py +0 -0
- /evalscope/benchmarks/{aigc/t2i → amc}/__init__.py +0 -0
- {tests/rag → evalscope/benchmarks/bfcl/v3}/__init__.py +0 -0
- {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info}/entry_points.txt +0 -0
- {evalscope-1.0.0.dist-info → evalscope-1.2.0.dist-info/licenses}/LICENSE +0 -0
evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +66 -54

@@ -1,3 +1,4 @@
+# flake8: noqa: E501
 from typing import Any, Dict

 from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
@@ -7,7 +8,7 @@ from evalscope.api.messages.chat_message import ChatMessageUser
 from evalscope.api.metric import Score
 from evalscope.api.registry import register_benchmark
 from evalscope.constants import Tags
-from evalscope.utils.io_utils import
+from evalscope.utils.io_utils import convert_normal_types
 from evalscope.utils.logger import get_logger

 logger = get_logger()
@@ -19,17 +20,18 @@ logger = get_logger()
     pretty_name='Live-Code-Bench',
     tags=[Tags.CODING],
     description=
-    'Live Code Bench is a benchmark for evaluating code generation models on real-world coding tasks. It includes a variety of programming problems with test cases to assess the model\'s ability to generate correct and efficient code solutions.'
+    'Live Code Bench is a benchmark for evaluating code generation models on real-world coding tasks. It includes a variety of programming problems with test cases to assess the model\'s ability to generate correct and efficient code solutions. '
+    '**By default the code is executed in local environment. We recommend using sandbox execution to safely run and evaluate the generated code, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/sandbox.html) for more details.**',
     dataset_id='AI-ModelScope/code_generation_lite',
     subset_list=['release_latest'],
-
+    aggregation='mean_and_pass_at_k',
     eval_split='test',
     prompt_template=
     '### Question:\n{question_content}\n\n{format_prompt} ### Answer: (use the provided format with backticks)\n\n',
+    review_timeout=6,
     extra_params={
         'start_date': None,
         'end_date': None,
-        'timeout': 6,
         'debug': False
     },
 )
@@ -42,7 +44,6 @@ class LiveCodeBenchAdapter(DefaultDataAdapter):
     def __init__(self, **kwargs):
         super().__init__(**kwargs)

-        self.timeout = self.extra_params.get('timeout', 6)
         self.debug = self.extra_params.get('debug', False)
         self.start_date = self.extra_params.get('start_date')
         self.end_date = self.extra_params.get('end_date')
@@ -81,58 +82,69 @@ class LiveCodeBenchAdapter(DefaultDataAdapter):
     def match_score(
         self, original_prediction: str, filtered_prediction: str, reference: str, task_state: TaskState
     ) -> Score:
-        from .evaluate_utils import codegen_metrics
-
         score = Score(
             extracted_prediction=filtered_prediction,
             prediction=original_prediction,
         )

-        [... 34 removed lines of the previous in-process evaluation logic; their content is not rendered in this diff view ...]
+        if not self.use_sandbox:
+            # Use original evaluation method
+            from .evaluate_utils import codegen_metrics
+
+            references = [{'input_output': task_state.metadata['evaluation_sample']}]
+            predictions = [[filtered_prediction]]
+
+            try:
+                metrics, eval_results, final_metadata = codegen_metrics(
+                    references,
+                    predictions,
+                    k_list=[1],
+                    num_process_evaluate=1,
+                    timeout=self.review_timeout,
+                    debug=self.debug,
+                )
+                pass_rate = metrics['pass@1'] / 100  # convert to point scale
+
+                score.value = {'acc': float(pass_rate > 0)}
+                score.explanation = f"Pass@1: {metrics['pass@1']}%"
+
+                # Convert numpy types to native Python types for JSON serialization
+                serializable_eval_results = convert_normal_types(eval_results)
+                serializable_final_metadata = convert_normal_types(final_metadata)
+
+                score.metadata = {
+                    'pass_rate': float(pass_rate),
+                    'timeout': self.review_timeout,
+                    'debug': self.debug,
+                    'eval_results': serializable_eval_results,
+                    'final_metadata': serializable_final_metadata
+                }
+            except Exception as e:
+                score.value = {'acc': False}
+                score.explanation = f'Evaluation failed: {str(e)}'
+                score.metadata = {'error': str(e)}
+        else:
+            # Use sandbox execution
+            try:
+                from .sandbox_evaluate_utils import evaluate_in_sandbox
+
+                evaluation_sample = task_state.metadata['evaluation_sample']
+                passed, detailed_results = evaluate_in_sandbox(
+                    self, filtered_prediction, evaluation_sample, timeout=self.review_timeout, debug=self.debug
+                )
+
+                score.value = {'acc': passed}
+                score.explanation = f"Sandbox execution: {'Passed' if passed else 'Failed'}"
+                score.metadata = {
+                    'timeout': self.review_timeout,
+                    'debug': self.debug,
+                    'execution_method': 'sandbox',
+                    'detailed_results': detailed_results
+                }
+            except Exception as e:
+                score.value = {'acc': False}
+                score.explanation = f'Sandbox evaluation failed: {str(e)}'
+                score.metadata = {'error': str(e), 'execution_method': 'sandbox'}
+
+        score.main_score_name = 'acc'
         return score
-
-    def aggregate_scores(self, sample_scores):
-        from evalscope.metrics.metric import PassAtK
-
-        # calculate pass@k here
-        agg_list = []
-        for metric in self.metric_list:
-            if metric.lower().startswith('pass@'):
-                k = int(metric.split('@')[1])
-                # Get the scores for this metric
-                agg = PassAtK(k)
-                agg_list.extend(agg(sample_scores))
-        return agg_list
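The adapter change above moves the execution timeout from `extra_params['timeout']` to the top-level `review_timeout` field and adds a `use_sandbox` branch to `match_score`. Below is a minimal run sketch; the `TaskConfig`/`run_task` entry point follows evalscope's documented usage, but the exact keys accepted under `dataset_args` (in particular whether `review_timeout` can be overridden there) are assumptions based on this diff, not confirmed against 1.2.0.

```python
# Hypothetical configuration sketch; the dataset override keys are inferred
# from this diff (review_timeout, extra_params) and may differ in practice.
from evalscope import TaskConfig, run_task

task = TaskConfig(
    model='my-coder-model',                  # placeholder model identifier
    datasets=['live_code_bench'],
    dataset_args={
        'live_code_bench': {
            'subset_list': ['release_latest'],
            'review_timeout': 6,             # replaces extra_params['timeout'] from 1.0.0
            'extra_params': {'start_date': None, 'end_date': None, 'debug': False},
        }
    },
)
run_task(task)
```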
evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0 (new file)

@@ -0,0 +1,220 @@
+import json
+from typing import TYPE_CHECKING, Dict, List, Tuple
+
+from evalscope.utils.logger import get_logger
+
+if TYPE_CHECKING:
+    from evalscope.api.mixin.sandbox_mixin import SandboxMixin
+
+logger = get_logger()
+
+
+def evaluate_in_sandbox(
+    adapter: 'SandboxMixin',
+    code: str,
+    evaluation_sample: str,
+    timeout: int = 6,
+    debug: bool = False
+) -> Tuple[bool, Dict]:
+    """
+    Evaluate code in sandbox environment for Live Code Bench.
+
+    Args:
+        adapter: The adapter instance with sandbox capabilities
+        code: The code to evaluate
+        evaluation_sample: JSON string containing input/output test cases
+        timeout: Timeout for execution
+        debug: Whether to enable debug logging
+
+    Returns:
+        Tuple[bool, Dict]: (overall_pass, detailed_results)
+    """
+    try:
+        # Parse the evaluation sample
+        test_data = json.loads(evaluation_sample)
+        inputs = test_data.get('inputs', [])
+        outputs = test_data.get('outputs', [])
+        fn_name = test_data.get('fn_name')
+
+        if debug:
+            logger.info(f'Evaluating code with {len(inputs)} test cases')
+            logger.info(f'Function name: {fn_name}')
+
+        # Determine if this is call-based or stdio-based
+        if fn_name:
+            # Call-based evaluation
+            return _evaluate_call_based_in_sandbox(adapter, code, inputs, outputs, fn_name, timeout, debug)
+        else:
+            # Standard input/output evaluation
+            return _evaluate_stdio_in_sandbox(adapter, code, inputs, outputs, timeout, debug)
+
+    except Exception as e:
+        if debug:
+            logger.error(f'Sandbox evaluation error: {str(e)}')
+        return False, {'error': str(e), 'total_tests': 0, 'passed_tests': 0}
+
+
+def _evaluate_call_based_in_sandbox(
+    adapter: 'SandboxMixin', code: str, inputs: list, outputs: list, fn_name: str, timeout: int, debug: bool
+) -> Tuple[bool, Dict]:
+    """Evaluate call-based problems in sandbox."""
+    try:
+        all_passed = True
+        passed_count = 0
+        failed_cases = []
+
+        for i, (test_input, expected_output) in enumerate(zip(inputs, outputs)):
+            # Prepare individual test code for each test case
+            test_code = f"""
+import json
+import sys
+
+# User's code
+{code}
+
+# Test execution for single test case
+try:
+    test_input = {repr(test_input)}
+    expected_output = {repr(expected_output)}
+
+    if 'class Solution' in '''{code}''':
+        # LeetCode style
+        solution = Solution()
+        method = getattr(solution, '{fn_name}')
+    else:
+        # Function is directly available
+        method = {fn_name}
+
+    # Parse input if it's JSON string
+    if isinstance(test_input, str):
+        try:
+            test_input = json.loads(test_input)
+        except:
+            pass  # Keep as string if not valid JSON
+
+    # Call the method
+    if isinstance(test_input, list):
+        result = method(*test_input)
+    else:
+        result = method(test_input)
+
+    # Parse expected output if it's JSON string
+    if isinstance(expected_output, str):
+        try:
+            expected_output = json.loads(expected_output)
+        except:
+            pass  # Keep as string if not valid JSON
+
+    # Convert tuple to list for comparison
+    if isinstance(result, tuple):
+        result = list(result)
+
+    if result == expected_output:
+        print("TEST_PASSED")
+    else:
+        print(f"TEST_FAILED: expected {{expected_output}}, got {{result}}")
+
+except Exception as e:
+    print(f"EXECUTION_ERROR: {{str(e)}}")
+    import traceback
+    traceback.print_exc()
+"""
+
+            # Execute in sandbox
+            result = adapter.execute_code_in_sandbox(code=test_code, timeout=timeout, language='python')
+
+            if debug:
+                logger.info(f'Test case {i} execution result: {result}')
+
+            # Check if execution was successful and test passed
+            if result.get('status') == 'success':
+                output = result.get('output', '')
+                if 'TEST_PASSED' in output:
+                    passed_count += 1
+                elif 'TEST_FAILED:' in output:
+                    # Extract failure details from output
+                    for line in output.split('\n'):
+                        if line.startswith('TEST_FAILED:'):
+                            failed_cases.append(f"Test {i}: {line.replace('TEST_FAILED: ', '')}")
+                            break
+                    all_passed = False
+                    break
+                elif 'EXECUTION_ERROR:' in output:
+                    # Extract error details
+                    for line in output.split('\n'):
+                        if line.startswith('EXECUTION_ERROR:'):
+                            failed_cases.append(f'Test {i}: {line}')
+                            break
+                    all_passed = False
+                    break
+                else:
+                    failed_cases.append(f'Test {i}: Unknown error in output. Result: {result}')
+                    all_passed = False
+                    break
+            else:
+                failed_cases.append(f'Test {i}: Sandbox execution failed - Result: {result}')
+                all_passed = False
+                break
+
+        detailed_results = {'total_tests': len(inputs), 'passed_tests': passed_count, 'failed_cases': failed_cases}
+
+        return all_passed, detailed_results
+
+    except Exception as e:
+        if debug:
+            logger.error(f'Call-based evaluation error: {str(e)}')
+        return False, {'error': str(e), 'total_tests': len(inputs), 'passed_tests': 0}
+
+
+def _evaluate_stdio_in_sandbox(
+    adapter: 'SandboxMixin', code: str, inputs: list, outputs: list, timeout: int, debug: bool
+) -> Tuple[bool, Dict]:
+    """Evaluate stdio-based problems in sandbox."""
+    try:
+        all_passed = True
+        passed_count = 0
+        failed_cases = []
+
+        for i, (test_input, expected_output) in enumerate(zip(inputs, outputs)):
+            test_code = f"""
+import sys
+from io import StringIO
+
+# Redirect stdin
+sys.stdin = StringIO('''{test_input}''')
+
+# User's code
+{code}
+"""
+
+            # Execute in sandbox
+            result = adapter.execute_code_in_sandbox(code=test_code, timeout=timeout, language='python')
+
+            if result.get('status') != 'success':
+                if debug:
+                    logger.error(f'Test case {i} execution failed: {result}')
+                failed_cases.append(f'Test {i}: Execution error - Result: {result}')
+                all_passed = False
+                break
+
+            # Compare output
+            actual_output = result.get('output', '').strip()
+            expected_output = expected_output.strip()
+
+            if actual_output == expected_output:
+                passed_count += 1
+            else:
+                if debug:
+                    logger.info(f"Test case {i} failed: expected '{expected_output}', got '{actual_output}'")
+                failed_cases.append(f"Test {i}: Expected '{expected_output}', got '{actual_output}'")
+                all_passed = False
+                break
+
+        detailed_results = {'total_tests': len(inputs), 'passed_tests': passed_count, 'failed_cases': failed_cases}
+
+        return all_passed, detailed_results
+
+    except Exception as e:
+        if debug:
+            logger.error(f'Stdio evaluation error: {str(e)}')
+        return False, {'error': str(e), 'total_tests': len(inputs), 'passed_tests': 0}
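The helper above only assumes that the adapter it receives exposes `execute_code_in_sandbox(code=..., timeout=..., language=...)` and that the call returns a dict carrying at least `status` and `output`. A rough local stand-in for that contract is sketched below for readability; it is not the real `SandboxMixin` (which lives in `evalscope/api/mixin/sandbox_mixin.py` and runs code in an isolated sandbox rather than a plain subprocess).

```python
# Illustrative stand-in for the execute_code_in_sandbox contract used by
# evaluate_in_sandbox. Sketch only; the real mixin is SandboxMixin.
import subprocess
import sys


class LocalStubSandbox:

    def execute_code_in_sandbox(self, code: str, timeout: int = 6, language: str = 'python') -> dict:
        if language != 'python':
            return {'status': 'error', 'output': f'unsupported language: {language}'}
        try:
            proc = subprocess.run(
                [sys.executable, '-c', code],
                capture_output=True,
                text=True,
                timeout=timeout,
            )
            status = 'success' if proc.returncode == 0 else 'error'
            return {'status': status, 'output': proc.stdout + proc.stderr}
        except subprocess.TimeoutExpired:
            return {'status': 'error', 'output': 'timeout'}


# Usage sketch against the helper added in this release:
#   from evalscope.benchmarks.live_code_bench.sandbox_evaluate_utils import evaluate_in_sandbox
#   passed, details = evaluate_in_sandbox(
#       LocalStubSandbox(),
#       code='def add(a, b):\n    return a + b',
#       evaluation_sample='{"inputs": ["[1, 2]"], "outputs": ["3"], "fn_name": "add"}',
#   )
```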
evalscope/benchmarks/logi_qa/__int__.py: file without changes

evalscope/benchmarks/logi_qa/logi_qa_adapter.py +41 -0 (new file)

@@ -0,0 +1,41 @@
+# flake8: noqa: E501
+
+from evalscope.api.benchmark import BenchmarkMeta, MultiChoiceAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
+
+DESCRIPTION = 'LogiQA is a dataset sourced from expert-written questions for testing human Logical reasoning.'
+
+PROMPT_TEMPLATE = r"""
+Answer the following multiple choice question. The entire content of your response should be of the following format: 'ANSWER: $LETTER' (without quotes) where LETTER is one of {letters}.
+
+{question}
+
+{choices}
+""".strip()
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='logi_qa',
+        pretty_name='LogiQA',
+        tags=[Tags.REASONING, Tags.MULTIPLE_CHOICE],
+        description=DESCRIPTION.strip(),
+        dataset_id='extraordinarylab/logiqa',
+        metric_list=['acc'],
+        few_shot_num=0,
+        train_split='validation',
+        eval_split='test',
+        prompt_template=PROMPT_TEMPLATE,
+    )
+)
+class LogiQAAdapter(MultiChoiceAdapter):
+
+    def record_to_sample(self, record) -> Sample:
+        return Sample(
+            input=f"{record['context']}\n{record['question']}",
+            choices=record['choices'],
+            target=record['answer'],
+            metadata={},
+        )
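The new adapter only overrides `record_to_sample`; prompting and scoring come from the `MultiChoiceAdapter` base class. A small illustration of the mapping it performs, using an invented record (real records come from the `extraordinarylab/logiqa` dataset, and their field values may look different):

```python
# Illustration only: a made-up LogiQA-style record and the Sample fields that
# LogiQAAdapter.record_to_sample would build from it.
from evalscope.api.dataset import Sample

record = {
    'context': 'Every bird in the aviary can fly. Pip is a bird in the aviary.',
    'question': 'Which statement must be true?',
    'choices': ['Pip can fly.', 'Pip cannot fly.', 'Pip is not a bird.', 'None of the above.'],
    'answer': 'A',
}

sample = Sample(
    input=f"{record['context']}\n{record['question']}",  # context and question concatenated
    choices=record['choices'],
    target=record['answer'],
    metadata={},
)
```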
evalscope/benchmarks/math_500/math_500_adapter.py +5 -1

@@ -4,7 +4,6 @@ from typing import Any, Dict

 from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
 from evalscope.api.dataset import Sample
-from evalscope.api.evaluator import TaskState
 from evalscope.api.registry import register_benchmark
 from evalscope.constants import Tags
 from evalscope.utils.logger import get_logger
@@ -49,3 +48,8 @@ class Math500Adapter(DefaultDataAdapter):
                 'solution': record['solution'],
             },
         )
+
+    def extract_answer(self, prediction: str, task_state):
+        from evalscope.metrics.math_parser import extract_answer
+
+        return extract_answer(prediction)
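The new `extract_answer` hook delegates to `evalscope.metrics.math_parser.extract_answer`, which pulls the model's final answer out of the response text (the prompt conventions used across these adapters put it inside `\boxed{...}`). A simplified sketch of that idea follows; it is not the library's actual parser, just an illustration of what the hook is for.

```python
# Simplified illustration of boxed-answer extraction; NOT the implementation
# in evalscope.metrics.math_parser.
from typing import Optional


def extract_last_boxed(text: str) -> Optional[str]:
    """Return the contents of the last \\boxed{...} in `text`, or None."""
    marker = r'\boxed{'
    start = text.rfind(marker)
    if start == -1:
        return None
    i = start + len(marker)
    depth = 1
    chars = []
    while i < len(text):
        ch = text[i]
        if ch == '{':
            depth += 1
        elif ch == '}':
            depth -= 1
            if depth == 0:
                break
        chars.append(ch)
        i += 1
    return ''.join(chars)


assert extract_last_boxed('The answer is \\boxed{\\frac{1}{2}}.') == '\\frac{1}{2}'
```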
evalscope/benchmarks/math_qa/__init__.py: file without changes

evalscope/benchmarks/math_qa/math_qa_adapter.py +35 -0 (new file)

@@ -0,0 +1,35 @@
+from evalscope.api.benchmark import BenchmarkMeta, MultiChoiceAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
+from evalscope.utils.multi_choices import MultipleChoiceTemplate
+
+DESCRIPTION = (
+    'MathQA dataset is gathered by using a new representation language to annotate over the '
+    'AQuA-RAT dataset with fully-specified operational programs.'
+)
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='math_qa',
+        pretty_name='MathQA',
+        tags=[Tags.REASONING, Tags.MATH, Tags.MULTIPLE_CHOICE],
+        description=DESCRIPTION.strip(),
+        dataset_id='extraordinarylab/math-qa',
+        metric_list=['acc'],
+        few_shot_num=0,
+        train_split=None,
+        eval_split='test',
+        prompt_template=MultipleChoiceTemplate.SINGLE_ANSWER_COT,
+    )
+)
+class MathQAAdapter(MultiChoiceAdapter):
+
+    def record_to_sample(self, record) -> Sample:
+        return Sample(
+            input=record['question'],
+            choices=record['choices'],
+            target=record['answer'],
+            metadata={'reasoning': record['reasoning']},
+        )
evalscope/benchmarks/math_verse/__init__.py: file without changes

evalscope/benchmarks/math_verse/math_verse_adapter.py +105 -0 (new file)

@@ -0,0 +1,105 @@
+# flake8: noqa: E501
+from typing import Any, Dict
+
+from evalscope.api.benchmark import BenchmarkMeta, VisionLanguageAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.messages import ChatMessageUser, Content, ContentImage, ContentText
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
+from evalscope.utils.io_utils import bytes_to_base64
+from evalscope.utils.logger import get_logger
+
+logger = get_logger()
+
+MULTI_CHOICE_TYPE = 'multi-choice'
+OPEN_TYPE = 'free-form'
+
+OPEN_PROMPT = '{question}\nPlease reason step by step, and put your final answer within \\boxed{{}}.'
+
+MULT_CHOICE_PROMPT = """
+Answer the following multiple choice question. The last line of your response should be of the following format: 'ANSWER: $LETTER' (without quotes) where LETTER is one of A, B, C, D. Think step by step before answering.
+
+{question}
+"""
+
+SUBSET_LIST = ['Text Dominant', 'Text Lite', 'Vision Intensive', 'Vision Dominant', 'Vision Only']
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='math_verse',
+        pretty_name='MathVerse',
+        dataset_id='evalscope/MathVerse',
+        tags=[Tags.MATH, Tags.REASONING, Tags.MULTIPLE_CHOICE, Tags.MULTI_MODAL],
+        description=
+        'MathVerse, an all-around visual math benchmark designed for an equitable and in-depth evaluation of MLLMs. 2,612 high-quality, multi-subject math problems with diagrams from publicly available sources. Each problem is then transformed by human annotators into six distinct versions, each offering varying degrees of information content in multi-modality, contributing to 15K test samples in total. This approach allows MathVerse to comprehensively assess whether and how much MLLMs can truly understand the visual diagrams for mathematical reasoning.',
+        subset_list=SUBSET_LIST,
+        metric_list=[{
+            'acc': {
+                'numeric': True
+            }
+        }],
+        default_subset='testmini',
+        eval_split='testmini',
+        prompt_template=OPEN_PROMPT,
+    )
+)
+class MathVerseAdapter(VisionLanguageAdapter):
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.reformat_subset = True
+        self._use_llm_judge = True
+
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+        """
+        Convert a dataset record to a Sample. Unifies handling for both multi-choice and free-form.
+        Builds the content list inline and appends image content if provided.
+
+        Args:
+            record: Raw dataset record.
+
+        Returns:
+            Sample: The standardized sample ready for evaluation.
+        """
+        question_type = record.get('question_type', OPEN_TYPE)
+        question: str = record.get('question', '')
+        content_list: list[Content] = []
+
+        # Choose prompt text based on type; keep a single unified flow for creating Sample
+        if question_type == MULTI_CHOICE_TYPE:
+            prompt_text = MULT_CHOICE_PROMPT.format(question=question).strip()
+        else:
+            prompt_text = OPEN_PROMPT.format(question=question).strip()
+
+        content_list.append(ContentText(text=prompt_text))
+
+        # Append image if exists
+        image = record.get('image')
+        if image and isinstance(image, dict):
+            image_bytes = image.get('bytes')
+            if image_bytes:
+                image_base64 = bytes_to_base64(image_bytes, format='png', add_header=True)
+                content_list.append(ContentImage(image=image_base64))
+
+        metadata: Dict[str, Any] = {
+            'sample_index': record.get('sample_index'),
+            'problem_index': record.get('problem_index'),
+            'problem_version': record.get('problem_version'),
+            'question_type': question_type,
+            'query_wo': record.get('query_wo'),
+            'query_cot': record.get('query_cot'),
+            'question_for_eval': record.get('question_for_eval'),
+        }
+
+        return Sample(
+            input=[ChatMessageUser(content=content_list)],
+            target=record['answer'],
+            subset_key=record['problem_version'],
+            metadata=metadata,
+        )
+
+    def extract_answer(self, prediction: str, task_state):
+        from evalscope.metrics.math_parser import extract_answer
+
+        return extract_answer(prediction)
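In the image branch above, `bytes_to_base64(..., format='png', add_header=True)` is used so that `ContentImage` receives a self-contained string rather than raw bytes. A sketch of the equivalent transformation, assuming `add_header=True` prepends the usual `data:image/png;base64,` prefix (an assumption inferred from the parameter name, not verified against `evalscope.utils.io_utils`):

```python
# Assumed equivalent of bytes_to_base64(image_bytes, format='png', add_header=True).
import base64


def to_data_uri(image_bytes: bytes, fmt: str = 'png') -> str:
    """Encode raw image bytes as a base64 data URI."""
    encoded = base64.b64encode(image_bytes).decode('utf-8')
    return f'data:image/{fmt};base64,{encoded}'
```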
evalscope/benchmarks/math_vision/__init__.py: file without changes