evalscope 0.10.0__py3-none-any.whl → 1.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- evalscope/__init__.py +4 -1
- evalscope/api/benchmark/__init__.py +11 -0
- evalscope/api/benchmark/adapters/__init__.py +7 -0
- evalscope/api/benchmark/adapters/agent_adapter.py +8 -0
- evalscope/api/benchmark/adapters/default_data_adapter.py +754 -0
- evalscope/api/benchmark/adapters/image_edit_adapter.py +82 -0
- evalscope/api/benchmark/adapters/multi_choice_adapter.py +86 -0
- evalscope/api/benchmark/adapters/ner_adapter.py +212 -0
- evalscope/api/benchmark/adapters/text2image_adapter.py +157 -0
- evalscope/api/benchmark/adapters/vision_language_adapter.py +8 -0
- evalscope/api/benchmark/benchmark.py +404 -0
- evalscope/api/benchmark/meta.py +124 -0
- evalscope/api/dataset/__init__.py +2 -0
- evalscope/api/dataset/dataset.py +370 -0
- evalscope/api/dataset/loader.py +266 -0
- evalscope/api/dataset/utils.py +143 -0
- evalscope/api/evaluator/__init__.py +3 -0
- evalscope/api/evaluator/cache.py +382 -0
- evalscope/api/evaluator/evaluator.py +61 -0
- evalscope/api/evaluator/state.py +280 -0
- evalscope/api/filter/__init__.py +1 -0
- evalscope/api/filter/filter.py +72 -0
- evalscope/api/messages/__init__.py +12 -0
- evalscope/api/messages/chat_message.py +248 -0
- evalscope/api/messages/content.py +102 -0
- evalscope/api/messages/utils.py +35 -0
- evalscope/api/metric/__init__.py +2 -0
- evalscope/api/metric/metric.py +60 -0
- evalscope/api/metric/scorer.py +113 -0
- evalscope/api/mixin/__init__.py +2 -0
- evalscope/api/mixin/llm_judge_mixin.py +170 -0
- evalscope/api/mixin/sandbox_mixin.py +182 -0
- evalscope/api/model/__init__.py +12 -0
- evalscope/api/model/generate_config.py +161 -0
- evalscope/api/model/model.py +386 -0
- evalscope/api/model/model_output.py +285 -0
- evalscope/api/registry.py +182 -0
- evalscope/api/tool/__init__.py +3 -0
- evalscope/api/tool/tool_call.py +101 -0
- evalscope/api/tool/tool_info.py +173 -0
- evalscope/api/tool/utils.py +64 -0
- evalscope/app/__init__.py +28 -0
- evalscope/app/app.py +38 -0
- evalscope/app/arguments.py +11 -0
- evalscope/app/constants.py +22 -0
- evalscope/app/ui/__init__.py +20 -0
- evalscope/app/ui/app_ui.py +53 -0
- evalscope/app/ui/multi_model.py +353 -0
- evalscope/app/ui/sidebar.py +42 -0
- evalscope/app/ui/single_model.py +220 -0
- evalscope/app/ui/visualization.py +36 -0
- evalscope/app/utils/data_utils.py +195 -0
- evalscope/app/utils/env_utils.py +12 -0
- evalscope/app/utils/localization.py +221 -0
- evalscope/app/utils/text_utils.py +119 -0
- evalscope/app/utils/visualization.py +96 -0
- evalscope/arguments.py +32 -9
- evalscope/backend/opencompass/api_meta_template.py +2 -1
- evalscope/backend/opencompass/backend_manager.py +10 -7
- evalscope/backend/rag_eval/__init__.py +1 -1
- evalscope/backend/rag_eval/backend_manager.py +23 -6
- evalscope/backend/rag_eval/clip_benchmark/dataset_builder.py +33 -21
- evalscope/backend/rag_eval/clip_benchmark/task_template.py +8 -4
- evalscope/backend/rag_eval/cmteb/arguments.py +14 -1
- evalscope/backend/rag_eval/cmteb/task_template.py +19 -3
- evalscope/backend/rag_eval/cmteb/tasks/CustomTask.py +1 -1
- evalscope/backend/rag_eval/ragas/arguments.py +0 -1
- evalscope/backend/rag_eval/ragas/task_template.py +2 -1
- evalscope/backend/rag_eval/ragas/tasks/build_distribution.py +2 -1
- evalscope/backend/rag_eval/ragas/tasks/build_transform.py +7 -4
- evalscope/backend/rag_eval/ragas/tasks/testset_generation.py +9 -3
- evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py +2 -6
- evalscope/backend/rag_eval/utils/embedding.py +125 -32
- evalscope/backend/rag_eval/utils/llm.py +16 -16
- evalscope/backend/vlm_eval_kit/backend_manager.py +8 -3
- evalscope/benchmarks/__init__.py +17 -5
- evalscope/benchmarks/aa_lcr/__init__.py +0 -0
- evalscope/benchmarks/aa_lcr/aa_lcr_adapter.py +205 -0
- evalscope/benchmarks/ai2d/__init__.py +0 -0
- evalscope/benchmarks/ai2d/ai2d_adapter.py +54 -0
- evalscope/benchmarks/aime/__init__.py +0 -0
- evalscope/benchmarks/aime/aime24_adapter.py +55 -0
- evalscope/benchmarks/aime/aime25_adapter.py +181 -0
- evalscope/benchmarks/aime/grader.py +307 -0
- evalscope/{metrics/math_accuracy.py → benchmarks/aime/math_normalize.py} +61 -72
- evalscope/benchmarks/alpaca_eval/__init__.py +0 -0
- evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py +133 -0
- evalscope/benchmarks/amc/__init__.py +0 -0
- evalscope/benchmarks/amc/amc_adapter.py +51 -0
- evalscope/benchmarks/arc/arc_adapter.py +34 -149
- evalscope/benchmarks/arena_hard/__init__.py +0 -0
- evalscope/benchmarks/arena_hard/arena_hard_adapter.py +149 -0
- evalscope/benchmarks/arena_hard/utils.py +186 -0
- evalscope/benchmarks/bbh/bbh_adapter.py +117 -157
- evalscope/benchmarks/bfcl/__init__.py +0 -0
- evalscope/benchmarks/bfcl/v3/__init__.py +0 -0
- evalscope/benchmarks/bfcl/v3/bfcl_v3_adapter.py +370 -0
- evalscope/benchmarks/bfcl/v3/generation.py +222 -0
- evalscope/benchmarks/bfcl/v3/utils.py +23 -0
- evalscope/benchmarks/bfcl/v4/__init__.py +0 -0
- evalscope/benchmarks/bfcl/v4/bfcl_v4_adapter.py +229 -0
- evalscope/benchmarks/bfcl/v4/utils.py +410 -0
- evalscope/benchmarks/biomix_qa/__init__.py +0 -0
- evalscope/benchmarks/biomix_qa/biomix_qa_adapter.py +36 -0
- evalscope/benchmarks/blink/__init__.py +0 -0
- evalscope/benchmarks/blink/blink_adapter.py +61 -0
- evalscope/benchmarks/ceval/ceval_adapter.py +93 -174
- evalscope/benchmarks/chartqa/__init__.py +0 -0
- evalscope/benchmarks/chartqa/chartqa_adapter.py +80 -0
- evalscope/benchmarks/chartqa/utils.py +38 -0
- evalscope/benchmarks/chinese_simple_qa/__init__.py +0 -0
- evalscope/benchmarks/chinese_simple_qa/csimple_qa_adapter.py +170 -0
- evalscope/benchmarks/cmmlu/cmmlu_adapter.py +34 -140
- evalscope/benchmarks/coin_flip/__init__.py +0 -0
- evalscope/benchmarks/coin_flip/coin_flip_adapter.py +128 -0
- evalscope/benchmarks/commonsense_qa/__init__.py +0 -0
- evalscope/benchmarks/commonsense_qa/commonsense_qa_adapter.py +32 -0
- evalscope/benchmarks/competition_math/competition_math_adapter.py +64 -112
- evalscope/benchmarks/data_collection/__init__.py +0 -0
- evalscope/benchmarks/data_collection/data_collection_adapter.py +215 -0
- evalscope/benchmarks/docmath/__init__.py +0 -0
- evalscope/benchmarks/docmath/docmath_adapter.py +143 -0
- evalscope/benchmarks/docmath/utils.py +219 -0
- evalscope/benchmarks/docvqa/__init__.py +0 -0
- evalscope/benchmarks/docvqa/docvqa_adapter.py +67 -0
- evalscope/benchmarks/drivelology/__init__.py +0 -0
- evalscope/benchmarks/drivelology/drivelology_binary_adapter.py +170 -0
- evalscope/benchmarks/drivelology/drivelology_multilabel_adapter.py +254 -0
- evalscope/benchmarks/drivelology/drivelology_selection_adapter.py +49 -0
- evalscope/benchmarks/drivelology/drivelology_writing_adapter.py +218 -0
- evalscope/benchmarks/drop/__init__.py +0 -0
- evalscope/benchmarks/drop/drop_adapter.py +155 -0
- evalscope/benchmarks/drop/utils.py +156 -0
- evalscope/benchmarks/frames/__init__.py +0 -0
- evalscope/benchmarks/frames/frames_adapter.py +175 -0
- evalscope/benchmarks/frames/utils.py +37 -0
- evalscope/benchmarks/general_arena/__init__.py +0 -0
- evalscope/benchmarks/general_arena/general_arena_adapter.py +454 -0
- evalscope/benchmarks/general_arena/utils.py +223 -0
- evalscope/benchmarks/general_mcq/__init__.py +0 -0
- evalscope/benchmarks/general_mcq/general_mcq_adapter.py +58 -0
- evalscope/benchmarks/general_qa/general_qa_adapter.py +75 -107
- evalscope/benchmarks/gpqa/__init__.py +0 -0
- evalscope/benchmarks/gpqa/gpqa_adapter.py +90 -0
- evalscope/benchmarks/gpqa/prompt.py +88 -0
- evalscope/benchmarks/gsm8k/gsm8k_adapter.py +77 -144
- evalscope/benchmarks/hallusion_bench/__init__.py +0 -0
- evalscope/benchmarks/hallusion_bench/hallusion_bench_adapter.py +159 -0
- evalscope/benchmarks/halu_eval/__init__.py +0 -0
- evalscope/benchmarks/halu_eval/halu_eval_adapter.py +128 -0
- evalscope/benchmarks/halu_eval/halu_eval_instructions.py +84 -0
- evalscope/benchmarks/healthbench/__init__.py +0 -0
- evalscope/benchmarks/healthbench/healthbench_adapter.py +282 -0
- evalscope/benchmarks/healthbench/utils.py +102 -0
- evalscope/benchmarks/hellaswag/hellaswag_adapter.py +36 -134
- evalscope/benchmarks/hle/__init__.py +0 -0
- evalscope/benchmarks/hle/hle_adapter.py +153 -0
- evalscope/benchmarks/humaneval/humaneval_adapter.py +80 -88
- evalscope/benchmarks/humaneval/utils.py +235 -0
- evalscope/benchmarks/ifeval/ifeval_adapter.py +71 -45
- evalscope/benchmarks/ifeval/instructions.py +112 -68
- evalscope/benchmarks/ifeval/instructions_registry.py +1 -1
- evalscope/benchmarks/ifeval/instructions_util.py +2 -3
- evalscope/benchmarks/ifeval/utils.py +6 -7
- evalscope/benchmarks/image_edit/__init__.py +0 -0
- evalscope/benchmarks/image_edit/gedit/__init__.py +0 -0
- evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +138 -0
- evalscope/benchmarks/image_edit/gedit/utils.py +372 -0
- evalscope/benchmarks/image_edit/gedit/vie_prompts.py +406 -0
- evalscope/benchmarks/infovqa/__init__.py +0 -0
- evalscope/benchmarks/infovqa/infovqa_adapter.py +66 -0
- evalscope/benchmarks/iquiz/iquiz_adapter.py +30 -58
- evalscope/benchmarks/live_code_bench/__init__.py +0 -0
- evalscope/benchmarks/live_code_bench/evaluate_utils.py +195 -0
- evalscope/benchmarks/live_code_bench/extract_utils.py +70 -0
- evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +150 -0
- evalscope/benchmarks/live_code_bench/load_utils.py +63 -0
- evalscope/benchmarks/live_code_bench/pass_k_utils.py +56 -0
- evalscope/benchmarks/live_code_bench/prompts.py +207 -0
- evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0
- evalscope/benchmarks/live_code_bench/testing_util.py +544 -0
- evalscope/benchmarks/logi_qa/__int__.py +0 -0
- evalscope/benchmarks/logi_qa/logi_qa_adapter.py +41 -0
- evalscope/benchmarks/maritime_bench/__init__.py +0 -0
- evalscope/benchmarks/maritime_bench/maritime_bench_adapter.py +56 -0
- evalscope/benchmarks/math_500/__init__.py +0 -0
- evalscope/benchmarks/math_500/math_500_adapter.py +55 -0
- evalscope/benchmarks/math_qa/__init__.py +0 -0
- evalscope/benchmarks/math_qa/math_qa_adapter.py +35 -0
- evalscope/benchmarks/math_verse/__init__.py +0 -0
- evalscope/benchmarks/math_verse/math_verse_adapter.py +105 -0
- evalscope/benchmarks/math_vision/__init__.py +0 -0
- evalscope/benchmarks/math_vision/math_vision_adapter.py +116 -0
- evalscope/benchmarks/math_vista/__init__.py +0 -0
- evalscope/benchmarks/math_vista/math_vista_adapter.py +114 -0
- evalscope/benchmarks/med_mcqa/__init__.py +0 -0
- evalscope/benchmarks/med_mcqa/med_mcqa_adapter.py +32 -0
- evalscope/benchmarks/minerva_math/__init__.py +0 -0
- evalscope/benchmarks/minerva_math/minerva_math_adapter.py +53 -0
- evalscope/benchmarks/mm_bench/__init__.py +0 -0
- evalscope/benchmarks/mm_bench/mm_bench_adapter.py +99 -0
- evalscope/benchmarks/mm_star/__init__.py +0 -0
- evalscope/benchmarks/mm_star/mm_star_adapter.py +73 -0
- evalscope/benchmarks/mmlu/mmlu_adapter.py +32 -210
- evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +87 -103
- evalscope/benchmarks/mmlu_redux/__init__.py +0 -0
- evalscope/benchmarks/mmlu_redux/mmlu_redux_adapter.py +139 -0
- evalscope/benchmarks/mmmu/__init__.py +0 -0
- evalscope/benchmarks/mmmu/mmmu_adapter.py +159 -0
- evalscope/benchmarks/mmmu_pro/__init__.py +0 -0
- evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +124 -0
- evalscope/benchmarks/mri_mcqa/__init__.py +0 -0
- evalscope/benchmarks/mri_mcqa/mri_mcqa_adapter.py +34 -0
- evalscope/benchmarks/multi_if/__init__.py +0 -0
- evalscope/benchmarks/multi_if/ifeval.py +3354 -0
- evalscope/benchmarks/multi_if/metrics.py +120 -0
- evalscope/benchmarks/multi_if/multi_if_adapter.py +161 -0
- evalscope/benchmarks/music_trivia/__init__.py +0 -0
- evalscope/benchmarks/music_trivia/music_trivia_adapter.py +36 -0
- evalscope/benchmarks/musr/__init__.py +0 -0
- evalscope/benchmarks/musr/musr_adapter.py +43 -0
- evalscope/benchmarks/needle_haystack/__init__.py +0 -0
- evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +389 -0
- evalscope/benchmarks/needle_haystack/utils.py +79 -0
- evalscope/benchmarks/ner/__init__.py +0 -0
- evalscope/benchmarks/ner/broad_twitter_corpus_adapter.py +52 -0
- evalscope/benchmarks/ner/conll2003_adapter.py +48 -0
- evalscope/benchmarks/ner/copious_adapter.py +85 -0
- evalscope/benchmarks/ner/cross_ner_adapter.py +120 -0
- evalscope/benchmarks/ner/cross_ner_entities/__init__.py +0 -0
- evalscope/benchmarks/ner/cross_ner_entities/ai.py +54 -0
- evalscope/benchmarks/ner/cross_ner_entities/literature.py +36 -0
- evalscope/benchmarks/ner/cross_ner_entities/music.py +39 -0
- evalscope/benchmarks/ner/cross_ner_entities/politics.py +37 -0
- evalscope/benchmarks/ner/cross_ner_entities/science.py +58 -0
- evalscope/benchmarks/ner/genia_ner_adapter.py +66 -0
- evalscope/benchmarks/ner/harvey_ner_adapter.py +58 -0
- evalscope/benchmarks/ner/mit_movie_trivia_adapter.py +74 -0
- evalscope/benchmarks/ner/mit_restaurant_adapter.py +66 -0
- evalscope/benchmarks/ner/ontonotes5_adapter.py +87 -0
- evalscope/benchmarks/ner/wnut2017_adapter.py +61 -0
- evalscope/benchmarks/ocr_bench/__init__.py +0 -0
- evalscope/benchmarks/ocr_bench/ocr_bench/__init__.py +0 -0
- evalscope/benchmarks/ocr_bench/ocr_bench/ocr_bench_adapter.py +101 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/IoUscore_metric.py +87 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/TEDS_metric.py +963 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/__init__.py +0 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/ocr_bench_v2_adapter.py +161 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/page_ocr_metric.py +50 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/parallel.py +46 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/__init__.py +0 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/readme.txt +26 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/rrc_evaluation_funcs_1_1.py +537 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/script.py +481 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_metric.py +179 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/utils.py +433 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/vqa_metric.py +254 -0
- evalscope/benchmarks/olympiad_bench/__init__.py +0 -0
- evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +163 -0
- evalscope/benchmarks/olympiad_bench/utils.py +565 -0
- evalscope/benchmarks/omni_bench/__init__.py +0 -0
- evalscope/benchmarks/omni_bench/omni_bench_adapter.py +86 -0
- evalscope/benchmarks/omnidoc_bench/__init__.py +0 -0
- evalscope/benchmarks/omnidoc_bench/end2end_eval.py +349 -0
- evalscope/benchmarks/omnidoc_bench/metrics.py +547 -0
- evalscope/benchmarks/omnidoc_bench/omnidoc_bench_adapter.py +135 -0
- evalscope/benchmarks/omnidoc_bench/utils.py +1937 -0
- evalscope/benchmarks/piqa/__init__.py +0 -0
- evalscope/benchmarks/piqa/piqa_adapter.py +32 -0
- evalscope/benchmarks/poly_math/__init__.py +0 -0
- evalscope/benchmarks/poly_math/poly_math_adapter.py +132 -0
- evalscope/benchmarks/poly_math/utils/instruction.py +105 -0
- evalscope/benchmarks/pope/__init__.py +0 -0
- evalscope/benchmarks/pope/pope_adapter.py +112 -0
- evalscope/benchmarks/process_bench/__init__.py +0 -0
- evalscope/benchmarks/process_bench/process_bench_adapter.py +171 -0
- evalscope/benchmarks/pumed_qa/__init__.py +0 -0
- evalscope/benchmarks/pumed_qa/pubmed_qa_adapter.py +175 -0
- evalscope/benchmarks/qasc/__init__.py +0 -0
- evalscope/benchmarks/qasc/qasc_adapter.py +35 -0
- evalscope/benchmarks/race/race_adapter.py +33 -120
- evalscope/benchmarks/real_world_qa/__init__.py +0 -0
- evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +64 -0
- evalscope/benchmarks/sciq/__init__.py +0 -0
- evalscope/benchmarks/sciq/sciq_adapter.py +36 -0
- evalscope/benchmarks/seed_bench_2_plus/__init__.py +0 -0
- evalscope/benchmarks/seed_bench_2_plus/seed_bench_2_plus_adapter.py +72 -0
- evalscope/benchmarks/simple_qa/__init__.py +0 -0
- evalscope/benchmarks/simple_qa/simple_qa_adapter.py +169 -0
- evalscope/benchmarks/simple_vqa/__init__.py +0 -0
- evalscope/benchmarks/simple_vqa/simple_vqa_adapter.py +169 -0
- evalscope/benchmarks/siqa/__init__.py +0 -0
- evalscope/benchmarks/siqa/siqa_adapter.py +39 -0
- evalscope/benchmarks/super_gpqa/__init__.py +0 -0
- evalscope/benchmarks/super_gpqa/prompt.py +88 -0
- evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py +165 -0
- evalscope/benchmarks/super_gpqa/utils.py +86 -0
- evalscope/benchmarks/tau_bench/__init__.py +0 -0
- evalscope/benchmarks/tau_bench/tau2_bench/__init__.py +0 -0
- evalscope/benchmarks/tau_bench/tau2_bench/generation.py +158 -0
- evalscope/benchmarks/tau_bench/tau2_bench/tau2_bench_adapter.py +146 -0
- evalscope/benchmarks/tau_bench/tau_bench/__init__.py +0 -0
- evalscope/benchmarks/tau_bench/tau_bench/generation.py +147 -0
- evalscope/benchmarks/tau_bench/tau_bench/tau_bench_adapter.py +168 -0
- evalscope/benchmarks/text2image/__init__.py +0 -0
- evalscope/benchmarks/text2image/evalmuse_adapter.py +78 -0
- evalscope/benchmarks/text2image/genai_bench_adapter.py +53 -0
- evalscope/benchmarks/text2image/general_t2i_adapter.py +42 -0
- evalscope/benchmarks/text2image/hpdv2_adapter.py +52 -0
- evalscope/benchmarks/text2image/tifa_adapter.py +27 -0
- evalscope/benchmarks/tool_bench/__init__.py +0 -0
- evalscope/benchmarks/tool_bench/tool_bench_adapter.py +102 -0
- evalscope/benchmarks/tool_bench/utils.py +203 -0
- evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +56 -118
- evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +70 -270
- evalscope/benchmarks/visu_logic/__init__.py +0 -0
- evalscope/benchmarks/visu_logic/visu_logic_adapter.py +75 -0
- evalscope/benchmarks/winogrande/__init__.py +0 -0
- evalscope/benchmarks/winogrande/winogrande_adapter.py +34 -0
- evalscope/benchmarks/wmt/__init__.py +0 -0
- evalscope/benchmarks/wmt/wmt24_adapter.py +294 -0
- evalscope/benchmarks/zerobench/__init__.py +0 -0
- evalscope/benchmarks/zerobench/zerobench_adapter.py +64 -0
- evalscope/cli/cli.py +2 -0
- evalscope/cli/start_app.py +12 -2
- evalscope/cli/start_eval.py +4 -3
- evalscope/cli/start_perf.py +10 -2
- evalscope/cli/start_server.py +6 -3
- evalscope/collections/__init__.py +27 -3
- evalscope/collections/sampler.py +12 -11
- evalscope/collections/schema.py +13 -12
- evalscope/config.py +218 -147
- evalscope/constants.py +78 -82
- evalscope/evaluator/__init__.py +1 -1
- evalscope/evaluator/evaluator.py +334 -318
- evalscope/filters/__init__.py +2 -0
- evalscope/filters/extraction.py +126 -0
- evalscope/filters/selection.py +57 -0
- evalscope/metrics/__init__.py +59 -3
- evalscope/metrics/bert_score/__init__.py +0 -0
- evalscope/metrics/bert_score/scorer.py +338 -0
- evalscope/metrics/bert_score/utils.py +697 -0
- evalscope/metrics/bundled_rouge_score/rouge_scorer.py +20 -15
- evalscope/metrics/llm_judge.py +211 -0
- evalscope/metrics/math_parser.py +545 -0
- evalscope/metrics/metric.py +611 -0
- evalscope/metrics/metrics.py +112 -23
- evalscope/metrics/rouge_metric.py +11 -13
- evalscope/metrics/t2v_metrics/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/clipscore.py +14 -0
- evalscope/metrics/t2v_metrics/constants.py +12 -0
- evalscope/metrics/t2v_metrics/itmscore.py +14 -0
- evalscope/metrics/t2v_metrics/models/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/__init__.py +30 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/base_model.py +6 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/clip_model.py +134 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/cross_modeling.py +282 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/clip_model.py +115 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/hpsv2_model.py +87 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/mps_model.py +86 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/pickscore_model.py +62 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/__init__.py +26 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/blip2_itm_model.py +85 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/fga_blip2_model.py +99 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/ImageReward.py +176 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/blip_pretrain.py +82 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward_model.py +74 -0
- evalscope/metrics/t2v_metrics/models/model.py +45 -0
- evalscope/metrics/t2v_metrics/models/utils.py +25 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/__init__.py +22 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/__init__.py +1 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/language_model/clip_t5.py +306 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/builder.py +12 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/clip_encoder.py +84 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_projector/builder.py +50 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5_model.py +223 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/gpt4v_model.py +153 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/__init__.py +26 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +465 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +141 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +24 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +190 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +100 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +313 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +416 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +8 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +192 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +320 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/default.yaml +10 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_flant5xl.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt2.7b.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt6.7b.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_coco.yaml +36 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xl.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xxl.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna13b.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain.yaml +36 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_no_prefix.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_prefix.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_vitL.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xxl.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt2.7b.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt6.7b.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_vitL.yaml +37 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna13b.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna7b.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config.json +21 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config_albef.json +22 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_large_config.json +21 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/__init__.py +212 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/base_model.py +231 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +1111 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2.py +211 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_image_text_matching.py +109 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_qformer.py +457 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5.py +370 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5_instruct.py +765 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/fga_blip2.py +274 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_llama.py +896 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_t5.py +1876 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/__init__.py +83 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip.py +58 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_caption.py +212 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_classification.py +164 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_feature_extractor.py +202 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_image_text_matching.py +187 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_nlvr.py +179 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_outputs.py +115 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_pretrain.py +371 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_vqa.py +348 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +870 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/clip_vit.py +273 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/eva_vit.py +514 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +1291 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/vit.py +476 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/__init__.py +35 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/base_processor.py +27 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/blip_processors.py +233 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/randaugment.py +393 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/mm_utils.py +129 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/vqa_model.py +18 -0
- evalscope/metrics/t2v_metrics/score.py +78 -0
- evalscope/metrics/t2v_metrics/vqascore.py +14 -0
- evalscope/models/__init__.py +23 -13
- evalscope/models/image_edit_model.py +125 -0
- evalscope/models/mockllm.py +65 -0
- evalscope/models/model_apis.py +69 -0
- evalscope/models/modelscope.py +455 -0
- evalscope/models/openai_compatible.py +144 -0
- evalscope/models/text2image_model.py +124 -0
- evalscope/models/utils/openai.py +708 -0
- evalscope/perf/__init__.py +0 -1
- evalscope/perf/arguments.py +103 -69
- evalscope/perf/benchmark.py +114 -163
- evalscope/perf/http_client.py +59 -89
- evalscope/perf/main.py +91 -18
- evalscope/perf/plugin/__init__.py +3 -2
- evalscope/perf/plugin/api/__init__.py +4 -3
- evalscope/perf/plugin/api/base.py +27 -7
- evalscope/perf/plugin/api/custom_api.py +170 -57
- evalscope/perf/plugin/api/dashscope_api.py +4 -10
- evalscope/perf/plugin/api/default_api.py +214 -0
- evalscope/perf/plugin/api/openai_api.py +120 -41
- evalscope/perf/plugin/datasets/__init__.py +10 -6
- evalscope/perf/plugin/datasets/base.py +43 -1
- evalscope/perf/plugin/datasets/custom.py +22 -3
- evalscope/perf/plugin/datasets/flickr8k.py +5 -27
- evalscope/perf/plugin/datasets/kontext_bench.py +28 -0
- evalscope/perf/plugin/datasets/line_by_line.py +7 -3
- evalscope/perf/plugin/datasets/longalpaca.py +7 -3
- evalscope/perf/plugin/datasets/openqa.py +13 -14
- evalscope/perf/plugin/datasets/random_dataset.py +67 -0
- evalscope/perf/plugin/datasets/random_vl_dataset.py +80 -0
- evalscope/perf/plugin/datasets/speed_benchmark.py +11 -0
- evalscope/perf/plugin/registry.py +36 -16
- evalscope/perf/utils/analysis_result.py +24 -23
- evalscope/perf/utils/benchmark_util.py +95 -55
- evalscope/perf/utils/db_util.py +115 -78
- evalscope/perf/utils/local_server.py +12 -47
- evalscope/perf/utils/log_utils.py +63 -0
- evalscope/perf/utils/rich_display.py +192 -0
- evalscope/report/__init__.py +46 -3
- evalscope/report/combinator.py +143 -32
- evalscope/report/generator.py +74 -34
- evalscope/report/report.py +238 -0
- evalscope/run.py +71 -46
- evalscope/summarizer.py +5 -5
- evalscope/third_party/longbench_write/infer.py +1 -1
- evalscope/third_party/thinkbench/__init__.py +3 -0
- evalscope/third_party/thinkbench/eval.py +441 -0
- evalscope/third_party/thinkbench/infer.py +130 -0
- evalscope/third_party/thinkbench/resources/critique_template.txt +17 -0
- evalscope/third_party/thinkbench/resources/reformat_template.txt +31 -0
- evalscope/third_party/thinkbench/tools/__init__.py +0 -0
- evalscope/third_party/thinkbench/tools/llm.py +48 -0
- evalscope/third_party/thinkbench/tools/utils.py +13 -0
- evalscope/third_party/toolbench_static/llm/swift_infer.py +46 -20
- evalscope/third_party/toolbench_static/toolbench_static.py +2 -1
- evalscope/utils/__init__.py +82 -2
- evalscope/utils/argument_utils.py +64 -0
- evalscope/utils/chat_service.py +8 -6
- evalscope/utils/deprecation_utils.py +53 -0
- evalscope/utils/function_utils.py +266 -0
- evalscope/utils/import_utils.py +154 -0
- evalscope/utils/io_utils.py +336 -8
- evalscope/utils/json_schema.py +231 -0
- evalscope/utils/logger.py +121 -31
- evalscope/utils/model_utils.py +57 -1
- evalscope/utils/multi_choices.py +303 -0
- evalscope/utils/ner.py +377 -0
- evalscope/utils/url_utils.py +65 -0
- evalscope/version.py +2 -2
- evalscope-1.2.0.dist-info/METADATA +553 -0
- evalscope-1.2.0.dist-info/RECORD +628 -0
- {evalscope-0.10.0.dist-info → evalscope-1.2.0.dist-info}/WHEEL +1 -1
- {evalscope-0.10.0.dist-info → evalscope-1.2.0.dist-info}/top_level.txt +0 -1
- evalscope/backend/vlm_eval_kit/custom_dataset.py +0 -46
- evalscope/benchmarks/arc/ai2_arc.py +0 -151
- evalscope/benchmarks/benchmark.py +0 -76
- evalscope/benchmarks/ceval/ceval_exam.py +0 -146
- evalscope/benchmarks/ceval/samples.jsonl +0 -1
- evalscope/benchmarks/cmmlu/cmmlu.py +0 -161
- evalscope/benchmarks/cmmlu/samples.jsonl +0 -5
- evalscope/benchmarks/competition_math/competition_math.py +0 -79
- evalscope/benchmarks/data_adapter.py +0 -291
- evalscope/benchmarks/gsm8k/gsm8k.py +0 -121
- evalscope/benchmarks/hellaswag/hellaswag.py +0 -112
- evalscope/benchmarks/humaneval/humaneval.py +0 -79
- evalscope/benchmarks/mmlu/mmlu.py +0 -160
- evalscope/benchmarks/mmlu/samples.jsonl +0 -5
- evalscope/benchmarks/race/race.py +0 -104
- evalscope/benchmarks/race/samples.jsonl +0 -5
- evalscope/benchmarks/trivia_qa/trivia_qa.py +0 -89
- evalscope/benchmarks/truthful_qa/truthful_qa.py +0 -163
- evalscope/collections/evaluator.py +0 -198
- evalscope/evaluator/rating_eval.py +0 -157
- evalscope/evaluator/reviewer/__init__.py +0 -1
- evalscope/evaluator/reviewer/auto_reviewer.py +0 -391
- evalscope/metrics/code_metric.py +0 -98
- evalscope/metrics/named_metrics.py +0 -17
- evalscope/metrics/resources/gpt2-zhcn3-v4.bpe +0 -58485
- evalscope/metrics/resources/gpt2-zhcn3-v4.json +0 -1
- evalscope/models/base_adapter.py +0 -52
- evalscope/models/chat_adapter.py +0 -138
- evalscope/models/choice_adapter.py +0 -211
- evalscope/models/custom/__init__.py +0 -3
- evalscope/models/custom/custom_model.py +0 -53
- evalscope/models/custom/dummy_model.py +0 -63
- evalscope/models/custom_adapter.py +0 -67
- evalscope/models/local_model.py +0 -74
- evalscope/models/model.py +0 -229
- evalscope/models/server_adapter.py +0 -111
- evalscope/registry/__init__.py +0 -1
- evalscope/registry/config/cfg_arena.yaml +0 -77
- evalscope/registry/config/cfg_arena_zhihu.yaml +0 -63
- evalscope/registry/config/cfg_pairwise_baseline.yaml +0 -83
- evalscope/registry/config/cfg_single.yaml +0 -78
- evalscope/registry/data/prompt_template/lmsys_v2.jsonl +0 -8
- evalscope/registry/data/prompt_template/prompt_templates.jsonl +0 -8
- evalscope/registry/data/qa_browser/battle.jsonl +0 -634
- evalscope/registry/data/qa_browser/category_mapping.yaml +0 -10
- evalscope/registry/data/question.jsonl +0 -80
- evalscope/registry/tasks/arc.yaml +0 -28
- evalscope/registry/tasks/bbh.yaml +0 -26
- evalscope/registry/tasks/bbh_mini.yaml +0 -26
- evalscope/registry/tasks/ceval.yaml +0 -27
- evalscope/registry/tasks/ceval_mini.yaml +0 -26
- evalscope/registry/tasks/cmmlu.yaml +0 -27
- evalscope/registry/tasks/eval_qwen-7b-chat_v100.yaml +0 -28
- evalscope/registry/tasks/general_qa.yaml +0 -27
- evalscope/registry/tasks/gsm8k.yaml +0 -29
- evalscope/registry/tasks/mmlu.yaml +0 -29
- evalscope/registry/tasks/mmlu_mini.yaml +0 -27
- evalscope/report/app.py +0 -506
- evalscope/report/utils.py +0 -133
- evalscope/run_arena.py +0 -202
- evalscope/utils/arena_utils.py +0 -217
- evalscope/utils/completion_parsers.py +0 -82
- evalscope/utils/utils.py +0 -301
- evalscope-0.10.0.dist-info/METADATA +0 -565
- evalscope-0.10.0.dist-info/RECORD +0 -286
- tests/__init__.py +0 -1
- tests/cli/__init__.py +0 -1
- tests/cli/test_collection.py +0 -57
- tests/cli/test_run.py +0 -165
- tests/perf/__init__.py +0 -1
- tests/perf/test_perf.py +0 -101
- tests/rag/test_clip_benchmark.py +0 -85
- tests/rag/test_mteb.py +0 -138
- tests/rag/test_ragas.py +0 -120
- tests/swift/__init__.py +0 -1
- tests/swift/test_run_swift_eval.py +0 -145
- tests/swift/test_run_swift_vlm_eval.py +0 -127
- tests/swift/test_run_swift_vlm_jugde_eval.py +0 -156
- tests/test_run_all.py +0 -12
- tests/vlm/__init__.py +0 -1
- tests/vlm/test_vlmeval.py +0 -60
- {tests/rag → evalscope/api}/__init__.py +0 -0
- {evalscope-0.10.0.dist-info → evalscope-1.2.0.dist-info}/entry_points.txt +0 -0
- {evalscope-0.10.0.dist-info → evalscope-1.2.0.dist-info/licenses}/LICENSE +0 -0
File without changes
evalscope/benchmarks/piqa/piqa_adapter.py
@@ -0,0 +1,32 @@
+from evalscope.api.benchmark import BenchmarkMeta, MultiChoiceAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
+from evalscope.utils.multi_choices import MultipleChoiceTemplate
+
+DESCRIPTION = 'PIQA addresses the challenging task of reasoning about physical commonsense in natural language.'
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='piqa',
+        pretty_name='PIQA',
+        tags=[Tags.REASONING, Tags.COMMONSENSE, Tags.MULTIPLE_CHOICE],
+        description=DESCRIPTION.strip(),
+        dataset_id='extraordinarylab/piqa',
+        metric_list=['acc'],
+        few_shot_num=0,
+        train_split='train',
+        eval_split='validation',
+        prompt_template=MultipleChoiceTemplate.SINGLE_ANSWER,
+    )
+)
+class PIQAAdapter(MultiChoiceAdapter):
+
+    def record_to_sample(self, record) -> Sample:
+        return Sample(
+            input=record['question'],
+            choices=record['choices'],
+            target=record['answer'],
+            metadata={},
+        )
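The PIQA hunk above shows the 1.2.0 registration pattern: a BenchmarkMeta is handed to register_benchmark and the adapter only has to map raw dataset records onto Sample objects. A minimal sketch of that mapping follows; it is not part of the diff, the record values are hypothetical, and the exact label encoding of the 'extraordinarylab/piqa' dataset is not visible here.

from evalscope.api.dataset import Sample

# Hypothetical PIQA-style record with the fields the adapter reads.
record = {
    'question': 'To keep a door from slamming shut, you can',
    'choices': ['wedge a rubber doorstop under it', 'tape the hinges shut'],
    'answer': 'A',  # assumed format; the dataset's real answer encoding is not shown in this diff
}

# PIQAAdapter.record_to_sample forwards these fields unchanged into a Sample.
sample = Sample(
    input=record['question'],
    choices=record['choices'],
    target=record['answer'],
    metadata={},
)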
File without changes
evalscope/benchmarks/poly_math/poly_math_adapter.py
@@ -0,0 +1,132 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+
+from typing import Any, Dict, List, Tuple
+
+from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.dataset.dataset import DatasetDict, MemoryDataset
+from evalscope.api.metric.scorer import AggScore, SampleScore, Score
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
+from evalscope.report.report import Report, Subset
+from evalscope.utils.logger import get_logger
+
+logger = get_logger()
+
+SUBSET_LIST = [
+    'en', 'zh', 'ar', 'bn', 'de', 'es', 'fr', 'id', 'it', 'ja', 'ko', 'ms', 'pt', 'ru', 'sw', 'te', 'th', 'vi'
+]
+LEVEL_LIST = ['low', 'medium', 'high', 'top']
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='poly_math',
+        pretty_name='PolyMath',
+        tags=[Tags.MATH, Tags.REASONING, Tags.MULTI_LINGUAL],
+        description=
+        'PolyMath is a multilingual mathematical reasoning benchmark covering 18 languages and 4 easy-to-hard difficulty levels, with 9,000 high-quality problem samples. Our benchmark ensures difficulty comprehensiveness, language diversity, and high-quality translation, making it a highly discriminative multilingual mathematical benchmark in the era of reasoning LLMs.',  # noqa: E501
+        dataset_id='evalscope/PolyMath',
+        subset_list=SUBSET_LIST,
+        metric_list=[{
+            'acc': {
+                'numeric': True
+            }
+        }],
+        eval_split='test',
+        prompt_template='{question}',
+    )
+)
+class PolyMathAdapter(DefaultDataAdapter):
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+    def load(self) -> Tuple[DatasetDict, None]:
+        """Load all difficulty levels, rename subsets with their level suffix, and merge them."""
+        # Need to load all levels to get the full dataset
+        dataset_list: List[Dict[str, MemoryDataset]] = []
+        original_split = getattr(self, 'eval_split', None)
+        try:
+            for level in LEVEL_LIST:
+                self.eval_split = level
+                cur_level_dataset_dict, _ = super().load()
+                # Build a renamed mapping without mutating during iteration
+                renamed: Dict[str, MemoryDataset] = {
+                    f'{subset}-{level}': ds
+                    for subset, ds in cur_level_dataset_dict.items()
+                }
+                dataset_list.append(renamed)
+        finally:
+            # Restore original split to avoid side effects
+            if original_split is not None:
+                self.eval_split = original_split
+        # Merge all levels into one dataset
+        return DatasetDict.from_dataset_dicts(dataset_list), None
+
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+        """Convert a single record into a Sample with language-specific instruction."""
+        from .utils.instruction import query_dic
+
+        # e.g., 'high-en-1'
+        question_id = record['id']
+        level, language, index = question_id.split('-')
+        # Get the instruction for the specific language
+        instruction = query_dic[language]
+
+        return Sample(
+            input=record['question'] + '\n' + instruction,
+            target=record['answer'],
+            metadata={
+                'level': level,
+                'language': language,
+                'index': index,
+            },
+        )
+
+    def extract_answer(self, prediction: str, task_state):
+        from evalscope.metrics.math_parser import extract_answer
+
+        return extract_answer(prediction)
+
+    def _on_generate_report_end(self, report: Report, output_dir, **kwargs):
+        """
+        Finalize the report generation process. Calculate the difficulty-weighted accuracy (DW-ACC)
+        per language and the overall DW-ACC, and append as a new category to each metric.
+        """
+        from evalscope.report import Category, Metric
+
+        WEIGHT_DENOMINATOR = 15  # 1 + 2 + 4 + 8 for ['low','medium','high','top']
+
+        for metric in report.metrics:
+            # Collect all subsets by name for easy lookup (e.g., "en-low")
+            subset_dict: Dict[str, Subset] = {}
+            for category in metric.categories:
+                for subset in category.subsets:
+                    subset_dict[subset.name] = subset
+
+            # Compute per-language DW-ACC
+            dw_subsets: List[Subset] = []
+            for language in SUBSET_LIST:
+                weighted_sum = 0.0
+                total_num = 0
+                for i, level in enumerate(LEVEL_LIST):
+                    s = subset_dict.get(f'{language}-{level}')
+                    if s is not None:
+                        weighted_sum += (2**i) * s.score
+                        total_num += s.num
+                    # missing subsets contribute 0 by design
+                if total_num == 0:
+                    continue
+                dw_acc = weighted_sum / WEIGHT_DENOMINATOR
+                dw_subsets.append(Subset(name=language, score=dw_acc, num=total_num))
+
+            # Overall DW-ACC: unweighted average across languages
+            if dw_subsets:
+                overall_score = sum(s.score for s in dw_subsets) / len(dw_subsets)
+                overall_num = sum(s.num for s in dw_subsets)
+                dw_subsets.append(Subset(name='OVERALL', score=overall_score, num=overall_num))
+
+            # Append DW-ACC metric
+            if dw_subsets:
+                report.metrics.append(Metric(name='DW-ACC', categories=[Category(name='-', subsets=dw_subsets)]))
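PolyMathAdapter._on_generate_report_end above derives a difficulty-weighted accuracy (DW-ACC) per language: the four levels are weighted 1, 2, 4 and 8 (2**i) and the weighted sum is divided by 15. A standalone restatement of that arithmetic, with made-up per-level accuracies:

LEVEL_LIST = ['low', 'medium', 'high', 'top']
WEIGHTS = [2**i for i in range(len(LEVEL_LIST))]  # [1, 2, 4, 8]; denominator 1 + 2 + 4 + 8 = 15

# Hypothetical per-level accuracies for one language.
level_acc = {'low': 0.90, 'medium': 0.75, 'high': 0.50, 'top': 0.20}

dw_acc = sum(w * level_acc[lvl] for w, lvl in zip(WEIGHTS, LEVEL_LIST)) / sum(WEIGHTS)
# (1 * 0.90 + 2 * 0.75 + 4 * 0.50 + 8 * 0.20) / 15 = 6.0 / 15 = 0.40
print(round(dw_acc, 2))  # 0.4

The overall DW-ACC reported by the adapter is then the unweighted mean of these per-language values.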
evalscope/benchmarks/poly_math/utils/instruction.py
@@ -0,0 +1,105 @@
+# flake8: noqa
+language_dic = {
+    'en': 'English',
+    'zh': 'Chinese',
+    'ar': 'Arabic',
+    'bn': 'Bangali',
+    'de': 'German',
+    'es': 'Spanish',
+    'fr': 'French',
+    'id': 'Indonesian',
+    'it': 'Italian',
+    'ja': 'Japanese',
+    'ko': 'Korean',
+    'ms': 'Malay',
+    'pt': 'Potuguese',
+    'ru': 'Russian',
+    'sw': 'Swahili',
+    'te': 'Telugu',
+    'th': 'Thai',
+    'vi': 'Vietnamese',
+}
+
+query_dic = {
+    'en': 'Note: Please put the final answer in the $\\boxed\{\}$.',
+    'zh': '注意:请将最终答案放在 $\\boxed\{\}$ 中。',
+    'ar': 'ملاحظة: يُرجى وضع الإجابة النهائية في $\\boxed\{\}$.',
+    'bn': 'বিঃদ্রঃ: অনুগ্রহ করে চূড়ান্ত উত্তরটি $\\boxed\{\}$ এর মধ্যে রাখুন।',
+    'de': 'Hinweis: Bitte setzen Sie die endgültige Antwort in $\\boxed\{\}$.',
+    'es': 'Nota: Por favor, coloque la respuesta final en el $\\boxed\{\}$.',
+    'fr': 'Remarque : Veuillez mettre la réponse finale dans le $\\boxed\{\}$.',
+    'id': 'Catatan: Silakan letakkan jawaban akhir di dalam $\\boxed\{\}$.',
+    'it': 'Nota: Per favore, metti la risposta finale nel $\\boxed\{\}$.',
+    'ja': '注意:最終的な答えを $\\boxed\{\}$ に入れてください。',
+    'ko': '참고: 최종 답안을 $\\boxed\{\}$ 안에 넣어 주세요.',
+    'ms': 'Nota: Sila letakkan jawapan akhir dalam $\\boxed\{\}$.',
+    'pt': 'Nota: Por favor, coloque a resposta final no $\\boxed\{\}$.',
+    'ru': 'Примечание: Пожалуйста, поместите окончательный ответ в $\\boxed\{\}$.',
+    'sw': 'Kumbuka: Tafadhali weka jibu la mwisho katika $\\boxed\{\}$.',
+    'te': 'గమనిక: దయచేసి తుది జవాబును $\\boxed\{\}$ లో ఉంచండి.',
+    'th': 'หมายเหตุ: กรุณาใส่คำตอบสุดท้ายใน $\\boxed\{\}$.',
+    'vi': 'Lưu ý: Vui lòng đặt câu trả lời cuối cùng trong $\\boxed\{\}$.',
+}
+
+language_control = {
+    'forcing_raw': {
+        'en': 'Use English to think and answer.',
+        'zh': '使用中文进行思考和回答。',
+        'ar': 'استخدم العربية للتفكير والإجابة.',
+        'bn': 'বাংলা ব্যবহার করে চিন্তা এবং উত্তর দিন।',
+        'de': 'Verwende Deutsch, um zu denken und zu antworten.',
+        'es': 'Usa español para pensar y responder.',
+        'fr': 'Utilisez le français pour penser et répondre.',
+        'id': 'Gunakan bahasa Indonesia untuk berpikir dan menjawab.',
+        'it': 'Usa italiano per pensare e rispondere.',
+        'ja': '日本語を使って考え、回答してください。',
+        'ko': '한국어로 생각하고 답변하세요.',
+        'ms': 'Gunakan bahasa Melayu untuk berfikir dan menjawab.',
+        'pt': 'Use português para pensar e responder.',
+        'ru': 'Используйте русский язык для размышлений и ответов.',
+        'sw': 'Tumia Kiswahili kufikiri na kujibu.',
+        'te': 'తెలుగును ఉపయోగించి ఆలోచించి సమాధానం ఇవ్వండి.',
+        'th': 'ใช้ภาษาไทยในการคิดและตอบคำถาม.',
+        'vi': 'Sử dụng tiếng Việt để suy nghĩ và trả lời.',
+    },
+    'forcing_en': {
+        'en': 'Use English to think and answer.',
+        'zh': '使用英文进行思考和回答。',
+        'ar': 'استخدم اللغة الإنجليزية للتفكير والإجابة.',
+        'bn': 'ইংরেজি ব্যবহার করে চিন্তা এবং উত্তর দিন।',
+        'de': 'Verwenden Sie Englisch, um zu denken und zu antworten.',
+        'es': 'Usa inglés para pensar y responder.',
+        'fr': "Utilisez l'anglais pour penser et répondre.",
+        'id': 'Gunakan bahasa Inggris untuk berpikir dan menjawab.',
+        'it': 'Usa inglese per pensare e rispondere.',
+        'ja': '英語を使って考え、回答してください。',
+        'ko': '영어로 생각하고 답변하세요.',
+        'ms': 'Gunakan bahasa Inggeris untuk berfikir dan menjawab.',
+        'pt': 'Use inglês para pensar e responder.',
+        'ru': 'Используйте английский язык, чтобы думать и отвечать.',
+        'sw': 'Tumia Kiingereza kufikiri na kujibu.',
+        'te': 'ఇంగ్లీష్ను ఉపయోగించి ఆలోచించి ఉత్తరించండి.',
+        'th': 'ใช้ภาษาอังกฤษในการคิดและตอบคำถาม.',
+        'vi': 'Sử dụng tiếng Anh để suy nghĩ và trả lời.',
+    },
+    'forcing_prefer': {
+        'en': 'Choose the language you are most proficient in to think and answer.',
+        'zh': '自选一种你最擅长的语言进行思考和回答。',
+        'ar': 'اختر اللغة التي تجيدها أكثر للتفكير والإجابة.',
+        'bn': 'আপনি যে ভাষাটি সবচেয়ে পারদর্শী সেটি বেছে নিয়ে চিন্তা এবং উত্তর দিন।',
+        'de': 'Wählen Sie die Sprache, in der Sie am kompetentesten sind, um zu denken und zu antworten.',
+        'es': 'Elige el idioma en el que eres más competente para pensar y responder.',
+        'fr': 'Choisissez la langue dans laquelle vous êtes le plus compétent pour penser et répondre.',
+        'id': 'Pilih bahasa yang paling Anda kuasai untuk berpikir dan menjawab.',
+        'it': 'Scegli la lingua in cui sei più competente per pensare e rispondere.',
+        'ja': '最も得意な言語を選んで考え、回答してください。',
+        'ko': '가장 능숙한 언어를 선택하여 생각하고 답변하세요.',
+        'ms': 'Pilih bahasa yang paling anda mahir untuk berfikir dan menjawab.',
+        'pt': 'Escolha o idioma em que você é mais competente para pensar e responder.',
+        'ru': 'Выберите язык, в котором вы наиболее компетентны, чтобы думать и отвечать.',
+        'sw': 'Chagua lugha ambayo unamudu zaidi kufikiri na kujibu.',
+        'te': 'మీరు అత్యంత స్థిరంగా ఉన్న భాషను స్వీకరించి ఆలోచించిตอบ。',
+        'th': 'เลือกภาษาที่คุณมีความสามารถมากที่สุดในการคิดและตอบคำถาม.',
+        'vi': 'Chọn ngôn ngữ mà bạn thành thạo nhất để suy nghĩ và trả lời.',
+    }
+}
File without changes
evalscope/benchmarks/pope/pope_adapter.py
@@ -0,0 +1,112 @@
+from typing import Any, Dict, List
+
+from evalscope.api.benchmark import BenchmarkMeta, VisionLanguageAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.evaluator.state import TaskState
+from evalscope.api.messages import ChatMessageUser, Content, ContentImage, ContentText
+from evalscope.api.metric.scorer import AggScore, SampleScore, Score
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
+from evalscope.utils.io_utils import bytes_to_base64
+from evalscope.utils.logger import get_logger
+
+logger = get_logger()
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='pope',
+        pretty_name='POPE',
+        tags=[Tags.MULTI_MODAL, Tags.HALLUCINATION, Tags.YES_NO],
+        description=
+        'POPE (Polling-based Object Probing Evaluation) is a benchmark designed to evaluate object hallucination in large vision-language models (LVLMs). It tests models by having them answer simple yes/no questions about the presence of specific objects in an image. This method helps measure how accurately a model\'s responses align with the visual content, with a focus on identifying instances where models claim objects exist that are not actually present. The benchmark employs various sampling strategies, including random, popular, and adversarial sampling, to create a robust set of questions for assessment.',  # noqa: E501
+        dataset_id='lmms-lab/POPE',
+        metric_list=['accuracy', 'precision', 'recall', 'f1_score', 'yes_ratio'],
+        aggregation='f1',
+        subset_list=['popular', 'adversarial', 'random'],
+        default_subset='Full',
+        prompt_template='{question}\nPlease answer YES or NO without an explanation.',
+    )
+)
+class PopeAdapter(VisionLanguageAdapter):
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.split_as_subset = True
+        self.add_overall_metric = False
+
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+
+        input_text = self.prompt_template.format(question=record['question'])
+        content_list: List[Content] = [ContentText(text=input_text)]
+        image = record.get('image')
+        if image:
+            image_base64 = bytes_to_base64(image['bytes'], format='png', add_header=True)
+            content_list.append(ContentImage(image=image_base64))
+        answer = record['answer'].upper()  # 'YES' or 'NO'
+        return Sample(
+            input=[ChatMessageUser(content=content_list)],
+            target=answer,
+            metadata={
+                'id': record.get('id'),
+                'answer': answer,
+                'category': record.get('category'),
+                'question_id': record.get('question_id'),
+            }
+        )
+
+    def match_score(self, original_prediction, filtered_prediction, reference, task_state) -> Score:
+        score = Score(
+            extracted_prediction=filtered_prediction,
+            prediction=original_prediction,
+        )
+        # Check if the reference answer is in the filtered prediction
+        result = 1 if reference in filtered_prediction.strip().upper() else 0
+        score.value = {'acc': result}
+        return score
+
+    def aggregate_scores(self, sample_scores: List[SampleScore]) -> List[AggScore]:
+        """
+        Custom aggregation to compute accuracy, precision, recall, f1_score, and yes_ratio.
+        """
+
+        def compute_metrics(scores: List[SampleScore]):
+            tp = fp = tn = fn = 0
+            yes_count = 0
+            total_count = len(scores)
+
+            for ss in scores:
+                gt = ss.sample_metadata['answer'].strip().upper()
+                # Get prediction based on score
+                pred = gt if ss.score.main_value == 1 else ('NO' if gt == 'YES' else 'YES')
+                if pred == 'YES':
+                    yes_count += 1
+                if pred == 'YES' and gt == 'YES':
+                    tp += 1
+                elif pred == 'YES' and gt == 'NO':
+                    fp += 1
+                elif pred == 'NO' and gt == 'NO':
+                    tn += 1
+                elif pred == 'NO' and gt == 'YES':
+                    fn += 1
+
+            accuracy = (tp + tn) / total_count if total_count > 0 else 0.0
+            precision = tp / (tp + fp) if (tp + fp) > 0 else 0.0
+            recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0
+            f1_score = (2 * precision * recall) / (precision + recall) if (precision + recall) > 0 else 0.0
+            yes_ratio = yes_count / total_count if total_count > 0 else 0.0
+
+            return {
+                'accuracy': accuracy,
+                'precision': precision,
+                'recall': recall,
+                'f1_score': f1_score,
+                'yes_ratio': yes_ratio
+            }
+
+        overall_metrics = compute_metrics(sample_scores)
+        agg_scores = []
+        for metric_name, value in overall_metrics.items():
+            agg_scores.append(AggScore(metric_name=metric_name, score=value, num=len(sample_scores), metadata={}))
+
+        return agg_scores
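Because PopeAdapter.match_score only records a binary 'acc' per sample, aggregate_scores reconstructs each YES/NO prediction from the stored ground truth plus the correctness flag before tallying the confusion matrix. A standalone restatement of that step, not part of the diff:

def recover_prediction(ground_truth: str, correct: bool) -> str:
    # A correct sample's prediction equals the ground truth; an incorrect one
    # must have been the opposite YES/NO answer.
    if correct:
        return ground_truth
    return 'NO' if ground_truth == 'YES' else 'YES'

assert recover_prediction('YES', True) == 'YES'
assert recover_prediction('YES', False) == 'NO'
assert recover_prediction('NO', False) == 'YES'

From the recovered predictions the adapter computes accuracy, precision, recall, F1, and the yes-ratio exactly as in the compute_metrics helper above.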
File without changes
evalscope/benchmarks/process_bench/process_bench_adapter.py
@@ -0,0 +1,171 @@
+# flake8: noqa: E501
+import re
+from typing import Any, Dict, List
+
+from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.evaluator import TaskState
+from evalscope.api.metric import Score
+from evalscope.api.metric.scorer import AggScore, SampleScore
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
+from evalscope.utils.logger import get_logger
+
+logger = get_logger()
+
+CRITIQUE_TEMPLATE = """The following is a math problem and a solution (split into paragraphs, enclosed with tags and indexed from 0):
+
+[Math Problem]
+
+{problem}
+
+[Solution]
+
+{tagged_response}
+
+Your task is to review and critique the solution paragraph by paragraph. Once you identify an error in a paragraph, return the index of the paragraph where the earliest error occurs. Otherwise, return the index of -1 (which typically denotes "not found").
+
+Please put your final answer (i.e., the index) in \boxed{{}}.
+"""
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='process_bench',
+        pretty_name='ProcessBench',
+        tags=[Tags.MATH, Tags.REASONING],
+        description=
+        'ProcessBench is a benchmark for evaluating AI models on mathematical reasoning tasks. It includes various subsets such as GSM8K, Math, OlympiadBench, and OmniMath, each with its own set of problems that require step-by-step reasoning to arrive at the correct answer.',  # noqa: E501
+        dataset_id='Qwen/ProcessBench',
+        subset_list=['gsm8k', 'math', 'olympiadbench', 'omnimath'],
+        metric_list=['error_acc', 'correct_acc', 'simple_f1_score'],
+        aggregation='f1',
+        eval_split='test',
+        prompt_template=CRITIQUE_TEMPLATE
+    )
+)
+class ProcessBenchAdapter(DefaultDataAdapter):
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.split_as_subset = True  # Use split as subset
+
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+        """
+        Convert a data record to a Sample object.
+
+        Args:
+            record (Dict[str, Any]): Input data record.
+
+        Returns:
+            Sample: Sample object with input, target, and metadata.
+        """
+        problem = record['problem']
+        steps = record['steps']
+        tagged_response = ''
+        for sdx, step in enumerate(steps):
+            tagged_response += f'<paragraph_{sdx}>\n{step}\n</paragraph_{sdx}>\n\n'
+        tagged_response = tagged_response.strip()
+
+        return Sample(
+            input=problem,
+            target=str(record['label']),
+            metadata={
+                'steps': steps,
+                'tagged_response': tagged_response,
+                'final_answer_correct': record['final_answer_correct']
+            }
+        )
+
+    def format_prompt_template(self, sample):
+        """Format the prompt template with problem and tagged response."""
+        problem = sample.input
+        tagged_response = sample.metadata['tagged_response']
+        return self.prompt_template.format(problem=problem, tagged_response=tagged_response)
+
+    def extract_answer(self, prediction: str, task_state: TaskState):
+        """Extract the answer from the model prediction."""
+        pred = self._extract_answer_from_text(prediction)
+        try:
+            pred = int(pred)
+        except Exception:
+            pred = None
+        return pred
+
+    def match_score(
+        self, original_prediction: str, filtered_prediction: str, reference: str, task_state: TaskState
+    ) -> Score:
+        """Calculate evaluation scores by comparing prediction with reference."""
+        score = Score(
+            extracted_prediction=str(filtered_prediction) if filtered_prediction is not None else None,
+            prediction=original_prediction,
+        )
+
+        # Convert filtered_prediction to int if possible
+        try:
+            pred_int = int(filtered_prediction) if filtered_prediction is not None else None
+        except (ValueError, TypeError):
+            pred_int = None
+
+        # Calculate accuracy
+        reference = int(reference) if reference is not None else None
+        accuracy = 1.0 if reference == pred_int else 0.0
+
+        # Determine metric name based on label
+        if reference == -1:
+            metric_name = 'correct_acc'
+        else:
+            metric_name = 'error_acc'
+
+        score.value = {metric_name: accuracy}
+        score.main_score_name = metric_name
+
+        return score
+
+    def aggregate_scores(self, sample_scores: List[SampleScore]) -> List[AggScore]:
+        """Aggregate scores to compute final metrics including F1 score."""
+        correct_scores = []
+        error_scores = []
+
+        for sample_score in sample_scores:
+            score = sample_score.score
+            if 'correct_acc' in score.value:
+                correct_scores.append(score.value['correct_acc'])
+            elif 'error_acc' in score.value:
+                error_scores.append(score.value['error_acc'])
+
+        agg_list = []
+
+        if correct_scores:
+            agg_list.append(
+                AggScore(
+                    metric_name='correct_acc', score=sum(correct_scores) / len(correct_scores), num=len(correct_scores)
+                )
+            )
+
+        if error_scores:
+            agg_list.append(
+                AggScore(metric_name='error_acc', score=sum(error_scores) / len(error_scores), num=len(error_scores))
+            )
+
+        # Calculate simple F1 score
+        if correct_scores and error_scores:
+            from evalscope.metrics import simple_f1_score
+            agg_list.append(
+                AggScore(
+                    metric_name='simple_f1_score',
+                    score=simple_f1_score((correct_scores, error_scores)),
+                    num=len(correct_scores) + len(error_scores)
+                )
+            )
+
+        return agg_list
+
+    @staticmethod
+    def _extract_answer_from_text(solution_text: str):
+        """Extract answer from solution text using boxed pattern."""
+        boxed_pattern = r'\\boxed\{([^}]*)\}'
+        matches = re.findall(boxed_pattern, solution_text)
+        if matches:
+            return matches[-1].strip()
+        return None
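ProcessBenchAdapter scores samples whose label is -1 (no erroneous step) under correct_acc and all others under error_acc, then combines the two mean accuracies with simple_f1_score. That function's implementation is not part of this diff; assuming it follows the ProcessBench paper and takes the harmonic mean of the two accuracies, the aggregation behaves roughly like this sketch:

from typing import List, Tuple

def harmonic_mean_f1(scores: Tuple[List[float], List[float]]) -> float:
    # Assumed behaviour of evalscope.metrics.simple_f1_score: harmonic mean of the
    # mean correct_acc and the mean error_acc.
    correct_scores, error_scores = scores
    if not correct_scores or not error_scores:
        return 0.0
    acc_correct = sum(correct_scores) / len(correct_scores)
    acc_error = sum(error_scores) / len(error_scores)
    if acc_correct + acc_error == 0:
        return 0.0
    return 2 * acc_correct * acc_error / (acc_correct + acc_error)

# e.g. mean correct_acc 0.8 and mean error_acc 0.6 -> 2 * 0.8 * 0.6 / 1.4 ≈ 0.686
print(harmonic_mean_f1(([1.0, 0.6], [0.6, 0.6])))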
File without changes