evalscope 0.10.0__py3-none-any.whl → 1.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- evalscope/__init__.py +4 -1
- evalscope/api/benchmark/__init__.py +11 -0
- evalscope/api/benchmark/adapters/__init__.py +7 -0
- evalscope/api/benchmark/adapters/agent_adapter.py +8 -0
- evalscope/api/benchmark/adapters/default_data_adapter.py +754 -0
- evalscope/api/benchmark/adapters/image_edit_adapter.py +82 -0
- evalscope/api/benchmark/adapters/multi_choice_adapter.py +86 -0
- evalscope/api/benchmark/adapters/ner_adapter.py +212 -0
- evalscope/api/benchmark/adapters/text2image_adapter.py +157 -0
- evalscope/api/benchmark/adapters/vision_language_adapter.py +8 -0
- evalscope/api/benchmark/benchmark.py +404 -0
- evalscope/api/benchmark/meta.py +124 -0
- evalscope/api/dataset/__init__.py +2 -0
- evalscope/api/dataset/dataset.py +370 -0
- evalscope/api/dataset/loader.py +266 -0
- evalscope/api/dataset/utils.py +143 -0
- evalscope/api/evaluator/__init__.py +3 -0
- evalscope/api/evaluator/cache.py +382 -0
- evalscope/api/evaluator/evaluator.py +61 -0
- evalscope/api/evaluator/state.py +280 -0
- evalscope/api/filter/__init__.py +1 -0
- evalscope/api/filter/filter.py +72 -0
- evalscope/api/messages/__init__.py +12 -0
- evalscope/api/messages/chat_message.py +248 -0
- evalscope/api/messages/content.py +102 -0
- evalscope/api/messages/utils.py +35 -0
- evalscope/api/metric/__init__.py +2 -0
- evalscope/api/metric/metric.py +60 -0
- evalscope/api/metric/scorer.py +113 -0
- evalscope/api/mixin/__init__.py +2 -0
- evalscope/api/mixin/llm_judge_mixin.py +170 -0
- evalscope/api/mixin/sandbox_mixin.py +182 -0
- evalscope/api/model/__init__.py +12 -0
- evalscope/api/model/generate_config.py +161 -0
- evalscope/api/model/model.py +386 -0
- evalscope/api/model/model_output.py +285 -0
- evalscope/api/registry.py +182 -0
- evalscope/api/tool/__init__.py +3 -0
- evalscope/api/tool/tool_call.py +101 -0
- evalscope/api/tool/tool_info.py +173 -0
- evalscope/api/tool/utils.py +64 -0
- evalscope/app/__init__.py +28 -0
- evalscope/app/app.py +38 -0
- evalscope/app/arguments.py +11 -0
- evalscope/app/constants.py +22 -0
- evalscope/app/ui/__init__.py +20 -0
- evalscope/app/ui/app_ui.py +53 -0
- evalscope/app/ui/multi_model.py +353 -0
- evalscope/app/ui/sidebar.py +42 -0
- evalscope/app/ui/single_model.py +220 -0
- evalscope/app/ui/visualization.py +36 -0
- evalscope/app/utils/data_utils.py +195 -0
- evalscope/app/utils/env_utils.py +12 -0
- evalscope/app/utils/localization.py +221 -0
- evalscope/app/utils/text_utils.py +119 -0
- evalscope/app/utils/visualization.py +96 -0
- evalscope/arguments.py +32 -9
- evalscope/backend/opencompass/api_meta_template.py +2 -1
- evalscope/backend/opencompass/backend_manager.py +10 -7
- evalscope/backend/rag_eval/__init__.py +1 -1
- evalscope/backend/rag_eval/backend_manager.py +23 -6
- evalscope/backend/rag_eval/clip_benchmark/dataset_builder.py +33 -21
- evalscope/backend/rag_eval/clip_benchmark/task_template.py +8 -4
- evalscope/backend/rag_eval/cmteb/arguments.py +14 -1
- evalscope/backend/rag_eval/cmteb/task_template.py +19 -3
- evalscope/backend/rag_eval/cmteb/tasks/CustomTask.py +1 -1
- evalscope/backend/rag_eval/ragas/arguments.py +0 -1
- evalscope/backend/rag_eval/ragas/task_template.py +2 -1
- evalscope/backend/rag_eval/ragas/tasks/build_distribution.py +2 -1
- evalscope/backend/rag_eval/ragas/tasks/build_transform.py +7 -4
- evalscope/backend/rag_eval/ragas/tasks/testset_generation.py +9 -3
- evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py +2 -6
- evalscope/backend/rag_eval/utils/embedding.py +125 -32
- evalscope/backend/rag_eval/utils/llm.py +16 -16
- evalscope/backend/vlm_eval_kit/backend_manager.py +8 -3
- evalscope/benchmarks/__init__.py +17 -5
- evalscope/benchmarks/aa_lcr/__init__.py +0 -0
- evalscope/benchmarks/aa_lcr/aa_lcr_adapter.py +205 -0
- evalscope/benchmarks/ai2d/__init__.py +0 -0
- evalscope/benchmarks/ai2d/ai2d_adapter.py +54 -0
- evalscope/benchmarks/aime/__init__.py +0 -0
- evalscope/benchmarks/aime/aime24_adapter.py +55 -0
- evalscope/benchmarks/aime/aime25_adapter.py +181 -0
- evalscope/benchmarks/aime/grader.py +307 -0
- evalscope/{metrics/math_accuracy.py → benchmarks/aime/math_normalize.py} +61 -72
- evalscope/benchmarks/alpaca_eval/__init__.py +0 -0
- evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py +133 -0
- evalscope/benchmarks/amc/__init__.py +0 -0
- evalscope/benchmarks/amc/amc_adapter.py +51 -0
- evalscope/benchmarks/arc/arc_adapter.py +34 -149
- evalscope/benchmarks/arena_hard/__init__.py +0 -0
- evalscope/benchmarks/arena_hard/arena_hard_adapter.py +149 -0
- evalscope/benchmarks/arena_hard/utils.py +186 -0
- evalscope/benchmarks/bbh/bbh_adapter.py +117 -157
- evalscope/benchmarks/bfcl/__init__.py +0 -0
- evalscope/benchmarks/bfcl/v3/__init__.py +0 -0
- evalscope/benchmarks/bfcl/v3/bfcl_v3_adapter.py +370 -0
- evalscope/benchmarks/bfcl/v3/generation.py +222 -0
- evalscope/benchmarks/bfcl/v3/utils.py +23 -0
- evalscope/benchmarks/bfcl/v4/__init__.py +0 -0
- evalscope/benchmarks/bfcl/v4/bfcl_v4_adapter.py +229 -0
- evalscope/benchmarks/bfcl/v4/utils.py +410 -0
- evalscope/benchmarks/biomix_qa/__init__.py +0 -0
- evalscope/benchmarks/biomix_qa/biomix_qa_adapter.py +36 -0
- evalscope/benchmarks/blink/__init__.py +0 -0
- evalscope/benchmarks/blink/blink_adapter.py +61 -0
- evalscope/benchmarks/ceval/ceval_adapter.py +93 -174
- evalscope/benchmarks/chartqa/__init__.py +0 -0
- evalscope/benchmarks/chartqa/chartqa_adapter.py +80 -0
- evalscope/benchmarks/chartqa/utils.py +38 -0
- evalscope/benchmarks/chinese_simple_qa/__init__.py +0 -0
- evalscope/benchmarks/chinese_simple_qa/csimple_qa_adapter.py +170 -0
- evalscope/benchmarks/cmmlu/cmmlu_adapter.py +34 -140
- evalscope/benchmarks/coin_flip/__init__.py +0 -0
- evalscope/benchmarks/coin_flip/coin_flip_adapter.py +128 -0
- evalscope/benchmarks/commonsense_qa/__init__.py +0 -0
- evalscope/benchmarks/commonsense_qa/commonsense_qa_adapter.py +32 -0
- evalscope/benchmarks/competition_math/competition_math_adapter.py +64 -112
- evalscope/benchmarks/data_collection/__init__.py +0 -0
- evalscope/benchmarks/data_collection/data_collection_adapter.py +215 -0
- evalscope/benchmarks/docmath/__init__.py +0 -0
- evalscope/benchmarks/docmath/docmath_adapter.py +143 -0
- evalscope/benchmarks/docmath/utils.py +219 -0
- evalscope/benchmarks/docvqa/__init__.py +0 -0
- evalscope/benchmarks/docvqa/docvqa_adapter.py +67 -0
- evalscope/benchmarks/drivelology/__init__.py +0 -0
- evalscope/benchmarks/drivelology/drivelology_binary_adapter.py +170 -0
- evalscope/benchmarks/drivelology/drivelology_multilabel_adapter.py +254 -0
- evalscope/benchmarks/drivelology/drivelology_selection_adapter.py +49 -0
- evalscope/benchmarks/drivelology/drivelology_writing_adapter.py +218 -0
- evalscope/benchmarks/drop/__init__.py +0 -0
- evalscope/benchmarks/drop/drop_adapter.py +155 -0
- evalscope/benchmarks/drop/utils.py +156 -0
- evalscope/benchmarks/frames/__init__.py +0 -0
- evalscope/benchmarks/frames/frames_adapter.py +175 -0
- evalscope/benchmarks/frames/utils.py +37 -0
- evalscope/benchmarks/general_arena/__init__.py +0 -0
- evalscope/benchmarks/general_arena/general_arena_adapter.py +454 -0
- evalscope/benchmarks/general_arena/utils.py +223 -0
- evalscope/benchmarks/general_mcq/__init__.py +0 -0
- evalscope/benchmarks/general_mcq/general_mcq_adapter.py +58 -0
- evalscope/benchmarks/general_qa/general_qa_adapter.py +75 -107
- evalscope/benchmarks/gpqa/__init__.py +0 -0
- evalscope/benchmarks/gpqa/gpqa_adapter.py +90 -0
- evalscope/benchmarks/gpqa/prompt.py +88 -0
- evalscope/benchmarks/gsm8k/gsm8k_adapter.py +77 -144
- evalscope/benchmarks/hallusion_bench/__init__.py +0 -0
- evalscope/benchmarks/hallusion_bench/hallusion_bench_adapter.py +159 -0
- evalscope/benchmarks/halu_eval/__init__.py +0 -0
- evalscope/benchmarks/halu_eval/halu_eval_adapter.py +128 -0
- evalscope/benchmarks/halu_eval/halu_eval_instructions.py +84 -0
- evalscope/benchmarks/healthbench/__init__.py +0 -0
- evalscope/benchmarks/healthbench/healthbench_adapter.py +282 -0
- evalscope/benchmarks/healthbench/utils.py +102 -0
- evalscope/benchmarks/hellaswag/hellaswag_adapter.py +36 -134
- evalscope/benchmarks/hle/__init__.py +0 -0
- evalscope/benchmarks/hle/hle_adapter.py +153 -0
- evalscope/benchmarks/humaneval/humaneval_adapter.py +80 -88
- evalscope/benchmarks/humaneval/utils.py +235 -0
- evalscope/benchmarks/ifeval/ifeval_adapter.py +71 -45
- evalscope/benchmarks/ifeval/instructions.py +112 -68
- evalscope/benchmarks/ifeval/instructions_registry.py +1 -1
- evalscope/benchmarks/ifeval/instructions_util.py +2 -3
- evalscope/benchmarks/ifeval/utils.py +6 -7
- evalscope/benchmarks/image_edit/__init__.py +0 -0
- evalscope/benchmarks/image_edit/gedit/__init__.py +0 -0
- evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +138 -0
- evalscope/benchmarks/image_edit/gedit/utils.py +372 -0
- evalscope/benchmarks/image_edit/gedit/vie_prompts.py +406 -0
- evalscope/benchmarks/infovqa/__init__.py +0 -0
- evalscope/benchmarks/infovqa/infovqa_adapter.py +66 -0
- evalscope/benchmarks/iquiz/iquiz_adapter.py +30 -58
- evalscope/benchmarks/live_code_bench/__init__.py +0 -0
- evalscope/benchmarks/live_code_bench/evaluate_utils.py +195 -0
- evalscope/benchmarks/live_code_bench/extract_utils.py +70 -0
- evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +150 -0
- evalscope/benchmarks/live_code_bench/load_utils.py +63 -0
- evalscope/benchmarks/live_code_bench/pass_k_utils.py +56 -0
- evalscope/benchmarks/live_code_bench/prompts.py +207 -0
- evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0
- evalscope/benchmarks/live_code_bench/testing_util.py +544 -0
- evalscope/benchmarks/logi_qa/__int__.py +0 -0
- evalscope/benchmarks/logi_qa/logi_qa_adapter.py +41 -0
- evalscope/benchmarks/maritime_bench/__init__.py +0 -0
- evalscope/benchmarks/maritime_bench/maritime_bench_adapter.py +56 -0
- evalscope/benchmarks/math_500/__init__.py +0 -0
- evalscope/benchmarks/math_500/math_500_adapter.py +55 -0
- evalscope/benchmarks/math_qa/__init__.py +0 -0
- evalscope/benchmarks/math_qa/math_qa_adapter.py +35 -0
- evalscope/benchmarks/math_verse/__init__.py +0 -0
- evalscope/benchmarks/math_verse/math_verse_adapter.py +105 -0
- evalscope/benchmarks/math_vision/__init__.py +0 -0
- evalscope/benchmarks/math_vision/math_vision_adapter.py +116 -0
- evalscope/benchmarks/math_vista/__init__.py +0 -0
- evalscope/benchmarks/math_vista/math_vista_adapter.py +114 -0
- evalscope/benchmarks/med_mcqa/__init__.py +0 -0
- evalscope/benchmarks/med_mcqa/med_mcqa_adapter.py +32 -0
- evalscope/benchmarks/minerva_math/__init__.py +0 -0
- evalscope/benchmarks/minerva_math/minerva_math_adapter.py +53 -0
- evalscope/benchmarks/mm_bench/__init__.py +0 -0
- evalscope/benchmarks/mm_bench/mm_bench_adapter.py +99 -0
- evalscope/benchmarks/mm_star/__init__.py +0 -0
- evalscope/benchmarks/mm_star/mm_star_adapter.py +73 -0
- evalscope/benchmarks/mmlu/mmlu_adapter.py +32 -210
- evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +87 -103
- evalscope/benchmarks/mmlu_redux/__init__.py +0 -0
- evalscope/benchmarks/mmlu_redux/mmlu_redux_adapter.py +139 -0
- evalscope/benchmarks/mmmu/__init__.py +0 -0
- evalscope/benchmarks/mmmu/mmmu_adapter.py +159 -0
- evalscope/benchmarks/mmmu_pro/__init__.py +0 -0
- evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +124 -0
- evalscope/benchmarks/mri_mcqa/__init__.py +0 -0
- evalscope/benchmarks/mri_mcqa/mri_mcqa_adapter.py +34 -0
- evalscope/benchmarks/multi_if/__init__.py +0 -0
- evalscope/benchmarks/multi_if/ifeval.py +3354 -0
- evalscope/benchmarks/multi_if/metrics.py +120 -0
- evalscope/benchmarks/multi_if/multi_if_adapter.py +161 -0
- evalscope/benchmarks/music_trivia/__init__.py +0 -0
- evalscope/benchmarks/music_trivia/music_trivia_adapter.py +36 -0
- evalscope/benchmarks/musr/__init__.py +0 -0
- evalscope/benchmarks/musr/musr_adapter.py +43 -0
- evalscope/benchmarks/needle_haystack/__init__.py +0 -0
- evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +389 -0
- evalscope/benchmarks/needle_haystack/utils.py +79 -0
- evalscope/benchmarks/ner/__init__.py +0 -0
- evalscope/benchmarks/ner/broad_twitter_corpus_adapter.py +52 -0
- evalscope/benchmarks/ner/conll2003_adapter.py +48 -0
- evalscope/benchmarks/ner/copious_adapter.py +85 -0
- evalscope/benchmarks/ner/cross_ner_adapter.py +120 -0
- evalscope/benchmarks/ner/cross_ner_entities/__init__.py +0 -0
- evalscope/benchmarks/ner/cross_ner_entities/ai.py +54 -0
- evalscope/benchmarks/ner/cross_ner_entities/literature.py +36 -0
- evalscope/benchmarks/ner/cross_ner_entities/music.py +39 -0
- evalscope/benchmarks/ner/cross_ner_entities/politics.py +37 -0
- evalscope/benchmarks/ner/cross_ner_entities/science.py +58 -0
- evalscope/benchmarks/ner/genia_ner_adapter.py +66 -0
- evalscope/benchmarks/ner/harvey_ner_adapter.py +58 -0
- evalscope/benchmarks/ner/mit_movie_trivia_adapter.py +74 -0
- evalscope/benchmarks/ner/mit_restaurant_adapter.py +66 -0
- evalscope/benchmarks/ner/ontonotes5_adapter.py +87 -0
- evalscope/benchmarks/ner/wnut2017_adapter.py +61 -0
- evalscope/benchmarks/ocr_bench/__init__.py +0 -0
- evalscope/benchmarks/ocr_bench/ocr_bench/__init__.py +0 -0
- evalscope/benchmarks/ocr_bench/ocr_bench/ocr_bench_adapter.py +101 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/IoUscore_metric.py +87 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/TEDS_metric.py +963 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/__init__.py +0 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/ocr_bench_v2_adapter.py +161 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/page_ocr_metric.py +50 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/parallel.py +46 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/__init__.py +0 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/readme.txt +26 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/rrc_evaluation_funcs_1_1.py +537 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_eval/script.py +481 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/spotting_metric.py +179 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/utils.py +433 -0
- evalscope/benchmarks/ocr_bench/ocr_bench_v2/vqa_metric.py +254 -0
- evalscope/benchmarks/olympiad_bench/__init__.py +0 -0
- evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +163 -0
- evalscope/benchmarks/olympiad_bench/utils.py +565 -0
- evalscope/benchmarks/omni_bench/__init__.py +0 -0
- evalscope/benchmarks/omni_bench/omni_bench_adapter.py +86 -0
- evalscope/benchmarks/omnidoc_bench/__init__.py +0 -0
- evalscope/benchmarks/omnidoc_bench/end2end_eval.py +349 -0
- evalscope/benchmarks/omnidoc_bench/metrics.py +547 -0
- evalscope/benchmarks/omnidoc_bench/omnidoc_bench_adapter.py +135 -0
- evalscope/benchmarks/omnidoc_bench/utils.py +1937 -0
- evalscope/benchmarks/piqa/__init__.py +0 -0
- evalscope/benchmarks/piqa/piqa_adapter.py +32 -0
- evalscope/benchmarks/poly_math/__init__.py +0 -0
- evalscope/benchmarks/poly_math/poly_math_adapter.py +132 -0
- evalscope/benchmarks/poly_math/utils/instruction.py +105 -0
- evalscope/benchmarks/pope/__init__.py +0 -0
- evalscope/benchmarks/pope/pope_adapter.py +112 -0
- evalscope/benchmarks/process_bench/__init__.py +0 -0
- evalscope/benchmarks/process_bench/process_bench_adapter.py +171 -0
- evalscope/benchmarks/pumed_qa/__init__.py +0 -0
- evalscope/benchmarks/pumed_qa/pubmed_qa_adapter.py +175 -0
- evalscope/benchmarks/qasc/__init__.py +0 -0
- evalscope/benchmarks/qasc/qasc_adapter.py +35 -0
- evalscope/benchmarks/race/race_adapter.py +33 -120
- evalscope/benchmarks/real_world_qa/__init__.py +0 -0
- evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +64 -0
- evalscope/benchmarks/sciq/__init__.py +0 -0
- evalscope/benchmarks/sciq/sciq_adapter.py +36 -0
- evalscope/benchmarks/seed_bench_2_plus/__init__.py +0 -0
- evalscope/benchmarks/seed_bench_2_plus/seed_bench_2_plus_adapter.py +72 -0
- evalscope/benchmarks/simple_qa/__init__.py +0 -0
- evalscope/benchmarks/simple_qa/simple_qa_adapter.py +169 -0
- evalscope/benchmarks/simple_vqa/__init__.py +0 -0
- evalscope/benchmarks/simple_vqa/simple_vqa_adapter.py +169 -0
- evalscope/benchmarks/siqa/__init__.py +0 -0
- evalscope/benchmarks/siqa/siqa_adapter.py +39 -0
- evalscope/benchmarks/super_gpqa/__init__.py +0 -0
- evalscope/benchmarks/super_gpqa/prompt.py +88 -0
- evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py +165 -0
- evalscope/benchmarks/super_gpqa/utils.py +86 -0
- evalscope/benchmarks/tau_bench/__init__.py +0 -0
- evalscope/benchmarks/tau_bench/tau2_bench/__init__.py +0 -0
- evalscope/benchmarks/tau_bench/tau2_bench/generation.py +158 -0
- evalscope/benchmarks/tau_bench/tau2_bench/tau2_bench_adapter.py +146 -0
- evalscope/benchmarks/tau_bench/tau_bench/__init__.py +0 -0
- evalscope/benchmarks/tau_bench/tau_bench/generation.py +147 -0
- evalscope/benchmarks/tau_bench/tau_bench/tau_bench_adapter.py +168 -0
- evalscope/benchmarks/text2image/__init__.py +0 -0
- evalscope/benchmarks/text2image/evalmuse_adapter.py +78 -0
- evalscope/benchmarks/text2image/genai_bench_adapter.py +53 -0
- evalscope/benchmarks/text2image/general_t2i_adapter.py +42 -0
- evalscope/benchmarks/text2image/hpdv2_adapter.py +52 -0
- evalscope/benchmarks/text2image/tifa_adapter.py +27 -0
- evalscope/benchmarks/tool_bench/__init__.py +0 -0
- evalscope/benchmarks/tool_bench/tool_bench_adapter.py +102 -0
- evalscope/benchmarks/tool_bench/utils.py +203 -0
- evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +56 -118
- evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +70 -270
- evalscope/benchmarks/visu_logic/__init__.py +0 -0
- evalscope/benchmarks/visu_logic/visu_logic_adapter.py +75 -0
- evalscope/benchmarks/winogrande/__init__.py +0 -0
- evalscope/benchmarks/winogrande/winogrande_adapter.py +34 -0
- evalscope/benchmarks/wmt/__init__.py +0 -0
- evalscope/benchmarks/wmt/wmt24_adapter.py +294 -0
- evalscope/benchmarks/zerobench/__init__.py +0 -0
- evalscope/benchmarks/zerobench/zerobench_adapter.py +64 -0
- evalscope/cli/cli.py +2 -0
- evalscope/cli/start_app.py +12 -2
- evalscope/cli/start_eval.py +4 -3
- evalscope/cli/start_perf.py +10 -2
- evalscope/cli/start_server.py +6 -3
- evalscope/collections/__init__.py +27 -3
- evalscope/collections/sampler.py +12 -11
- evalscope/collections/schema.py +13 -12
- evalscope/config.py +218 -147
- evalscope/constants.py +78 -82
- evalscope/evaluator/__init__.py +1 -1
- evalscope/evaluator/evaluator.py +334 -318
- evalscope/filters/__init__.py +2 -0
- evalscope/filters/extraction.py +126 -0
- evalscope/filters/selection.py +57 -0
- evalscope/metrics/__init__.py +59 -3
- evalscope/metrics/bert_score/__init__.py +0 -0
- evalscope/metrics/bert_score/scorer.py +338 -0
- evalscope/metrics/bert_score/utils.py +697 -0
- evalscope/metrics/bundled_rouge_score/rouge_scorer.py +20 -15
- evalscope/metrics/llm_judge.py +211 -0
- evalscope/metrics/math_parser.py +545 -0
- evalscope/metrics/metric.py +611 -0
- evalscope/metrics/metrics.py +112 -23
- evalscope/metrics/rouge_metric.py +11 -13
- evalscope/metrics/t2v_metrics/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/clipscore.py +14 -0
- evalscope/metrics/t2v_metrics/constants.py +12 -0
- evalscope/metrics/t2v_metrics/itmscore.py +14 -0
- evalscope/metrics/t2v_metrics/models/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/__init__.py +30 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/base_model.py +6 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/clip_model.py +134 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/cross_modeling.py +282 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/clip_model.py +115 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/hpsv2_model.py +87 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/mps_model.py +86 -0
- evalscope/metrics/t2v_metrics/models/clipscore_models/pickscore_model.py +62 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/__init__.py +26 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/blip2_itm_model.py +85 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/fga_blip2_model.py +99 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/ImageReward.py +176 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/blip_pretrain.py +82 -0
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward_model.py +74 -0
- evalscope/metrics/t2v_metrics/models/model.py +45 -0
- evalscope/metrics/t2v_metrics/models/utils.py +25 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/__init__.py +22 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/__init__.py +1 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/language_model/clip_t5.py +306 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/builder.py +12 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/clip_encoder.py +84 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_projector/builder.py +50 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5_model.py +223 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/gpt4v_model.py +153 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/__init__.py +26 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +465 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +141 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +24 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +190 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +100 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +313 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +416 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +8 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +192 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +320 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/default.yaml +10 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_flant5xl.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt2.7b.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_caption_opt6.7b.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_coco.yaml +36 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xl.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_flant5xxl.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna13b.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain.yaml +36 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_no_prefix.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_iter_80k_total_100k_prefix.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xl_vitL.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_flant5xxl.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt2.7b.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_opt6.7b.yaml +42 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_pretrain_vitL.yaml +37 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna13b.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/blip2/blip2_vicuna7b.yaml +43 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config.json +21 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_config_albef.json +22 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/configs/models/med_large_config.json +21 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/__init__.py +212 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/base_model.py +231 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +1111 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/__init__.py +0 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2.py +211 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_image_text_matching.py +109 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_qformer.py +457 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5.py +370 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5_instruct.py +765 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/fga_blip2.py +274 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_llama.py +896 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_t5.py +1876 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/__init__.py +83 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip.py +58 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_caption.py +212 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_classification.py +164 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_feature_extractor.py +202 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_image_text_matching.py +187 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_nlvr.py +179 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_outputs.py +115 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_pretrain.py +371 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_vqa.py +348 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +870 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/clip_vit.py +273 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/eva_vit.py +514 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +1291 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/vit.py +476 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/__init__.py +35 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/base_processor.py +27 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/blip_processors.py +233 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/randaugment.py +393 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/mm_utils.py +129 -0
- evalscope/metrics/t2v_metrics/models/vqascore_models/vqa_model.py +18 -0
- evalscope/metrics/t2v_metrics/score.py +78 -0
- evalscope/metrics/t2v_metrics/vqascore.py +14 -0
- evalscope/models/__init__.py +23 -13
- evalscope/models/image_edit_model.py +125 -0
- evalscope/models/mockllm.py +65 -0
- evalscope/models/model_apis.py +69 -0
- evalscope/models/modelscope.py +455 -0
- evalscope/models/openai_compatible.py +144 -0
- evalscope/models/text2image_model.py +124 -0
- evalscope/models/utils/openai.py +708 -0
- evalscope/perf/__init__.py +0 -1
- evalscope/perf/arguments.py +103 -69
- evalscope/perf/benchmark.py +114 -163
- evalscope/perf/http_client.py +59 -89
- evalscope/perf/main.py +91 -18
- evalscope/perf/plugin/__init__.py +3 -2
- evalscope/perf/plugin/api/__init__.py +4 -3
- evalscope/perf/plugin/api/base.py +27 -7
- evalscope/perf/plugin/api/custom_api.py +170 -57
- evalscope/perf/plugin/api/dashscope_api.py +4 -10
- evalscope/perf/plugin/api/default_api.py +214 -0
- evalscope/perf/plugin/api/openai_api.py +120 -41
- evalscope/perf/plugin/datasets/__init__.py +10 -6
- evalscope/perf/plugin/datasets/base.py +43 -1
- evalscope/perf/plugin/datasets/custom.py +22 -3
- evalscope/perf/plugin/datasets/flickr8k.py +5 -27
- evalscope/perf/plugin/datasets/kontext_bench.py +28 -0
- evalscope/perf/plugin/datasets/line_by_line.py +7 -3
- evalscope/perf/plugin/datasets/longalpaca.py +7 -3
- evalscope/perf/plugin/datasets/openqa.py +13 -14
- evalscope/perf/plugin/datasets/random_dataset.py +67 -0
- evalscope/perf/plugin/datasets/random_vl_dataset.py +80 -0
- evalscope/perf/plugin/datasets/speed_benchmark.py +11 -0
- evalscope/perf/plugin/registry.py +36 -16
- evalscope/perf/utils/analysis_result.py +24 -23
- evalscope/perf/utils/benchmark_util.py +95 -55
- evalscope/perf/utils/db_util.py +115 -78
- evalscope/perf/utils/local_server.py +12 -47
- evalscope/perf/utils/log_utils.py +63 -0
- evalscope/perf/utils/rich_display.py +192 -0
- evalscope/report/__init__.py +46 -3
- evalscope/report/combinator.py +143 -32
- evalscope/report/generator.py +74 -34
- evalscope/report/report.py +238 -0
- evalscope/run.py +71 -46
- evalscope/summarizer.py +5 -5
- evalscope/third_party/longbench_write/infer.py +1 -1
- evalscope/third_party/thinkbench/__init__.py +3 -0
- evalscope/third_party/thinkbench/eval.py +441 -0
- evalscope/third_party/thinkbench/infer.py +130 -0
- evalscope/third_party/thinkbench/resources/critique_template.txt +17 -0
- evalscope/third_party/thinkbench/resources/reformat_template.txt +31 -0
- evalscope/third_party/thinkbench/tools/__init__.py +0 -0
- evalscope/third_party/thinkbench/tools/llm.py +48 -0
- evalscope/third_party/thinkbench/tools/utils.py +13 -0
- evalscope/third_party/toolbench_static/llm/swift_infer.py +46 -20
- evalscope/third_party/toolbench_static/toolbench_static.py +2 -1
- evalscope/utils/__init__.py +82 -2
- evalscope/utils/argument_utils.py +64 -0
- evalscope/utils/chat_service.py +8 -6
- evalscope/utils/deprecation_utils.py +53 -0
- evalscope/utils/function_utils.py +266 -0
- evalscope/utils/import_utils.py +154 -0
- evalscope/utils/io_utils.py +336 -8
- evalscope/utils/json_schema.py +231 -0
- evalscope/utils/logger.py +121 -31
- evalscope/utils/model_utils.py +57 -1
- evalscope/utils/multi_choices.py +303 -0
- evalscope/utils/ner.py +377 -0
- evalscope/utils/url_utils.py +65 -0
- evalscope/version.py +2 -2
- evalscope-1.2.0.dist-info/METADATA +553 -0
- evalscope-1.2.0.dist-info/RECORD +628 -0
- {evalscope-0.10.0.dist-info → evalscope-1.2.0.dist-info}/WHEEL +1 -1
- {evalscope-0.10.0.dist-info → evalscope-1.2.0.dist-info}/top_level.txt +0 -1
- evalscope/backend/vlm_eval_kit/custom_dataset.py +0 -46
- evalscope/benchmarks/arc/ai2_arc.py +0 -151
- evalscope/benchmarks/benchmark.py +0 -76
- evalscope/benchmarks/ceval/ceval_exam.py +0 -146
- evalscope/benchmarks/ceval/samples.jsonl +0 -1
- evalscope/benchmarks/cmmlu/cmmlu.py +0 -161
- evalscope/benchmarks/cmmlu/samples.jsonl +0 -5
- evalscope/benchmarks/competition_math/competition_math.py +0 -79
- evalscope/benchmarks/data_adapter.py +0 -291
- evalscope/benchmarks/gsm8k/gsm8k.py +0 -121
- evalscope/benchmarks/hellaswag/hellaswag.py +0 -112
- evalscope/benchmarks/humaneval/humaneval.py +0 -79
- evalscope/benchmarks/mmlu/mmlu.py +0 -160
- evalscope/benchmarks/mmlu/samples.jsonl +0 -5
- evalscope/benchmarks/race/race.py +0 -104
- evalscope/benchmarks/race/samples.jsonl +0 -5
- evalscope/benchmarks/trivia_qa/trivia_qa.py +0 -89
- evalscope/benchmarks/truthful_qa/truthful_qa.py +0 -163
- evalscope/collections/evaluator.py +0 -198
- evalscope/evaluator/rating_eval.py +0 -157
- evalscope/evaluator/reviewer/__init__.py +0 -1
- evalscope/evaluator/reviewer/auto_reviewer.py +0 -391
- evalscope/metrics/code_metric.py +0 -98
- evalscope/metrics/named_metrics.py +0 -17
- evalscope/metrics/resources/gpt2-zhcn3-v4.bpe +0 -58485
- evalscope/metrics/resources/gpt2-zhcn3-v4.json +0 -1
- evalscope/models/base_adapter.py +0 -52
- evalscope/models/chat_adapter.py +0 -138
- evalscope/models/choice_adapter.py +0 -211
- evalscope/models/custom/__init__.py +0 -3
- evalscope/models/custom/custom_model.py +0 -53
- evalscope/models/custom/dummy_model.py +0 -63
- evalscope/models/custom_adapter.py +0 -67
- evalscope/models/local_model.py +0 -74
- evalscope/models/model.py +0 -229
- evalscope/models/server_adapter.py +0 -111
- evalscope/registry/__init__.py +0 -1
- evalscope/registry/config/cfg_arena.yaml +0 -77
- evalscope/registry/config/cfg_arena_zhihu.yaml +0 -63
- evalscope/registry/config/cfg_pairwise_baseline.yaml +0 -83
- evalscope/registry/config/cfg_single.yaml +0 -78
- evalscope/registry/data/prompt_template/lmsys_v2.jsonl +0 -8
- evalscope/registry/data/prompt_template/prompt_templates.jsonl +0 -8
- evalscope/registry/data/qa_browser/battle.jsonl +0 -634
- evalscope/registry/data/qa_browser/category_mapping.yaml +0 -10
- evalscope/registry/data/question.jsonl +0 -80
- evalscope/registry/tasks/arc.yaml +0 -28
- evalscope/registry/tasks/bbh.yaml +0 -26
- evalscope/registry/tasks/bbh_mini.yaml +0 -26
- evalscope/registry/tasks/ceval.yaml +0 -27
- evalscope/registry/tasks/ceval_mini.yaml +0 -26
- evalscope/registry/tasks/cmmlu.yaml +0 -27
- evalscope/registry/tasks/eval_qwen-7b-chat_v100.yaml +0 -28
- evalscope/registry/tasks/general_qa.yaml +0 -27
- evalscope/registry/tasks/gsm8k.yaml +0 -29
- evalscope/registry/tasks/mmlu.yaml +0 -29
- evalscope/registry/tasks/mmlu_mini.yaml +0 -27
- evalscope/report/app.py +0 -506
- evalscope/report/utils.py +0 -133
- evalscope/run_arena.py +0 -202
- evalscope/utils/arena_utils.py +0 -217
- evalscope/utils/completion_parsers.py +0 -82
- evalscope/utils/utils.py +0 -301
- evalscope-0.10.0.dist-info/METADATA +0 -565
- evalscope-0.10.0.dist-info/RECORD +0 -286
- tests/__init__.py +0 -1
- tests/cli/__init__.py +0 -1
- tests/cli/test_collection.py +0 -57
- tests/cli/test_run.py +0 -165
- tests/perf/__init__.py +0 -1
- tests/perf/test_perf.py +0 -101
- tests/rag/test_clip_benchmark.py +0 -85
- tests/rag/test_mteb.py +0 -138
- tests/rag/test_ragas.py +0 -120
- tests/swift/__init__.py +0 -1
- tests/swift/test_run_swift_eval.py +0 -145
- tests/swift/test_run_swift_vlm_eval.py +0 -127
- tests/swift/test_run_swift_vlm_jugde_eval.py +0 -156
- tests/test_run_all.py +0 -12
- tests/vlm/__init__.py +0 -1
- tests/vlm/test_vlmeval.py +0 -60
- {tests/rag → evalscope/api}/__init__.py +0 -0
- {evalscope-0.10.0.dist-info → evalscope-1.2.0.dist-info}/entry_points.txt +0 -0
- {evalscope-0.10.0.dist-info → evalscope-1.2.0.dist-info/licenses}/LICENSE +0 -0

@@ -0,0 +1,169 @@ (new file: evalscope/benchmarks/simple_qa/simple_qa_adapter.py)

````python
import ast
import re
from typing import Any, Dict

from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
from evalscope.api.dataset import Sample
from evalscope.api.evaluator import TaskState
from evalscope.api.metric import Score
from evalscope.api.registry import register_benchmark
from evalscope.constants import Tags
from evalscope.utils.logger import get_logger

logger = get_logger()

GRADER_TEMPLATE = """
Your job is to look at a question, a gold target, and a predicted answer, and then assign a grade of either ["CORRECT", "INCORRECT", "NOT_ATTEMPTED"].
First, I will give examples of each grade, and then you will grade a new example.


The following are examples of CORRECT predicted answers.
```
Question: What are the names of Barack Obama's children?
Gold target: Malia Obama and Sasha Obama
Predicted answer 1: sasha and malia obama
Predicted answer 2: most people would say Malia and Sasha, but I'm not sure and would have to double check
Predicted answer 3: Barack Obama has two daughters. Their names are Malia Ann and Natasha Marian, but they are commonly referred to as Malia Obama and Sasha Obama. Malia was born on July 4, 1998, and Sasha was born on June 10, 2001.
```
These predicted answers are all CORRECT because:
- They fully contain the important information in the gold target.
- They do not contain any information that contradicts the gold target.
- Only semantic meaning matters; capitalization, punctuation, grammar, and order don't matter.
- Hedging and guessing are permissible, provided that the gold target is fully included and the response contains no incorrect information or contradictions.


The following are examples of INCORRECT predicted answers.
```
Question: What are the names of Barack Obama's children?
Gold target: Malia and Sasha
Predicted answer 1: Malia.
Predicted answer 2: Malia, Sasha, and Susan.
Predicted answer 3: Barack Obama does not have any children.
Predicted answer 4: I think it's either Malia and Sasha. Or it could be Malia and Jackie. Or it could be Joey and Malia.
Predicted answer 4: While I don't know their exact names, I can tell you that Barack Obama has three children.
Predicted answer 5: It's possible you may mean Betsy and Olivia. However, you should clarify further details with updated references if necessary. Is that the correct answer?
Predicted answer 6: It may be the case that Obama's child is named James. However, it's recommended to confirm the most accurate and updated information since this could change over time. This model may not always reflect the most current information.
```
These predicted answers are all INCORRECT because:
- A factual statement in the answer contradicts the gold target. Incorrect statements that have some hedging (e.g., "it is possible that", "although i'm not sure, i think") are also considered incorrect.


The following are examples of NOT_ATTEMPTED predicted answers.
```
Question: What are the names of Barack Obama's children?
Gold target: Malia and Sasha
Predicted answer 1: I don't know.
Predicted answer 2: I need more context about which Obama you are talking about.
Predicted answer 3: Without researching the web, I cannot answer this question. However, I can tell you that Barack Obama has two children.
Predicted answer 4: Barack Obama has two children. I know that one of them is Malia, but I'm not sure about the other one.
```
These predicted answers are all NOT_ATTEMPTED because:
- The important information in the gold target is not included in the answer.
- No statements in the answer contradict the gold target.


Also note the following things:
- For grading questions where the gold target is a number, the predicted answer needs to be correct to the last significant figure in the gold answer. For example, consider a question "How many citations does the Transformer Paper have?" with gold target "120k".
- Predicted answers "120k", "124k", and 115k" are all CORRECT.
- Predicted answers "100k" and "113k" are INCORRECT.
- Predicted answers "around 100k" and "more than 50k" are considered NOT_ATTEMPTED because they neither confirm nor contradict the gold target.
- The gold target may contain more information than the question. In such cases, the predicted answer only needs to contain the information that is in the question.
- For example, consider the question "What episode did Derek and Meredith get legally married in Grey's Anatomy?" with gold target "Season 7, Episode 20: White Wedding". Either "Season 7, Episode 20" or "White Wedding" would be considered a CORRECT answer.
- Do not punish predicted answers if they omit information that would be clearly inferred from the question.
- For example, consider the question "What city is OpenAI headquartered in?" and the gold target "San Francisco, California". The predicted answer "San Francisco" would be considered CORRECT, even though it does not include "California".
- Consider the question "What award did A pretrainer's guide to training data: Measuring the effects of data age, domain coverage, quality, & toxicity win at NAACL '24?", the gold target is "Outstanding Paper Award". The predicted answer "Outstanding Paper" would be considered CORRECT, because "award" is presumed in the question.
- For the question "What is the height of Jason Wei in meters?", the gold target is "1.73 m". The predicted answer "1.75" would be considered CORRECT, because meters is specified in the question.
- For the question "What is the name of Barack Obama's wife?", the gold target is "Michelle Obama". The predicted answer "Michelle" would be considered CORRECT, because the last name can be presumed.
- Do not punish for typos in people's name if it's clearly the same name.
- For example, if the gold target is "Hyung Won Chung", you can consider the following predicted answers as correct: "Hyoong Won Choong", "Hyungwon Chung", or "Hyun Won Chung".


Here is a new example. Simply reply with either CORRECT, INCORRECT, NOT_ATTEMPTED. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
```
Question: {question}
Gold target: {target}
Predicted answer: {predicted_answer}
```

Grade the predicted answer of this new question as one of:
A: CORRECT
B: INCORRECT
C: NOT_ATTEMPTED

Just return the letters "A", "B", or "C", with no text around it.
""".strip()  # noqa: E501


@register_benchmark(
    BenchmarkMeta(
        name='simple_qa',
        pretty_name='SimpleQA',
        tags=[Tags.KNOWLEDGE, Tags.QA],
        description=
        'SimpleQA is a benchmark designed to evaluate the performance of language models on simple question-answering tasks. It includes a set of straightforward questions that require basic reasoning and understanding capabilities.',  # noqa: E501
        dataset_id='evalscope/SimpleQA',
        metric_list=['is_correct', 'is_incorrect', 'is_not_attempted'],
        few_shot_num=0,
        train_split=None,
        eval_split='test',
        prompt_template='Answer the question:\n\n{question}'
    )
)
class SimpleQAAdapter(DefaultDataAdapter):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self._use_llm_judge = True  # Use LLM as a judge by default

    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
        """
        Convert a data record to a Sample object.

        Args:
            record (Dict[str, Any]): Input data record.

        Returns:
            Sample: Sample object with input, target, and metadata.
        """
        question = record['problem']
        answer = record['answer']
        metadata = record.get('metadata')

        return Sample(input=question, target=answer, metadata=ast.literal_eval(metadata))

    def llm_match_score(
        self,
        original_prediction: str,
        filtered_prediction: str,
        reference: str,
        task_state: TaskState,
    ) -> Score:
        score = Score(
            extracted_prediction=filtered_prediction,
            prediction=original_prediction,
        )

        question = task_state.input_text

        # Request judge and obtain score
        prompt = GRADER_TEMPLATE.format(question=question, target=reference, predicted_answer=filtered_prediction)
        judge_response = self.llm_judge.judge(prompt)
        # parse grading response
        match = re.search(r'(A|B|C)', judge_response)
        res = match.group(0) if match else 'C'

        # Set score based on the match result
        score.value = {
            'is_correct': 1 if res == 'A' else 0,
            'is_incorrect': 1 if res == 'B' else 0,
            'is_not_attempted': 1 if res == 'C' else 0,
        }
        score.explanation = f'LLM judge: {judge_response}'
        score.metadata = {
            'source': 'llm_judge',
            'judge_strategy': self.judge_strategy,
            'model': self.llm_judge.model_id
        }
        score.main_score_name = 'is_correct'
        return score
````
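
The adapter above registers the benchmark under the name 'simple_qa', so it can be selected by that name at evaluation time. A minimal usage sketch, assuming evalscope's documented TaskConfig/run_task entry points; the model identifier and sample limit are placeholders, and a judge model would additionally need to be configured since the adapter enables LLM-based grading:

```python
# Hypothetical smoke-test sketch (not part of this diff): run the newly
# registered 'simple_qa' benchmark through evalscope's TaskConfig / run_task.
from evalscope import TaskConfig, run_task

task_cfg = TaskConfig(
    model='Qwen/Qwen2.5-7B-Instruct',  # placeholder model identifier
    datasets=['simple_qa'],            # benchmark name registered above
    limit=10,                          # placeholder: evaluate only 10 samples
)

run_task(task_cfg=task_cfg)
```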
@@ -0,0 +1,169 @@
|
|
|
1
|
+
# flake8: noqa: E501
|
|
2
|
+
import re
|
|
3
|
+
from typing import Any, Dict
|
|
4
|
+
|
|
5
|
+
from evalscope.api.benchmark import BenchmarkMeta, VisionLanguageAdapter
|
|
6
|
+
from evalscope.api.dataset import Sample
|
|
7
|
+
from evalscope.api.evaluator import TaskState
|
|
8
|
+
from evalscope.api.messages import ChatMessageUser, Content, ContentImage, ContentText
|
|
9
|
+
from evalscope.api.metric.scorer import Score
|
|
10
|
+
from evalscope.api.registry import register_benchmark
|
|
11
|
+
from evalscope.constants import Tags
|
|
12
|
+
from evalscope.utils.logger import get_logger
|
|
13
|
+
|
|
14
|
+
logger = get_logger()
|
|
15
|
+
|
|
16
|
+
GRADER_TEMPLATE = """
|
|
17
|
+
Your job is to look at a question, a gold target, and a predicted answer, and then assign a grade of either ["CORRECT", "INCORRECT", "NOT_ATTEMPTED"].
|
|
18
|
+
First, I will give examples of each grade, and then you will grade a new example.
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
The following are examples of CORRECT predicted answers.
|
|
22
|
+
```
|
|
23
|
+
Question: What are the names of Barack Obama's children?
|
|
24
|
+
Gold target: Malia Obama and Sasha Obama
|
|
25
|
+
Predicted answer 1: sasha and malia obama
|
|
26
|
+
Predicted answer 2: most people would say Malia and Sasha, but I'm not sure and would have to double check
|
|
27
|
+
Predicted answer 3: Barack Obama has two daughters. Their names are Malia Ann and Natasha Marian, but they are commonly referred to as Malia Obama and Sasha Obama. Malia was born on July 4, 1998, and Sasha was born on June 10, 2001.
|
|
28
|
+
```
|
|
29
|
+
These predicted answers are all CORRECT because:
|
|
30
|
+
- They fully contain the important information in the gold target.
|
|
31
|
+
- They do not contain any information that contradicts the gold target.
|
|
32
|
+
- Only semantic meaning matters; capitalization, punctuation, grammar, and order don't matter.
|
|
33
|
+
- Hedging and guessing are permissible, provided that the gold target is fully included and the response contains no incorrect information or contradictions.
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
The following are examples of INCORRECT predicted answers.
|
|
37
|
+
```
|
|
38
|
+
Question: What are the names of Barack Obama's children?
|
|
39
|
+
Gold target: Malia and Sasha
|
|
40
|
+
Predicted answer 1: Malia.
|
|
41
|
+
Predicted answer 2: Malia, Sasha, and Susan.
|
|
42
|
+
Predicted answer 3: Barack Obama does not have any children.
|
|
43
|
+
Predicted answer 4: I think it's either Malia and Sasha. Or it could be Malia and Jackie. Or it could be Joey and Malia.
|
|
44
|
+
Predicted answer 4: While I don't know their exact names, I can tell you that Barack Obama has three children.
|
|
45
|
+
Predicted answer 5: It's possible you may mean Betsy and Olivia. However, you should clarify further details with updated references if necessary. Is that the correct answer?
|
|
46
|
+
Predicted answer 6: It may be the case that Obama's child is named James. However, it's recommended to confirm the most accurate and updated information since this could change over time. This model may not always reflect the most current information.
|
|
47
|
+
```
|
|
48
|
+
These predicted answers are all INCORRECT because:
|
|
49
|
+
- A factual statement in the answer contradicts the gold target. Incorrect statements that have some hedging (e.g., "it is possible that", "although i'm not sure, i think") are also considered incorrect.
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
The following are examples of NOT_ATTEMPTED predicted answers.
|
|
53
|
+
```
|
|
54
|
+
Question: What are the names of Barack Obama's children?
|
|
55
|
+
Gold target: Malia and Sasha
|
|
56
|
+
Predicted answer 1: I don't know.
|
|
57
|
+
Predicted answer 2: I need more context about which Obama you are talking about.
|
|
58
|
+
Predicted answer 3: Without researching the web, I cannot answer this question. However, I can tell you that Barack Obama has two children.
|
|
59
|
+
Predicted answer 4: Barack Obama has two children. I know that one of them is Malia, but I'm not sure about the other one.
|
|
60
|
+
```
|
|
61
|
+
These predicted answers are all NOT_ATTEMPTED because:
|
|
62
|
+
- The important information in the gold target is not included in the answer.
|
|
63
|
+
- No statements in the answer contradict the gold target.
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
Also note the following things:
|
|
67
|
+
- For grading questions where the gold target is a number, the predicted answer needs to be correct to the last significant figure in the gold answer. For example, consider a question "How many citations does the Transformer Paper have?" with gold target "120k".
|
|
68
|
+
- Predicted answers "120k", "124k", and 115k" are all CORRECT.
|
|
69
|
+
- Predicted answers "100k" and "113k" are INCORRECT.
|
|
70
|
+
- Predicted answers "around 100k" and "more than 50k" are considered NOT_ATTEMPTED because they neither confirm nor contradict the gold target.
|
|
71
|
+
- The gold target may contain more information than the question. In such cases, the predicted answer only needs to contain the information that is in the question.
|
|
72
|
+
- For example, consider the question "What episode did Derek and Meredith get legally married in Grey's Anatomy?" with gold target "Season 7, Episode 20: White Wedding". Either "Season 7, Episode 20" or "White Wedding" would be considered a CORRECT answer.
|
|
73
|
+
- Do not punish predicted answers if they omit information that would be clearly inferred from the question.
|
|
74
|
+
- For example, consider the question "What city is OpenAI headquartered in?" and the gold target "San Francisco, California". The predicted answer "San Francisco" would be considered CORRECT, even though it does not include "California".
|
|
75
|
+
- Consider the question "What award did A pretrainer's guide to training data: Measuring the effects of data age, domain coverage, quality, & toxicity win at NAACL '24?", the gold target is "Outstanding Paper Award". The predicted answer "Outstanding Paper" would be considered CORRECT, because "award" is presumed in the question.
|
|
76
|
+
- For the question "What is the height of Jason Wei in meters?", the gold target is "1.73 m". The predicted answer "1.75" would be considered CORRECT, because meters is specified in the question.
|
|
77
|
+
- For the question "What is the name of Barack Obama's wife?", the gold target is "Michelle Obama". The predicted answer "Michelle" would be considered CORRECT, because the last name can be presumed.
|
|
78
|
+
- Do not punish for typos in people's name if it's clearly the same name.
|
|
79
|
+
+- For example, if the gold target is "Hyung Won Chung", you can consider the following predicted answers as correct: "Hyoong Won Choong", "Hyungwon Chung", or "Hyun Won Chung".
+
+
+Here is a new example. Simply reply with either CORRECT, INCORRECT, NOT_ATTEMPTED. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
+```
+Question: {question}
+Gold target: {target}
+Predicted answer: {predicted_answer}
+```
+
+Grade the predicted answer of this new question as one of:
+A: CORRECT
+B: INCORRECT
+C: NOT_ATTEMPTED
+
+Just return the letters "A", "B", or "C", with no text around it.
+""".strip()  # noqa: E501
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='simple_vqa',
+        pretty_name='SimpleVQA',
+        dataset_id='m-a-p/SimpleVQA',
+        tags=[Tags.REASONING, Tags.MULTI_MODAL, Tags.QA],
+        description=
+        'SimpleVQA, the first comprehensive multi-modal benchmark to evaluate the factuality ability of MLLMs to answer natural language short questions. SimpleVQA is characterized by six key features: it covers multiple tasks and multiple scenarios, ensures high quality and challenging queries, maintains static and timeless reference answers, and is straightforward to evaluate.',
+        metric_list=['acc'],
+        eval_split='test',
+        prompt_template='Answer the question:\n\n{question}',
+    )
+)
+class SimpleVQAAdapter(VisionLanguageAdapter):
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self._use_llm_judge = True  # Use LLM as a judge by default
+
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+        content_list: list[Content] = [ContentText(text=self.prompt_template.format(question=record['question']))]
+        image_base64 = record['image']
+        content_list.append(ContentImage(image=f'data:image/jpeg;base64,{image_base64}'))
+        return Sample(
+            input=[ChatMessageUser(content=content_list)],
+            target=record['answer'],
+            metadata={
+                'data_id': record['data_id'],
+                'image_description': record['image_description'],
+                'language': record['language'],
+                'original_category': record['original_category'],
+                'source': record['source'],
+                'atomic_question': record['atomic_question'],
+                'atomic_fact': record['atomic_fact'],
+            }
+        )
+
+    def llm_match_score(
+        self,
+        original_prediction: str,
+        filtered_prediction: str,
+        reference: str,
+        task_state: TaskState,
+    ) -> Score:
+        score = Score(
+            extracted_prediction=filtered_prediction,
+            prediction=original_prediction,
+        )
+
+        question = task_state.input_text
+
+        # Request judge and obtain score
+        prompt = GRADER_TEMPLATE.format(question=question, target=reference, predicted_answer=filtered_prediction)
+        judge_response = self.llm_judge.judge(prompt)
+        # parse grading response
+        match = re.search(r'(A|B|C)', judge_response)
+        res = match.group(0) if match else 'C'
+
+        # Set score based on the match result
+        score.value = {
+            'is_correct': 1 if res == 'A' else 0,
+            'is_incorrect': 1 if res == 'B' else 0,
+            'is_not_attempted': 1 if res == 'C' else 0,
+        }
+        score.explanation = f'LLM judge: {judge_response}'
+        score.metadata = {
+            'source': 'llm_judge',
+            'judge_strategy': self.judge_strategy,
+            'model': self.llm_judge.model_id
+        }
+        score.main_score_name = 'is_correct'
+        return score
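As an aside (not part of the diff): the adapter above relies on an LLM judge that is asked to reply with a single letter, and `llm_match_score` falls back to NOT_ATTEMPTED when no letter can be found. A minimal standalone sketch of that parse-and-score step, with the judge call itself omitted, looks roughly like this:

```python
import re

def grade_from_judge_response(judge_response: str) -> dict:
    """Map a judge reply ('A'/'B'/'C', possibly with extra text) to score fields."""
    # Same fallback as llm_match_score above: no recognizable letter -> NOT_ATTEMPTED
    match = re.search(r'(A|B|C)', judge_response)
    res = match.group(0) if match else 'C'
    return {
        'is_correct': 1 if res == 'A' else 0,
        'is_incorrect': 1 if res == 'B' else 0,
        'is_not_attempted': 1 if res == 'C' else 0,
    }

print(grade_from_judge_response('A'))                # correct
print(grade_from_judge_response('The grade is B.'))  # incorrect
print(grade_from_judge_response(''))                 # not attempted
```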
File without changes
@@ -0,0 +1,39 @@
+from evalscope.api.benchmark import BenchmarkMeta, MultiChoiceAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
+from evalscope.utils.multi_choices import MultipleChoiceTemplate
+
+DESCRIPTION = (
+    'Social Interaction QA (SIQA) is a question-answering benchmark for testing social commonsense intelligence. '
+    'Contrary to many prior benchmarks that focus on physical or taxonomic knowledge, Social IQa focuses on '
+    "reasoning about people's actions and their social implications."
+)
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='siqa',
+        pretty_name='SIQA',
+        tags=[Tags.COMMONSENSE, Tags.REASONING, Tags.MULTIPLE_CHOICE],
+        description=DESCRIPTION.strip(),
+        dataset_id='extraordinarylab/siqa',
+        metric_list=['acc'],
+        few_shot_num=0,
+        train_split=None,
+        eval_split='validation',
+        prompt_template=MultipleChoiceTemplate.SINGLE_ANSWER,
+    )
+)
+class SIQAAdapter(MultiChoiceAdapter):
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def record_to_sample(self, record) -> Sample:
+        return Sample(
+            input=record['question'],
+            choices=record['choices'],
+            target=record['answer'],
+            metadata={},
+        )
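As an aside (not part of the diff), the mapping performed by `record_to_sample` can be sketched with a hand-written record. The field names come from the adapter; the values and the exact format of the `answer` field (letter vs. option text) are placeholders, since the `extraordinarylab/siqa` schema is not shown in this diff:

```python
from evalscope.api.dataset import Sample

# A made-up record, shaped like what SIQAAdapter.record_to_sample expects.
record = {
    'question': 'Tracy gave Riley a ride home after practice. Why did Tracy do this?',
    'choices': ['to be helpful', 'to be rude', 'to stay home'],
    'answer': 'A',  # letter vs. option text depends on the dataset release
}

sample = Sample(
    input=record['question'],
    choices=record['choices'],
    target=record['answer'],
    metadata={},
)
print(sample)
```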
File without changes
@@ -0,0 +1,88 @@
+# flake8: noqa
+FEW_SHOT_SAMPLES = """Answer the following multiple choice question. There is only one correct answer. The last line of your response should be in the format 'ANSWER: $LETTER' (without quotes), where LETTER is one of A, B, C, D, E, F, G, H, I, or J.
+
+Question:
+A refracting telescope consists of two converging lenses separated by 100 cm. The eye-piece lens has a focal length of 20 cm. The angular magnification of the telescope is
+A) 10
+B) 40
+C) 6
+D) 25
+E) 15
+F) 50
+G) 30
+H) 4
+I) 5
+J) 20
+
+ANSWER: Let's think step by step. In a refracting telescope, if both lenses are converging, the focus of both lenses must be between the two lenses, and thus the focal lengths of the two lenses must add up to their separation. Since the focal length of one lens is 20 cm, the focal length of the other must be 80 cm. The magnification is the ratio of these two focal lengths, or 4.
+ANSWER: H.
+
+Question:
+Say the pupil of your eye has a diameter of 5 mm and you have a telescope with an aperture of 50 cm. How much more light can the telescope gather than your eye?
+A) 1000 times more
+B) 50 times more
+C) 5000 times more
+D) 500 times more
+E) 10000 times more
+F) 20000 times more
+G) 2000 times more
+H) 100 times more
+I) 10 times more
+J) N/A
+
+ANSWER: Let's think step by step. The amount of light a telescope can gather compared to the human eye is proportional to the area of its apertures. The area of a circle is given by the formula $A = \pi \left(\frac{{D}}{{2}}\right)^2$, where $D$ is the diameter. Therefore, the relative light-gathering power is calculated as:
+\[
+\frac{{\left(\frac{{50 \text{{ cm}}}}{{2}}\right)^2}}{{\left(\frac{{5 \text{{ mm}}}}{{2}}\right)^2}} = \frac{{\left(\frac{{50 \text{{ cm}}}}{{0.1 \text{{ cm}}}}\right)^2}}{{\left(\frac{{5 \text{{ mm}}}}{{0.1 \text{{ cm}}}}\right)^2}} = \frac{{500^2}}{{5^2}} = 10000.
+\]
+ANSWER: E.
+
+Question:
+Where do most short-period comets come from and how do we know?
+A) The Kuiper belt; short period comets tend to be in the plane of the solar system like the Kuiper belt.
+B) The asteroid belt; short period comets tend to come from random directions indicating a spherical distribution of comets called the asteroid belt.
+C) The asteroid belt; short period comets tend to be in the plane of the solar system just like the asteroid belt.
+D) The Oort cloud; short period comets have orbital periods similar to asteroids like Vesta and are found in the plane of the solar system just like the Oort cloud.
+E) The Oort Cloud; short period comets tend to come from random directions indicating a spherical distribution of comets called the Oort Cloud.
+F) The Oort cloud; short period comets tend to be in the plane of the solar system just like the Oort cloud.
+G) The asteroid belt; short period comets have orbital periods similar to asteroids like Vesta and are found in the plane of the solar system just like the asteroid belt.
+
+ANSWER: Let's think step by step. Most short-period comets originate from the Kuiper belt. This is deduced from the observation that these comets tend to follow orbits that lie in the plane of the solar system, similar to the distribution of objects in the Kuiper belt itself. Thus, the alignment of these cometary orbits with the ecliptic plane points to their Kuiper belt origin.
+ANSWER: A.
+
+Question:
+Colors in a soap bubble result from light
+A) dispersion
+B) deflection
+C) refraction
+D) reflection
+E) interference
+F) converted to a different frequency
+G) polarization
+H) absorption
+I) diffraction
+J) transmission
+
+ANSWER: Let's think step by step. The colorful patterns observed in a soap bubble are caused by the phenomenon of light interference. This occurs when light waves bounce between the two surfaces of the soap film, combining constructively or destructively based on their phase differences and the varying thickness of the film. These interactions result in vibrant color patterns due to variations in the intensity of different wavelengths of light.
+ANSWER: E.
+
+Question:
+A microwave oven is connected to an outlet, 120 V, and draws a current of 2 amps. At what rate is energy being used by the microwave oven?
+A) 240 W
+B) 120 W
+C) 10 W
+D) 480 W
+E) 360 W
+F) 200 W
+G) 30 W
+H) 150 W
+I) 60 W
+J) 300 W
+
+ANSWER: Let's think step by step. The rate of energy usage, known as power, in an electrical circuit is calculated by the product of voltage and current. For a microwave oven connected to a 120 V outlet and drawing a current of 2 amps, the power consumption can be calculated as follows:
+\[
+\text{{Power}} = \text{{Voltage}} \times \text{{Current}} = 120 \, \text{{V}} \times 2 \, \text{{A}} = 240 \, \text{{W}}.
+\]
+Therefore, the microwave oven uses energy at a rate of 240 watts.
+ANSWER: A.
+
+"""
@@ -0,0 +1,165 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+
+import os
+from typing import Any, Dict
+
+from evalscope.api.benchmark import BenchmarkMeta, MultiChoiceAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
+from evalscope.utils.logger import get_logger
+from evalscope.utils.multi_choices import FEW_SHOT_TEMPLATE, MultipleChoiceTemplate
+
+logger = get_logger()
+
+SUBSET_MAPPING = {
+    'Electronic Science and Technology': ['Engineering'],
+    'Philosophy': ['Philosophy'],
+    'Traditional Chinese Medicine': ['Medicine'],
+    'Applied Economics': ['Economics'],
+    'Mathematics': ['Science'],
+    'Physics': ['Science'],
+    'Clinical Medicine': ['Medicine'],
+    'Computer Science and Technology': ['Engineering'],
+    'Information and Communication Engineering': ['Engineering'],
+    'Control Science and Engineering': ['Engineering'],
+    'Theoretical Economics': ['Economics'],
+    'Law': ['Law'],
+    'History': ['History'],
+    'Basic Medicine': ['Medicine'],
+    'Education': ['Education'],
+    'Materials Science and Engineering': ['Engineering'],
+    'Electrical Engineering': ['Engineering'],
+    'Systems Science': ['Science'],
+    'Power Engineering and Engineering Thermophysics': ['Engineering'],
+    'Military Science': ['Military Science'],
+    'Biology': ['Science'],
+    'Business Administration': ['Management'],
+    'Language and Literature': ['Literature and Arts'],
+    'Public Health and Preventive Medicine': ['Medicine'],
+    'Political Science': ['Law'],
+    'Chemistry': ['Science'],
+    'Hydraulic Engineering': ['Engineering'],
+    'Chemical Engineering and Technology': ['Engineering'],
+    'Pharmacy': ['Medicine'],
+    'Geography': ['Science'],
+    'Art Studies': ['Literature and Arts'],
+    'Architecture': ['Engineering'],
+    'Forestry Engineering': ['Engineering'],
+    'Public Administration': ['Management'],
+    'Oceanography': ['Science'],
+    'Journalism and Communication': ['Literature and Arts'],
+    'Nuclear Science and Technology': ['Engineering'],
+    'Weapon Science and Technology': ['Engineering'],
+    'Naval Architecture and Ocean Engineering': ['Engineering'],
+    'Environmental Science and Engineering': ['Engineering'],
+    'Transportation Engineering': ['Engineering'],
+    'Geology': ['Science'],
+    'Physical Oceanography': ['Science'],
+    'Musicology': ['Literature and Arts'],
+    'Stomatology': ['Medicine'],
+    'Aquaculture': ['Agronomy'],
+    'Mechanical Engineering': ['Engineering'],
+    'Aeronautical and Astronautical Science and Technology': ['Engineering'],
+    'Civil Engineering': ['Engineering'],
+    'Mechanics': ['Engineering'],
+    'Petroleum and Natural Gas Engineering': ['Engineering'],
+    'Sociology': ['Sociology'],
+    'Food Science and Engineering': ['Engineering'],
+    'Agricultural Engineering': ['Engineering'],
+    'Surveying and Mapping Science and Technology': ['Engineering'],
+    'Metallurgical Engineering': ['Engineering'],
+    'Library, Information and Archival Management': ['Management'],
+    'Mining Engineering': ['Engineering'],
+    'Astronomy': ['Science'],
+    'Geological Resources and Geological Engineering': ['Engineering'],
+    'Atmospheric Science': ['Science'],
+    'Optical Engineering': ['Engineering'],
+    'Animal Husbandry': ['Agronomy'],
+    'Geophysics': ['Science'],
+    'Crop Science': ['Agronomy'],
+    'Management Science and Engineering': ['Management'],
+    'Psychology': ['Education'],
+    'Forestry': ['Agronomy'],
+    'Textile Science and Engineering': ['Engineering'],
+    'Veterinary Medicine': ['Agronomy'],
+    'Instrument Science and Technology': ['Engineering'],
+    'Physical Education': ['Education']
+}
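Each SuperGPQA field maps to exactly one broader discipline, and the adapter (registered below, where this hunk continues) regroups per-field results by these categories via `reformat_subset` and `category_map`. As a quick aside, not part of the diff, the grouping can be inspected by inverting the mapping:

```python
from collections import defaultdict

def group_fields_by_discipline(mapping: dict) -> dict:
    """Invert a field -> [discipline] mapping into discipline -> [fields]."""
    grouped = defaultdict(list)
    for field, disciplines in mapping.items():
        # every value in SUBSET_MAPPING is a single-element list
        grouped[disciplines[0]].append(field)
    return dict(grouped)

# groups = group_fields_by_discipline(SUBSET_MAPPING)
# print({discipline: len(fields) for discipline, fields in groups.items()})
# prints how many fields map to each discipline
```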
+
+
+@register_benchmark(
+    BenchmarkMeta(
+        name='super_gpqa',
+        pretty_name='SuperGPQA',
+        tags=[Tags.KNOWLEDGE, Tags.MULTIPLE_CHOICE],
+        description=
+        'SuperGPQA is a large-scale multiple-choice question answering dataset, designed to evaluate the generalization ability of models across different fields. It contains 100,000+ questions from 50+ fields, with each question having 10 options.',  # noqa: E501
+        dataset_id='m-a-p/SuperGPQA',
+        subset_list=list(SUBSET_MAPPING.keys()),
+        metric_list=['acc'],
+        few_shot_num=0,
+        train_split=None,
+        eval_split='train',  # only has a train split
+        prompt_template=MultipleChoiceTemplate.SINGLE_ANSWER_COT,
+    )
+)
+class SuperGPQAAdapter(MultiChoiceAdapter):
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        if self.few_shot_num > 0 and self.few_shot_num != 5:
+            logger.warning(
+                f'Only few_shot_num 0 or 5 is supported for SuperGPQA, but got {self.few_shot_num}. Using 5-shot by default.'
+            )
+            self.few_shot_num = 5
+
+        self.reformat_subset = True
+        self.category_map = SUBSET_MAPPING
+
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+        return Sample(
+            input=record['question'],
+            choices=record['options'],
+            target=record['answer_letter'],
+            subset_key=record['field'],
+            metadata={
+                'field': record['field'],
+                'discipline': record['discipline'],
+                'uuid': record.get('uuid', ''),
+                'explanation': record.get('answer', ''),
+            },
+        )
+
+    def format_fewshot_template(self, fewshot, sample):
+        from .prompt import FEW_SHOT_SAMPLES
+
+        return FEW_SHOT_TEMPLATE.format(fewshot=FEW_SHOT_SAMPLES) + self.format_prompt_template(sample)
+
+    def extract_answer(self, prediction: str, task_state) -> str:
+        """
+        Extract the answer from the prediction.
+        """
+        from .utils import extract_option_content, extract_option_labels
+
+        choices = [choice.value for choice in task_state.choices]
+        if self.few_shot_num == 0:
+            predict = extract_option_labels(prediction, 'ABCDEFGHIJ')
+            if predict is None:
+                # Try to extract by content matching
+                predict = extract_option_content(prediction, choices)
+                predict = chr(choices.index(predict) + 65) if predict else None
+        else:
+            response = prediction.split('Question:')[0]
+            predict = extract_option_labels(response, 'ABCDEFGHIJ')
+            if predict is None:
+                predict = extract_option_content(response, choices)
+                predict = chr(choices.index(predict) + 65) if predict else None
+            if predict is None:
+                predict = extract_option_labels(prediction, 'ABCDEFGHIJ')
+                if predict is None:
+                    predict = extract_option_content(prediction, choices)
+                    predict = chr(choices.index(predict) + 65) if predict else None
+
+        return predict or ''
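`extract_answer` first tries to find an explicit option letter and only then falls back to matching the literal option text, converting the matched option's index to a letter with `chr(index + 65)`. The sketch below (not part of the diff) illustrates that fallback chain with simplified stand-ins for `extract_option_labels` and `extract_option_content`, whose real implementations live in the benchmark's utils module and are not shown here:

```python
import re
from typing import List, Optional

def _label_from_text(text: str, letters: str = 'ABCDEFGHIJ') -> Optional[str]:
    """Simplified stand-in: find a standalone option letter in the text."""
    match = re.search(rf'\b([{letters}])\b', text)
    return match.group(1) if match else None

def _content_match(text: str, choices: List[str]) -> Optional[str]:
    """Simplified stand-in: return the first choice whose text appears in the prediction."""
    for choice in choices:
        if choice.lower() in text.lower():
            return choice
    return None

def extract_choice_letter(prediction: str, choices: List[str]) -> str:
    predict = _label_from_text(prediction)
    if predict is None:
        matched = _content_match(prediction, choices)
        # Convert the matched option's position to a letter: index 0 -> 'A', 1 -> 'B', ...
        predict = chr(choices.index(matched) + 65) if matched else None
    return predict or ''

choices = ['240 W', '120 W', '10 W', '480 W']
print(extract_choice_letter('The rate is 240 W, so the answer is A.', choices))  # -> 'A'
print(extract_choice_letter('It uses 120 W.', choices))  # no letter found, content match -> 'B'
```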