evalscope 0.17.1__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- evalscope/__init__.py +4 -1
- evalscope/api/__init__.py +0 -0
- evalscope/api/benchmark/__init__.py +3 -0
- evalscope/api/benchmark/adapters/__init__.py +3 -0
- evalscope/api/benchmark/adapters/default_data_adapter.py +683 -0
- evalscope/api/benchmark/adapters/multi_choice_adapter.py +83 -0
- evalscope/api/benchmark/adapters/text2image_adapter.py +155 -0
- evalscope/api/benchmark/benchmark.py +321 -0
- evalscope/api/benchmark/meta.py +115 -0
- evalscope/api/dataset/__init__.py +2 -0
- evalscope/api/dataset/dataset.py +349 -0
- evalscope/api/dataset/loader.py +261 -0
- evalscope/api/dataset/utils.py +143 -0
- evalscope/api/evaluator/__init__.py +3 -0
- evalscope/api/evaluator/cache.py +355 -0
- evalscope/api/evaluator/evaluator.py +56 -0
- evalscope/api/evaluator/state.py +264 -0
- evalscope/api/filter/__init__.py +1 -0
- evalscope/api/filter/filter.py +72 -0
- evalscope/api/messages/__init__.py +11 -0
- evalscope/api/messages/chat_message.py +198 -0
- evalscope/api/messages/content.py +102 -0
- evalscope/api/messages/utils.py +35 -0
- evalscope/api/metric/__init__.py +2 -0
- evalscope/api/metric/metric.py +55 -0
- evalscope/api/metric/scorer.py +105 -0
- evalscope/api/mixin/__init__.py +2 -0
- evalscope/api/mixin/dataset_mixin.py +105 -0
- evalscope/api/mixin/llm_judge_mixin.py +168 -0
- evalscope/api/model/__init__.py +12 -0
- evalscope/api/model/generate_config.py +157 -0
- evalscope/api/model/model.py +383 -0
- evalscope/api/model/model_output.py +285 -0
- evalscope/api/registry.py +182 -0
- evalscope/api/tool/__init__.py +3 -0
- evalscope/api/tool/tool_call.py +101 -0
- evalscope/api/tool/tool_info.py +173 -0
- evalscope/api/tool/utils.py +64 -0
- evalscope/app/ui/app_ui.py +2 -1
- evalscope/app/ui/multi_model.py +50 -25
- evalscope/app/ui/single_model.py +23 -11
- evalscope/app/utils/data_utils.py +42 -26
- evalscope/app/utils/text_utils.py +0 -2
- evalscope/app/utils/visualization.py +9 -4
- evalscope/arguments.py +6 -7
- evalscope/backend/opencompass/api_meta_template.py +2 -1
- evalscope/backend/opencompass/backend_manager.py +6 -3
- evalscope/backend/rag_eval/clip_benchmark/dataset_builder.py +10 -10
- evalscope/backend/rag_eval/clip_benchmark/task_template.py +8 -4
- evalscope/backend/rag_eval/ragas/task_template.py +2 -1
- evalscope/backend/rag_eval/ragas/tasks/build_distribution.py +2 -1
- evalscope/backend/rag_eval/ragas/tasks/build_transform.py +7 -4
- evalscope/backend/rag_eval/ragas/tasks/testset_generation.py +2 -1
- evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py +2 -1
- evalscope/backend/rag_eval/utils/embedding.py +2 -1
- evalscope/backend/rag_eval/utils/llm.py +13 -12
- evalscope/benchmarks/__init__.py +0 -2
- evalscope/benchmarks/aigc/i2i/__init__.py +0 -0
- evalscope/benchmarks/aigc/i2i/general_i2i_adapter.py +44 -0
- evalscope/benchmarks/aigc/t2i/evalmuse_adapter.py +53 -55
- evalscope/benchmarks/aigc/t2i/genai_bench_adapter.py +41 -46
- evalscope/benchmarks/aigc/t2i/general_t2i_adapter.py +29 -45
- evalscope/benchmarks/aigc/t2i/hpdv2_adapter.py +34 -44
- evalscope/benchmarks/aigc/t2i/tifa_adapter.py +16 -27
- evalscope/benchmarks/aime/aime24_adapter.py +38 -40
- evalscope/benchmarks/aime/aime25_adapter.py +34 -40
- evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py +86 -60
- evalscope/benchmarks/arc/arc_adapter.py +34 -147
- evalscope/benchmarks/arena_hard/arena_hard_adapter.py +96 -70
- evalscope/benchmarks/arena_hard/utils.py +37 -1
- evalscope/benchmarks/bbh/bbh_adapter.py +72 -144
- evalscope/benchmarks/bfcl/bfcl_adapter.py +181 -160
- evalscope/benchmarks/bfcl/generation.py +222 -0
- evalscope/benchmarks/ceval/ceval_adapter.py +94 -162
- evalscope/benchmarks/chinese_simple_qa/csimple_qa_adapter.py +85 -82
- evalscope/benchmarks/cmmlu/cmmlu_adapter.py +34 -125
- evalscope/benchmarks/competition_math/competition_math_adapter.py +56 -108
- evalscope/benchmarks/data_collection/data_collection_adapter.py +183 -45
- evalscope/benchmarks/docmath/docmath_adapter.py +109 -51
- evalscope/benchmarks/docmath/utils.py +4 -5
- evalscope/benchmarks/drop/drop_adapter.py +88 -40
- evalscope/benchmarks/frames/frames_adapter.py +135 -52
- evalscope/benchmarks/general_arena/general_arena_adapter.py +136 -98
- evalscope/benchmarks/general_arena/utils.py +23 -27
- evalscope/benchmarks/general_mcq/general_mcq_adapter.py +40 -101
- evalscope/benchmarks/general_qa/general_qa_adapter.py +73 -134
- evalscope/benchmarks/gpqa/gpqa_adapter.py +61 -100
- evalscope/benchmarks/gpqa/{chain_of_thought.txt → prompt.py} +12 -5
- evalscope/benchmarks/gsm8k/gsm8k_adapter.py +62 -142
- evalscope/benchmarks/hellaswag/hellaswag_adapter.py +35 -124
- evalscope/benchmarks/hle/hle_adapter.py +127 -93
- evalscope/benchmarks/humaneval/humaneval_adapter.py +86 -55
- evalscope/benchmarks/ifeval/ifeval_adapter.py +69 -40
- evalscope/benchmarks/ifeval/instructions.py +109 -64
- evalscope/benchmarks/ifeval/instructions_registry.py +1 -1
- evalscope/benchmarks/ifeval/utils.py +6 -7
- evalscope/benchmarks/iquiz/iquiz_adapter.py +30 -65
- evalscope/benchmarks/live_code_bench/evaluate_utils.py +2 -2
- evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +121 -71
- evalscope/benchmarks/live_code_bench/load_utils.py +13 -21
- evalscope/benchmarks/live_code_bench/testing_util.py +6 -2
- evalscope/benchmarks/maritime_bench/maritime_bench_adapter.py +49 -75
- evalscope/benchmarks/math_500/math_500_adapter.py +41 -48
- evalscope/benchmarks/mmlu/mmlu_adapter.py +32 -205
- evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +80 -99
- evalscope/benchmarks/mmlu_redux/mmlu_redux_adapter.py +64 -110
- evalscope/benchmarks/musr/musr_adapter.py +33 -64
- evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +192 -152
- evalscope/benchmarks/process_bench/process_bench_adapter.py +144 -76
- evalscope/benchmarks/race/race_adapter.py +33 -119
- evalscope/benchmarks/simple_qa/simple_qa_adapter.py +72 -70
- evalscope/benchmarks/super_gpqa/{five_shot_prompt.txt → prompt.py} +14 -16
- evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py +73 -117
- evalscope/benchmarks/super_gpqa/utils.py +2 -1
- evalscope/benchmarks/tau_bench/generation.py +147 -0
- evalscope/benchmarks/tau_bench/tau_bench_adapter.py +112 -54
- evalscope/benchmarks/tool_bench/tool_bench_adapter.py +91 -70
- evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +56 -124
- evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +70 -265
- evalscope/benchmarks/winogrande/winogrande_adapter.py +28 -54
- evalscope/cli/cli.py +2 -0
- evalscope/cli/start_server.py +6 -3
- evalscope/collections/__init__.py +2 -10
- evalscope/collections/sampler.py +10 -10
- evalscope/collections/schema.py +13 -11
- evalscope/config.py +95 -54
- evalscope/constants.py +29 -61
- evalscope/evaluator/__init__.py +1 -1
- evalscope/evaluator/evaluator.py +277 -423
- evalscope/filters/__init__.py +2 -0
- evalscope/filters/extraction.py +126 -0
- evalscope/filters/selection.py +57 -0
- evalscope/metrics/__init__.py +13 -13
- evalscope/metrics/llm_judge.py +32 -30
- evalscope/metrics/math_parser.py +27 -22
- evalscope/metrics/metric.py +307 -0
- evalscope/metrics/metrics.py +22 -18
- evalscope/metrics/t2v_metrics/__init__.py +0 -52
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/clip_model.py +4 -2
- evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/cross_modeling.py +9 -13
- evalscope/metrics/t2v_metrics/models/clipscore_models/clip_model.py +2 -1
- evalscope/metrics/t2v_metrics/models/clipscore_models/hpsv2_model.py +3 -2
- evalscope/metrics/t2v_metrics/models/clipscore_models/mps_model.py +2 -1
- evalscope/metrics/t2v_metrics/models/clipscore_models/pickscore_model.py +2 -2
- evalscope/metrics/t2v_metrics/models/itmscore_models/blip2_itm_model.py +2 -1
- evalscope/metrics/t2v_metrics/models/itmscore_models/fga_blip2_model.py +4 -2
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/ImageReward.py +10 -5
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/blip_pretrain.py +4 -2
- evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward_model.py +2 -1
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/language_model/clip_t5.py +15 -9
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/clip_encoder.py +4 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5_model.py +15 -10
- evalscope/metrics/t2v_metrics/models/vqascore_models/gpt4v_model.py +9 -6
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +2 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +4 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +4 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +3 -9
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +16 -10
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +3 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +4 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/__init__.py +8 -4
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +47 -25
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_qformer.py +12 -7
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5.py +23 -17
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5_instruct.py +33 -23
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/fga_blip2.py +2 -1
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_llama.py +46 -30
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_t5.py +69 -37
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/__init__.py +7 -5
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip.py +6 -4
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_image_text_matching.py +7 -5
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_nlvr.py +3 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_outputs.py +5 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_vqa.py +17 -13
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +35 -19
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/clip_vit.py +14 -12
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/eva_vit.py +63 -52
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +63 -38
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/vit.py +6 -3
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/__init__.py +6 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/randaugment.py +3 -2
- evalscope/metrics/t2v_metrics/models/vqascore_models/mm_utils.py +15 -13
- evalscope/metrics/t2v_metrics/models/vqascore_models/vqa_model.py +3 -2
- evalscope/models/__init__.py +6 -29
- evalscope/models/mockllm.py +65 -0
- evalscope/models/model_apis.py +47 -0
- evalscope/models/modelscope.py +455 -0
- evalscope/models/openai_compatible.py +123 -0
- evalscope/models/text2image_model.py +124 -0
- evalscope/models/utils/openai.py +698 -0
- evalscope/perf/benchmark.py +2 -1
- evalscope/perf/http_client.py +4 -2
- evalscope/perf/plugin/api/custom_api.py +5 -4
- evalscope/perf/plugin/api/openai_api.py +11 -9
- evalscope/perf/plugin/datasets/custom.py +2 -1
- evalscope/perf/plugin/datasets/flickr8k.py +1 -1
- evalscope/perf/plugin/datasets/kontext_bench.py +1 -1
- evalscope/perf/plugin/datasets/line_by_line.py +2 -1
- evalscope/perf/plugin/datasets/longalpaca.py +2 -1
- evalscope/perf/plugin/datasets/openqa.py +4 -2
- evalscope/perf/utils/benchmark_util.py +7 -5
- evalscope/perf/utils/db_util.py +9 -6
- evalscope/perf/utils/local_server.py +8 -3
- evalscope/perf/utils/rich_display.py +16 -10
- evalscope/report/__init__.py +2 -2
- evalscope/report/combinator.py +18 -12
- evalscope/report/generator.py +101 -6
- evalscope/report/{utils.py → report.py} +8 -6
- evalscope/run.py +26 -44
- evalscope/summarizer.py +1 -1
- evalscope/utils/__init__.py +21 -2
- evalscope/utils/chat_service.py +2 -1
- evalscope/utils/deprecation_utils.py +12 -1
- evalscope/utils/function_utils.py +29 -0
- evalscope/utils/io_utils.py +100 -5
- evalscope/utils/json_schema.py +208 -0
- evalscope/utils/logger.py +51 -12
- evalscope/utils/model_utils.py +10 -7
- evalscope/utils/multi_choices.py +271 -0
- evalscope/utils/url_utils.py +65 -0
- evalscope/version.py +2 -2
- {evalscope-0.17.1.dist-info → evalscope-1.0.0.dist-info}/METADATA +98 -49
- {evalscope-0.17.1.dist-info → evalscope-1.0.0.dist-info}/RECORD +234 -216
- tests/aigc/test_t2i.py +22 -4
- tests/benchmark/__init__.py +1 -0
- tests/benchmark/test_eval.py +386 -0
- tests/cli/test_all.py +3 -5
- tests/cli/test_collection.py +13 -4
- tests/cli/test_custom.py +22 -15
- tests/rag/test_clip_benchmark.py +1 -0
- evalscope/benchmarks/aigc/t2i/base.py +0 -56
- evalscope/benchmarks/arc/ai2_arc.py +0 -151
- evalscope/benchmarks/benchmark.py +0 -81
- evalscope/benchmarks/ceval/ceval_exam.py +0 -146
- evalscope/benchmarks/cmmlu/cmmlu.py +0 -161
- evalscope/benchmarks/cmmlu/samples.jsonl +0 -5
- evalscope/benchmarks/competition_math/competition_math.py +0 -79
- evalscope/benchmarks/data_adapter.py +0 -528
- evalscope/benchmarks/filters.py +0 -59
- evalscope/benchmarks/gsm8k/gsm8k.py +0 -121
- evalscope/benchmarks/hellaswag/hellaswag.py +0 -112
- evalscope/benchmarks/humaneval/humaneval.py +0 -79
- evalscope/benchmarks/mmlu/mmlu.py +0 -160
- evalscope/benchmarks/mmlu/samples.jsonl +0 -5
- evalscope/benchmarks/process_bench/critique_template.txt +0 -13
- evalscope/benchmarks/race/race.py +0 -104
- evalscope/benchmarks/race/samples.jsonl +0 -5
- evalscope/benchmarks/super_gpqa/zero_shot_prompt.txt +0 -4
- evalscope/benchmarks/trivia_qa/trivia_qa.py +0 -89
- evalscope/benchmarks/truthful_qa/truthful_qa.py +0 -163
- evalscope/benchmarks/utils.py +0 -60
- evalscope/collections/evaluator.py +0 -375
- evalscope/metrics/completion_parsers.py +0 -227
- evalscope/metrics/named_metrics.py +0 -55
- evalscope/models/adapters/__init__.py +0 -14
- evalscope/models/adapters/base_adapter.py +0 -84
- evalscope/models/adapters/bfcl_adapter.py +0 -246
- evalscope/models/adapters/chat_adapter.py +0 -207
- evalscope/models/adapters/choice_adapter.py +0 -222
- evalscope/models/adapters/custom_adapter.py +0 -71
- evalscope/models/adapters/server_adapter.py +0 -236
- evalscope/models/adapters/t2i_adapter.py +0 -79
- evalscope/models/adapters/tau_bench_adapter.py +0 -189
- evalscope/models/custom/__init__.py +0 -4
- evalscope/models/custom/custom_model.py +0 -50
- evalscope/models/custom/dummy_model.py +0 -99
- evalscope/models/local_model.py +0 -128
- evalscope/models/register.py +0 -41
- tests/cli/test_run.py +0 -489
- {evalscope-0.17.1.dist-info → evalscope-1.0.0.dist-info}/LICENSE +0 -0
- {evalscope-0.17.1.dist-info → evalscope-1.0.0.dist-info}/WHEEL +0 -0
- {evalscope-0.17.1.dist-info → evalscope-1.0.0.dist-info}/entry_points.txt +0 -0
- {evalscope-0.17.1.dist-info → evalscope-1.0.0.dist-info}/top_level.txt +0 -0
evalscope/benchmarks/aime/aime24_adapter.py

@@ -1,5 +1,12 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+
+from typing import Any, Dict
+
+from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.evaluator import TaskState
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
 from evalscope.utils.logger import get_logger
 
 # flake8: noqa
@@ -7,46 +14,37 @@ from evalscope.utils.logger import get_logger
 logger = get_logger()
 
 
-@
+@register_benchmark(
+    BenchmarkMeta(
+        name='aime24',
+        pretty_name='AIME-2024',
+        tags=[Tags.MATH, Tags.REASONING],
+        description=
+        'The AIME 2024 benchmark is based on problems from the American Invitational Mathematics Examination, a prestigious high school mathematics competition. This benchmark tests a model\'s ability to solve challenging mathematics problems by generating step-by-step solutions and providing the correct final answer.',  # noqa: E501
+        dataset_id='HuggingFaceH4/aime_2024',
+        subset_list=['default'],
+        metric_list=[{
+            'acc': {
+                'numeric': True
+            }
+        }],
+        few_shot_num=0,
+        train_split=None,
+        eval_split='train',  # Only train set is available
+        prompt_template='{question}\nPlease reason step by step, and put your final answer within \\boxed{{}}.',
+    )
 )
-class AIME24Adapter(
+class AIME24Adapter(DefaultDataAdapter):
 
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
 
-    def
-
-    def get_gold_answer(self, input_d: dict) -> str:
-        # Extract the gold answer from the input dict.
-        return strip_answer_string(input_d['answer'])
-
-    def parse_pred_result(self, result: str, raw_input_d: dict = None, eval_type: str = 'checkpoint') -> str:
-        """
-        Parse the model output to get the answer. Could be the best choice index.
-        """
-        # Note: Use same extraction method for both of checkpoint/service/custom
-        result = strip_answer_string(extract_answer(result))
-        return result
-
-    def match(self, gold: str, pred: str) -> float:
-        res = math_equal(pred, gold)
-        return 1.0 if res else 0.0
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+        return Sample(
+            input=record['problem'],
+            target=record['answer'],
+            metadata={
+                'problem_id': record.get('id', ''),
+                'solution': record.get('solution', ''),
+            },
+        )
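Taken together, the hunks above replace the 0.17.x adapter contract (`gen_prompt`, `get_gold_answer`, `parse_pred_result`, `match`) with a declarative `BenchmarkMeta` registered through `register_benchmark` and a single `record_to_sample` hook. A minimal sketch of a custom benchmark written against the `evalscope.api` surface visible in this diff; the benchmark name, dataset id, and record field names are placeholders, not part of the release:

# Sketch only, based on the adapter pattern shown above.
# 'demo_math', 'org/demo_math' and the 'question'/'answer' record fields are
# illustrative assumptions, not names shipped in evalscope 1.0.0.
from typing import Any, Dict

from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
from evalscope.api.dataset import Sample
from evalscope.api.registry import register_benchmark
from evalscope.constants import Tags


@register_benchmark(
    BenchmarkMeta(
        name='demo_math',
        pretty_name='Demo-Math',
        tags=[Tags.MATH],
        description='Toy benchmark illustrating the 1.0.0 adapter API.',
        dataset_id='org/demo_math',
        subset_list=['default'],
        metric_list=[{'acc': {'numeric': True}}],
        few_shot_num=0,
        train_split=None,
        eval_split='test',
        prompt_template='{question}\nPlease reason step by step, and put your final answer within \\boxed{{}}.',
    )
)
class DemoMathAdapter(DefaultDataAdapter):

    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
        # Map one raw dataset row onto the unified Sample structure.
        return Sample(input=record['question'], target=record['answer'])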
evalscope/benchmarks/aime/aime25_adapter.py

@@ -1,5 +1,12 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+
+from typing import Any, Dict
+
+from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.evaluator import TaskState
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
 from evalscope.utils.logger import get_logger
 
 # flake8: noqa
@@ -7,46 +14,33 @@ from evalscope.utils.logger import get_logger
 logger = get_logger()
 
 
-@
+@register_benchmark(
+    BenchmarkMeta(
+        name='aime25',
+        pretty_name='AIME-2025',
+        tags=[Tags.MATH, Tags.REASONING],
+        description=
+        'The AIME 2025 benchmark is based on problems from the American Invitational Mathematics Examination, a prestigious high school mathematics competition. This benchmark tests a model\'s ability to solve challenging mathematics problems by generating step-by-step solutions and providing the correct final answer.',
+        dataset_id='opencompass/AIME2025',
+        subset_list=['AIME2025-I', 'AIME2025-II'],
+        metric_list=[{
+            'acc': {
+                'numeric': True
+            }
+        }],
+        few_shot_num=0,
+        train_split=None,
+        eval_split='test',
+        prompt_template='{question}\nPlease reason step by step, and put your final answer within \\boxed{{}}.',
+    )
 )
-class AIME25Adapter(
+class AIME25Adapter(DefaultDataAdapter):
 
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
 
-    def
-        full_prompt = self.prompt_template.format(query=problem)
-
-        return self.gen_prompt_data(full_prompt)
-
-    def get_gold_answer(self, input_d: dict) -> str:
-        # Extract the gold answer from the input dict.
-        return strip_answer_string(input_d['answer'])
-
-    def parse_pred_result(self, result: str, raw_input_d: dict = None, eval_type: str = 'checkpoint') -> str:
-        """
-        Parse the model output to get the answer. Could be the best choice index.
-        """
-        # Note: Use same extraction method for both of checkpoint/service/custom
-        result = strip_answer_string(extract_answer(result))
-        return result
-
-    def match(self, gold: str, pred: str) -> float:
-        res = math_equal(pred, gold)
-        return 1.0 if res else 0.0
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+        return Sample(
+            input=record['question'],
+            target=record['answer'],
+        )
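The registered names ('aime24', 'aime25') are what a task configuration refers to when selecting datasets. A usage sketch, assuming the top-level `TaskConfig`/`run_task` entry points keep their pre-1.0 shape (both `evalscope/config.py` and `evalscope/run.py` are modified rather than removed in this diff); the model id is a placeholder:

# Sketch only: field names are assumed to be unchanged from 0.17.x;
# check the 1.0.0 documentation before relying on this.
from evalscope import TaskConfig, run_task

task_cfg = TaskConfig(
    model='Qwen/Qwen2.5-7B-Instruct',  # placeholder model id
    datasets=['aime24', 'aime25'],     # benchmark names registered by the adapters above
    limit=5,                           # evaluate a small slice while smoke-testing
)
run_task(task_cfg=task_cfg)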
evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py

@@ -1,16 +1,17 @@
 import re
-from
-from evalscope.
-from evalscope.
+from typing import Any, Dict
+
+from evalscope.api.benchmark import BenchmarkMeta, DefaultDataAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.evaluator import TaskState
+from evalscope.api.metric import Score
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
 from evalscope.utils.logger import get_logger
 
-# flake8: noqa
-
 logger = get_logger()
 
-GRADER_SYSTEM_PROMPT = """You are a highly efficient assistant, who evaluates and selects the best large language model (LLMs) based on the quality of their responses to a given instruction. This process will be used to create a leaderboard reflecting the most accurate and human-preferred answers."""
+GRADER_SYSTEM_PROMPT = """You are a highly efficient assistant, who evaluates and selects the best large language model (LLMs) based on the quality of their responses to a given instruction. This process will be used to create a leaderboard reflecting the most accurate and human-preferred answers."""  # noqa: E501
 
 GRADER_TEMPLATE = """
 I require a leaderboard for various large language models. I'll provide you with prompts given to these models and their corresponding outputs. Your task is to assess these responses, and select the model that produces the best output from a human perspective.
@@ -44,64 +45,89 @@ Evaluate the models based on the quality and relevance of their outputs, and sel
 """.strip()  # noqa: E501
 
 
-@
+@register_benchmark(
+    BenchmarkMeta(
+        name='alpaca_eval',
+        pretty_name='AlpacaEval2.0',
+        tags=[Tags.INSTRUCTION_FOLLOWING, Tags.ARENA],
+        description='Alpaca Eval 2.0 is an enhanced framework for evaluating instruction-following language models, '
+        'featuring an improved auto-annotator, updated baselines, and continuous preference calculation to '
+        'provide more accurate and cost-effective model assessments. '
+        'Currently not support `length-controlled winrate`; the official Judge model is `gpt-4-1106-preview`, while the baseline model is `gpt-4-turbo`.',  # noqa: E501
+        dataset_id='AI-ModelScope/alpaca_eval',
+        subset_list=['alpaca_eval_gpt4_baseline'],
+        metric_list=['winrate'],
+        few_shot_num=0,
+        train_split=None,
+        eval_split='eval',
+        prompt_template='{question}'
+    )
+)
+class AlpacaEvalAdapter(DefaultDataAdapter):
 
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
 
-    #
+        self._use_llm_judge = True  # Use LLM as a judge by default
+
+    def record_to_sample(self, record: Dict[str, Any]) -> Sample:
+        """
+        Convert a data record to a Sample object.
+
+        Args:
+            record (Dict[str, Any]): Input data record.
+
+        Returns:
+            Sample: Sample object with input, target, and metadata.
+        """
+        instruction = record['instruction']
+        baseline_output = record['output']  # baseline model output
+
+        return Sample(
+            input=instruction,
+            target=baseline_output,
+            metadata={
+                'generator': record.get('generator', 'unknown'),
+                'dataset': record.get('dataset', 'unknown')
+            }
+        )
+
+    def llm_match_score(
+        self,
+        original_prediction: str,
+        filtered_prediction: str,
+        reference: str,
+        task_state: TaskState,
+    ) -> Score:
+        score = Score(
+            extracted_prediction=filtered_prediction,
+            prediction=original_prediction,
+        )
+
+        instruction = task_state.input_text
+
+        # Request judge and obtain score
+        # reference is baseline answer 'm', filtered_prediction is model answer 'M'
+        prompt = GRADER_TEMPLATE.format(instruction=instruction, output_1=reference, output_2=filtered_prediction)
+        judge_response = self.llm_judge.judge(prompt, system_prompt=GRADER_SYSTEM_PROMPT)
 
-    def parse_pred_result(self, result: str, raw_input_d: dict = None, **kwargs) -> str:
-        return result.strip()
-
-    def match(self, gold: str, pred: str):
-        # simple match
-        logger.warning(f'Please use LLMJudge to match the result for {self.name}')
-        return None
-
-    def llm_match(self, gold: Any, pred: Any, judge: LLMJudge, **kwargs) -> bool:
-        raw_input = kwargs.get('raw_input', None)
-        instruction = raw_input['instruction']
-        # gold is baseline answer 'm', pred is model answer 'M'
-        prompt = GRADER_TEMPLATE.format(instruction=instruction, output_1=gold, output_2=pred)
-        # get grading response
-        grading_response = judge(prompt, system_prompt=GRADER_SYSTEM_PROMPT)
         # parse grading response
-        match = re.search(r'(m|M)',
+        match = re.search(r'(m|M)', judge_response)
         res = match.group(0) if match else None
+
         if res:
-
+            winrate = 1 if res == 'M' else 0
         else:
-            logger.info(f'Failed to parse grading response: {prompt=}\n {
+            logger.info(f'Failed to parse grading response: {prompt=}\n {judge_response=}')
+            winrate = 0
+
+        # Set score based on the match result
+        score.value = {'winrate': winrate}
+        score.explanation = f'LLM judge: {judge_response}'
+        score.metadata = {
+            'source': 'llm_judge',
+            'judge_strategy': self.judge_strategy,
+            'model': self.llm_judge.model_id
+        }
+        score.main_score_name = 'winrate'
+        return score
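The win/lose decision in `llm_match_score` comes down to extracting the first `m`/`M` verdict from the judge reply, counting `M` (the candidate model) as a win and an unparseable reply as a loss. A standalone sketch of that parsing step, lifted from the logic above; the sample replies are invented:

import re


def parse_winrate(judge_response: str) -> int:
    """Return 1 if the judge prefers the candidate model ('M'), else 0.

    Mirrors AlpacaEvalAdapter.llm_match_score: the first occurrence of
    'm' (baseline wins) or 'M' (candidate wins) decides the outcome.
    """
    match = re.search(r'(m|M)', judge_response)
    res = match.group(0) if match else None
    if not res:
        return 0
    return 1 if res == 'M' else 0


assert parse_winrate('M') == 1           # candidate preferred
assert parse_winrate('m') == 0           # baseline preferred
assert parse_winrate('no verdict') == 0  # unparseable reply counts as a loss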
evalscope/benchmarks/arc/arc_adapter.py

@@ -1,159 +1,46 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
 
-import
-import
-
-from evalscope.
-from evalscope.constants import EvalType, OutputType
-from evalscope.metrics import exact_match
-from evalscope.metrics.completion_parsers import ResponseParser
+from evalscope.api.benchmark import BenchmarkMeta, MultiChoiceAdapter
+from evalscope.api.dataset import Sample
+from evalscope.api.registry import register_benchmark
+from evalscope.constants import Tags
 from evalscope.utils.logger import get_logger
-
-# flake8: noqa
+from evalscope.utils.multi_choices import MultipleChoiceTemplate
 
 logger = get_logger()
 
 
-@
-        'Given the following question and four candidate answers (A, B, C and D), choose the best answer.\n{query}\nYour response should end with "The best answer is [the_answer_letter]" where the [the_answer_letter] is one of A, B, C or D.',  # noqa
+@register_benchmark(
+    BenchmarkMeta(
+        name='arc',
+        pretty_name='ARC',
+        tags=[Tags.REASONING, Tags.MULTIPLE_CHOICE],
+        description=
+        'The ARC (AI2 Reasoning Challenge) benchmark is designed to evaluate the reasoning capabilities of AI models through multiple-choice questions derived from science exams. It includes two subsets: ARC-Easy and ARC-Challenge, which vary in difficulty.',  # noqa: E501
+        dataset_id='allenai/ai2_arc',
+        subset_list=['ARC-Easy', 'ARC-Challenge'],
+        metric_list=['acc'],
+        few_shot_num=0,
+        train_split='train',
+        eval_split='test',
+        prompt_template=MultipleChoiceTemplate.SINGLE_ANSWER,
+    )
 )
-class ARCAdapter(
+class ARCAdapter(MultiChoiceAdapter):
 
     def __init__(self, **kwargs):
-        few_shot_num = kwargs.get('few_shot_num', None)
-        if few_shot_num is None:
-            # Use 0-shot by default
-            logger.info(f'Set 0-shot examples by system for ARC.')
-            few_shot_num = 0
-
-        if few_shot_num != 0:
-            logger.warning(f'few_shot_num is recommended to set 0 for ARC, got {few_shot_num}.')
-
         super().__init__(**kwargs)
 
-        if os.path.exists(dataset_name_or_path):
-            subset_path = os.path.join(dataset_name_or_path, subset_name)
-        else:
-            subset_path = os.path.join(work_dir, dataset_name_or_path, subset_name)
-        for split_name in ['Train', 'Test']:
-            split_path = os.path.join(subset_path, f'{subset_name}-{split_name}.jsonl')
-            if os.path.exists(split_path):
-                with open(split_path, 'r', errors='ignore', encoding='utf-8') as in_f:
-                    rows = []
-                    for line in in_f:
-                        item = json.loads(line.strip())
-                        raw_choices = item['question']['choices']
-                        rows.append({
-                            'id': item['id'],
-                            'question': item['question']['stem'],
-                            'choices': {
-                                'text': [d['text'] for d in raw_choices],
-                                'label': [d['label'] for d in raw_choices]
-                            },
-                            'answerKey': item['answerKey'],
-                        })
-
-                    if subset_name in data_dict:
-                        data_dict[subset_name].update({split_name.lower(): rows})
-                    else:
-                        data_dict[subset_name] = {split_name.lower(): rows}
-
-        return data_dict
-
-    def gen_prompt(self, input_d: dict, few_shot_list: list, **kwargs) -> dict:
-        """
-        Generate model prompt from raw data, unify the prompt format for ARC benchmark.
-
-        Args:
-            input_d (dict): The raw input. A single data format of the ARC:
-
-            {
-                'id': 'Mercury_7220990',
-                'question': 'Which factor will most likely cause a person to develop a fever?',
-                'choices':
-                {
-                    'text':['a leg muscle relaxing after exercise',
-                            'a bacterial population in the bloodstream',
-                            'several viral particles on the skin',
-                            'carbohydrates being digested in the stomach'],
-                    'label': ['A', 'B', 'C', 'D']
-                },
-                'answerKey': 'B'
-            }
-
-        Returns:
-            {'data': ['xxx'], 'multi_choices': ['A', 'B', 'C', 'D']}
-        """
-        few_shot_prompts = [self._generate_prompt(input_d=sample, include_answer=True) for sample in few_shot_list]
-        context = '\n'.join(few_shot_prompts) + self._generate_prompt(input_d=input_d, include_answer=False)
-
-        full_prompt = self.prompt_template.format(query=context)
-
-        return self.gen_prompt_data(full_prompt)
-
-    def get_gold_answer(self, input_d: dict) -> str:
-        # Get the gold choice
-        return input_d.get('answerKey', '')
-
-    def parse_pred_result(self, result: str, raw_input_d: dict = None, eval_type: str = EvalType.CHECKPOINT) -> str:
-        """
-        Parse the model output to get the answer. Could be the best choice index.
-
-        Args:
-            result: Predicted answer from the model. Usually a string for chat.
-            raw_input_d (dict): The raw input. Depending on the dataset.
-            eval_type: 'checkpoint' or 'service' or `custom`, default: 'checkpoint'
-
-        Returns:
-            The parsed answer. Depending on the dataset. Usually a string for chat.
-        """
-        if self.model_adapter == OutputType.MULTIPLE_CHOICE:
-            return result
-        else:
-            return ResponseParser.parse_first_option(text=result, options=self.choices)
-
-    def match(self, gold: str, pred: str) -> float:
-        return exact_match(gold=gold, pred=pred)
-
-    @classmethod
-    def _generate_prompt(cls, input_d: dict, include_answer=True) -> str:
-
-        example: str = input_d['question']
-
-        choices_texts: list = input_d['choices']['text']
-        choices_labels: list = input_d['choices']['label']
-        choices_prompts: str = '\n'.join([label + '. ' + text for text, label in zip(choices_texts, choices_labels)])
-        example += '\n' + choices_prompts
-
-        if include_answer:
-            example += '\nAnswer:'
-            example += ' {}\n\n'.format(input_d['answerKey'])
-
-        return example
+    def record_to_sample(self, record) -> Sample:
+        # Convert choice labels to indices (A->0, B->1, etc.)
+        choice_texts = record['choices']['text']
+        answer_key = record['answerKey']
+
+        return Sample(
+            input=record['question'],
+            choices=choice_texts,
+            target=answer_key,
+            metadata={
+                'id': record.get('id', ''),
+            },
+        )