evalscope 0.11.0__py3-none-any.whl → 0.12.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- evalscope/arguments.py +3 -1
- evalscope/benchmarks/{aime24 → aime}/aime24_adapter.py +3 -3
- evalscope/benchmarks/aime/aime25_adapter.py +49 -0
- evalscope/benchmarks/arc/arc_adapter.py +14 -17
- evalscope/benchmarks/bbh/bbh_adapter.py +6 -11
- evalscope/benchmarks/benchmark.py +12 -10
- evalscope/benchmarks/ceval/ceval_adapter.py +10 -15
- evalscope/benchmarks/cmmlu/cmmlu_adapter.py +11 -16
- evalscope/benchmarks/competition_math/competition_math_adapter.py +6 -20
- evalscope/benchmarks/data_adapter.py +82 -19
- evalscope/benchmarks/data_collection/data_collection_adapter.py +0 -1
- evalscope/benchmarks/general_mcq/general_mcq_adapter.py +15 -22
- evalscope/benchmarks/general_qa/general_qa_adapter.py +29 -16
- evalscope/benchmarks/gpqa/gpqa_adapter.py +13 -8
- evalscope/benchmarks/gsm8k/gsm8k_adapter.py +3 -4
- evalscope/benchmarks/hellaswag/hellaswag_adapter.py +8 -12
- evalscope/benchmarks/humaneval/humaneval_adapter.py +2 -2
- evalscope/benchmarks/ifeval/ifeval_adapter.py +3 -4
- evalscope/benchmarks/iquiz/iquiz_adapter.py +9 -5
- evalscope/benchmarks/math_500/math_500_adapter.py +9 -4
- evalscope/benchmarks/mmlu/mmlu_adapter.py +11 -16
- evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +24 -36
- evalscope/benchmarks/musr/__init__.py +0 -0
- evalscope/benchmarks/musr/musr_adapter.py +71 -0
- evalscope/benchmarks/process_bench/__init__.py +0 -0
- evalscope/benchmarks/process_bench/critique_template.txt +13 -0
- evalscope/benchmarks/process_bench/process_bench_adapter.py +99 -0
- evalscope/benchmarks/race/race_adapter.py +12 -16
- evalscope/benchmarks/simple_qa/__init__.py +0 -0
- evalscope/benchmarks/simple_qa/simple_qa_adapter.py +20 -0
- evalscope/benchmarks/super_gpqa/__init__.py +0 -0
- evalscope/benchmarks/super_gpqa/five_shot_prompt.txt +89 -0
- evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py +191 -0
- evalscope/benchmarks/super_gpqa/utils.py +90 -0
- evalscope/benchmarks/super_gpqa/zero_shot_prompt.txt +3 -0
- evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +3 -4
- evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +7 -14
- evalscope/benchmarks/utils.py +43 -0
- evalscope/cli/start_app.py +4 -1
- evalscope/cli/start_eval.py +4 -3
- evalscope/cli/start_perf.py +4 -2
- evalscope/collections/evaluator.py +16 -1
- evalscope/config.py +13 -3
- evalscope/constants.py +7 -0
- evalscope/evaluator/evaluator.py +3 -1
- evalscope/metrics/__init__.py +2 -1
- evalscope/metrics/metrics.py +23 -2
- evalscope/metrics/named_metrics.py +1 -0
- evalscope/models/__init__.py +2 -1
- evalscope/models/base_adapter.py +32 -6
- evalscope/models/chat_adapter.py +4 -1
- evalscope/models/choice_adapter.py +4 -0
- evalscope/models/custom_adapter.py +2 -0
- evalscope/models/local_model.py +3 -2
- evalscope/models/register.py +28 -0
- evalscope/models/server_adapter.py +107 -29
- evalscope/perf/__init__.py +0 -1
- evalscope/perf/arguments.py +18 -8
- evalscope/perf/http_client.py +8 -6
- evalscope/perf/plugin/api/openai_api.py +11 -1
- evalscope/perf/utils/analysis_result.py +1 -1
- evalscope/perf/utils/benchmark_util.py +6 -2
- evalscope/report/app.py +15 -8
- evalscope/report/combinator.py +2 -2
- evalscope/run.py +6 -5
- evalscope/third_party/thinkbench/__init__.py +3 -0
- evalscope/third_party/thinkbench/eval.py +429 -0
- evalscope/third_party/thinkbench/infer.py +130 -0
- evalscope/third_party/thinkbench/resources/critique_template.txt +17 -0
- evalscope/third_party/thinkbench/resources/reformat_template.txt +31 -0
- evalscope/third_party/thinkbench/tools/__init__.py +0 -0
- evalscope/third_party/thinkbench/tools/llm.py +48 -0
- evalscope/third_party/thinkbench/tools/utils.py +13 -0
- evalscope/third_party/toolbench_static/llm/swift_infer.py +50 -20
- evalscope/utils/chat_service.py +1 -0
- evalscope/utils/filters.py +59 -0
- evalscope/utils/logger.py +3 -3
- evalscope/utils/model_utils.py +17 -1
- evalscope/utils/utils.py +45 -45
- evalscope/version.py +2 -2
- {evalscope-0.11.0.dist-info → evalscope-0.12.1.dist-info}/METADATA +14 -5
- {evalscope-0.11.0.dist-info → evalscope-0.12.1.dist-info}/RECORD +89 -65
- tests/cli/test_collection.py +1 -1
- tests/cli/test_run.py +151 -32
- /evalscope/benchmarks/{aime24 → aime}/__init__.py +0 -0
- {evalscope-0.11.0.dist-info → evalscope-0.12.1.dist-info}/LICENSE +0 -0
- {evalscope-0.11.0.dist-info → evalscope-0.12.1.dist-info}/WHEEL +0 -0
- {evalscope-0.11.0.dist-info → evalscope-0.12.1.dist-info}/entry_points.txt +0 -0
- {evalscope-0.11.0.dist-info → evalscope-0.12.1.dist-info}/top_level.txt +0 -0
evalscope/arguments.py
CHANGED
@@ -1,7 +1,7 @@
 import argparse
 import json

-from evalscope.constants import EvalBackend, EvalStage, EvalType
+from evalscope.constants import EvalBackend, EvalStage, EvalType, OutputType


 class ParseStrArgsAction(argparse.Action):
@@ -71,6 +71,8 @@ def add_argument(parser: argparse.ArgumentParser):
     parser.add_argument('--seed', type=int, default=42, help='Random seed for reproducibility.')
     parser.add_argument('--api-key', type=str, default='EMPTY', help='The API key for the remote API model.')
     parser.add_argument('--api-url', type=str, default=None, help='The API url for the remote API model.')
+    parser.add_argument('--timeout', type=float, default=None, help='The timeout for the remote API model.')
+    parser.add_argument('--stream', action='store_true', default=False, help='Stream mode.')  # noqa: E501
     # yapf: enable

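The two options added here are the CLI surface for the reworked server adapter (see evalscope/models/server_adapter.py in the file list). A minimal sketch of their behaviour at the argparse level, reproduced on a standalone parser so the snippet stays self-contained; in evalscope itself the same two calls sit inside add_argument() shown above:

import argparse

# Stand-alone demo parser; the two add_argument calls are copied verbatim from the diff above.
parser = argparse.ArgumentParser()
parser.add_argument('--timeout', type=float, default=None, help='The timeout for the remote API model.')
parser.add_argument('--stream', action='store_true', default=False, help='Stream mode.')

args = parser.parse_args(['--timeout', '120', '--stream'])
assert args.timeout == 120.0 and args.stream is True

On the command line these would surface as something like `evalscope eval ... --timeout 120 --stream`, assuming the existing `eval` subcommand wiring in start_eval.py.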
evalscope/benchmarks/{aime24 → aime}/aime24_adapter.py
CHANGED
@@ -1,6 +1,6 @@
 from evalscope.benchmarks import Benchmark, DataAdapter
+from evalscope.constants import OutputType
 from evalscope.metrics.math_parser import extract_answer, math_equal, strip_answer_string
-from evalscope.models import ChatGenerationModelAdapter
 from evalscope.utils.logger import get_logger

 # flake8: noqa
@@ -10,8 +10,8 @@ logger = get_logger()

 @Benchmark.register(
     name='aime24',
+    pretty_name='AIME-2024',
     dataset_id='HuggingFaceH4/aime_2024',
-    model_adapter=ChatGenerationModelAdapter,
     subset_list=['default'],
     metric_list=['AveragePass@1'],
     few_shot_num=0,
@@ -31,7 +31,7 @@ class AIME24Adapter(DataAdapter):
         problem = input_d['problem']
         full_prompt = self.prompt_template.format(query=problem)

-        return
+        return self.gen_prompt_data(full_prompt)

     def get_gold_answer(self, input_d: dict) -> str:
         # Extract the gold answer from the input dict.
evalscope/benchmarks/aime/aime25_adapter.py
ADDED
@@ -0,0 +1,49 @@
+from evalscope.benchmarks import Benchmark, DataAdapter
+from evalscope.constants import OutputType
+from evalscope.metrics.math_parser import extract_answer, math_equal, strip_answer_string
+from evalscope.utils.logger import get_logger
+
+# flake8: noqa
+
+logger = get_logger()
+
+
+@Benchmark.register(
+    name='aime25',
+    pretty_name='AIME-2025',
+    dataset_id='TIGER-Lab/AIME25',
+    subset_list=['default'],
+    metric_list=['AveragePass@1'],
+    few_shot_num=0,
+    train_split=None,
+    eval_split='train',  # Only train set is available
+    prompt_template='{query}\nPlease reason step by step, and put your final answer within \\boxed{{}}.',
+)
+class AIME25Adapter(DataAdapter):
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+    def gen_prompt(self, input_d: dict, few_shot_list: list, **kwargs) -> dict:
+        """
+        Generate the prompt for the model input.
+        """
+        problem = input_d['question']
+        full_prompt = self.prompt_template.format(query=problem)
+
+        return self.gen_prompt_data(full_prompt)
+
+    def get_gold_answer(self, input_d: dict) -> str:
+        # Extract the gold answer from the input dict.
+        return strip_answer_string(input_d['answer'])
+
+    def parse_pred_result(self, result: str, raw_input_d: dict = None, eval_type: str = 'checkpoint') -> str:
+        """
+        Parse the model output to get the answer. Could be the best choice index.
+        """
+        # Note: Use same extraction method for both of checkpoint/service/custom
+        result = strip_answer_string(extract_answer(result))
+        return result
+
+    def match(self, gold: str, pred: str) -> float:
+        return math_equal(pred, gold)
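The new adapter registers `aime25` as a benchmark name, so the dataset should be selectable like any other. A rough sketch of invoking it through the Python API, assuming the existing TaskConfig/run_task entry points; the model name, URL and key are placeholders, and exact field names may differ slightly between versions:

from evalscope import TaskConfig, run_task

task_cfg = TaskConfig(
    model='my-served-model',                              # placeholder served-model name
    api_url='http://127.0.0.1:8000/v1/chat/completions',  # placeholder OpenAI-compatible endpoint
    api_key='EMPTY',
    eval_type='service',                                  # evaluate a remote API model
    datasets=['aime25'],                                  # name registered by AIME25Adapter above
)
run_task(task_cfg=task_cfg)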
evalscope/benchmarks/arc/arc_adapter.py
CHANGED
@@ -4,9 +4,8 @@ import json
 import os

 from evalscope.benchmarks import Benchmark, DataAdapter
-from evalscope.constants import EvalType
+from evalscope.constants import EvalType, OutputType
 from evalscope.metrics import exact_match
-from evalscope.models import MultiChoiceModelAdapter
 from evalscope.utils import ResponseParser
 from evalscope.utils.logger import get_logger

@@ -17,19 +16,20 @@ logger = get_logger()

 @Benchmark.register(
     name='arc',
+    pretty_name='ARC',
     dataset_id='modelscope/ai2_arc',
-    model_adapter=
+    model_adapter=OutputType.MULTIPLE_CHOICE,
+    output_types=[OutputType.MULTIPLE_CHOICE, OutputType.GENERATION],
     subset_list=['ARC-Easy', 'ARC-Challenge'],
     metric_list=['AverageAccuracy'],
     few_shot_num=0,
     train_split='train',
     eval_split='test',
-    prompt_template=
+    prompt_template=
+    'The following are multiple choice questions, please output correct answer in the form of A or B or C or D, do not output explanation:\n{query}',
 )
 class ARCAdapter(DataAdapter):

-    choices = ['A', 'B', 'C', 'D']
-
     def __init__(self, **kwargs):
         few_shot_num = kwargs.get('few_shot_num', None)
         if few_shot_num is None:
@@ -42,6 +42,8 @@ class ARCAdapter(DataAdapter):

         super().__init__(**kwargs)

+        self.choices = ['A', 'B', 'C', 'D']
+
     def load_from_disk(self, dataset_name_or_path, subset_list, work_dir, **kwargs) -> dict:
         """
         Load the dataset from local disk.
@@ -60,7 +62,7 @@ class ARCAdapter(DataAdapter):
             for split_name in ['Train', 'Test']:
                 split_path = os.path.join(subset_path, f'{subset_name}-{split_name}.jsonl')
                 if os.path.exists(split_path):
-                    with open(split_path, 'r', errors='ignore') as in_f:
+                    with open(split_path, 'r', errors='ignore', encoding='utf-8') as in_f:
                         rows = []
                         for line in in_f:
                             item = json.loads(line.strip())
@@ -107,12 +109,11 @@ class ARCAdapter(DataAdapter):
            {'data': ['xxx'], 'multi_choices': ['A', 'B', 'C', 'D']}
         """
         few_shot_prompts = [self._generate_prompt(input_d=sample, include_answer=True) for sample in few_shot_list]
-        context
+        context = '\n'.join(few_shot_prompts) + self._generate_prompt(input_d=input_d, include_answer=False)

-
-        full_prompt: str = context + self._generate_prompt(input_d=input_d, include_answer=False)
+        full_prompt = self.prompt_template.format(query=context)

-        return
+        return self.gen_prompt_data(full_prompt)

     def get_gold_answer(self, input_d: dict) -> str:
         # Get the gold choice
@@ -130,14 +131,10 @@ class ARCAdapter(DataAdapter):
         Returns:
             The parsed answer. Depending on the dataset. Usually a string for chat.
         """
-        if
+        if self.model_adapter == OutputType.MULTIPLE_CHOICE:
             return result
-        elif eval_type == EvalType.SERVICE:
-            return ResponseParser.parse_first_option_with_choices(text=result, options=self.choices)
-        elif eval_type == EvalType.CUSTOM:
-            return ResponseParser.parse_first_option_with_choices(text=result, options=self.choices)
         else:
-
+            return ResponseParser.parse_first_capital(text=result, options=self.choices)

     def match(self, gold: str, pred: str) -> float:
         return exact_match(gold=gold, pred=pred)
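With the adapter now keyed on OutputType, multiple-choice runs keep the raw result, while generation runs are reduced to a single option letter by ResponseParser. A rough, illustrative stand-in for that kind of first-option extraction (this is not the library's parser, just the idea it implements):

import re

def first_option(text: str, options=('A', 'B', 'C', 'D')) -> str:
    # Illustrative only: return the first standalone option letter found in a
    # generated answer; evalscope's ResponseParser has its own, more robust logic.
    match = re.search(rf"\b({'|'.join(options)})\b", text)
    return match.group(1) if match else ''

print(first_option('The correct answer is B, because ...'))  # -> 'B'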
evalscope/benchmarks/bbh/bbh_adapter.py
CHANGED
@@ -8,8 +8,6 @@ import re
 from evalscope.benchmarks import Benchmark, DataAdapter
 from evalscope.constants import AnswerKeys
 from evalscope.metrics import exact_match
-from evalscope.models.chat_adapter import ChatGenerationModelAdapter
-from evalscope.utils import ResponseParser
 from evalscope.utils.logger import get_logger

 # flake8: noqa
@@ -60,8 +58,8 @@ SUBSET_LIST = MULTIPLE_CHOICE_LIST + FREE_FORM_LIST

 @Benchmark.register(
     name='bbh',
+    pretty_name='BBH',
     dataset_id='modelscope/bbh',
-    model_adapter=ChatGenerationModelAdapter,
     subset_list=SUBSET_LIST,
     metric_list=['AverageAccuracy'],
     few_shot_num=3,
@@ -94,7 +92,7 @@ class BBHAdapter(DataAdapter):
                 else:
                     file_path: str = os.path.join(work_dir, dataset_name_or_path, f'{subset_name}.json')
                     if os.path.exists(file_path):
-                        with open(file_path, 'r') as f:
+                        with open(file_path, 'r', encoding='utf-8') as f:
                            examples = json.load(f)['examples']
                            if subset_name in data_dict:
                                data_dict[subset_name].update({split_name: examples})
@@ -125,7 +123,7 @@ class BBHAdapter(DataAdapter):
            cot_prompts = ''
         full_prompt = cot_prompts + self.prompt_template.format(query=input_d['input'])

-        return
+        return self.gen_prompt_data(full_prompt)

     def gen_prompts(self, data_dict: dict) -> dict:
         """
@@ -153,7 +151,9 @@ class BBHAdapter(DataAdapter):
         for sub_name, sub_data_dict in data_dict.items():
             few_shot_data = []
             if self.few_shot_num > 0:
-                with open(
+                with open(
+                        os.path.join(os.path.dirname(__file__), 'cot_prompts', f'{sub_name}.txt'), 'r',
+                        encoding='utf-8') as f:
                     cot_prompt_str = f.read()
                 few_shot_data = [cot_prompt_str]

@@ -171,11 +171,6 @@ class BBHAdapter(DataAdapter):
                 prompt_d[AnswerKeys.RAW_INPUT] = sample_d_new
                 res_dict[sub_name].append(prompt_d)

-        rnd = random.Random()
-        rnd.seed(42)
-        for k, v in res_dict.items():
-            rnd.shuffle(v)
-
         return res_dict

     def get_gold_answer(self, input_d: dict) -> str:
evalscope/benchmarks/benchmark.py
CHANGED
@@ -1,12 +1,13 @@
 import copy
+from collections import OrderedDict
 from dataclasses import dataclass, field
 from typing import TYPE_CHECKING, Dict, List, Optional

+from evalscope.constants import OutputType
+
 if TYPE_CHECKING:
     from evalscope.benchmarks import DataAdapter

-    from evalscope.models import BaseModelAdapter
-
 BENCHMARK_MAPPINGS = {}


@@ -15,8 +16,9 @@ class BenchmarkMeta:
     name: str
     dataset_id: str
     data_adapter: 'DataAdapter'
-    model_adapter:
-
+    model_adapter: Optional[str] = OutputType.GENERATION
+    output_types: Optional[List[str]] = field(default_factory=lambda: [OutputType.GENERATION])
+    subset_list: List[str] = field(default_factory=lambda: ['default'])
     metric_list: List[str] = field(default_factory=list)
     few_shot_num: int = 0
     few_shot_random: bool = False
@@ -24,6 +26,9 @@ class BenchmarkMeta:
     eval_split: Optional[str] = None
     prompt_template: Optional[str] = None
     system_prompt: Optional[str] = None
+    query_template: Optional[str] = None
+    pretty_name: Optional[str] = None
+    filters: Optional[OrderedDict] = None

     def _update(self, args: dict):
         if args.get('local_path'):
@@ -37,10 +42,7 @@ class BenchmarkMeta:
     def to_string_dict(self) -> dict:
         cur_dict = copy.deepcopy(self.__dict__)
         # cur_dict['data_adapter'] = self.data_adapter.__name__
-        # cur_dict['model_adapter'] = self.model_adapter.__name__
-        # cur_dict['metric_list'] = [metric['name'] for metric in self.metric_list]
         del cur_dict['data_adapter']
-        del cur_dict['model_adapter']
         return cur_dict

     def get_data_adapter(self, config: dict = {}) -> 'DataAdapter':
@@ -59,18 +61,18 @@ class Benchmark:
     @classmethod
     def get(cls, name: str) -> 'BenchmarkMeta':
         if name not in BENCHMARK_MAPPINGS:
-            raise Exception(f'Unknown benchmark: {name}. Available tasks: {BENCHMARK_MAPPINGS.keys()}')
+            raise Exception(f'Unknown benchmark: {name}. Available tasks: {list(BENCHMARK_MAPPINGS.keys())}')
         benchmark = BENCHMARK_MAPPINGS[name]
         return benchmark

     @classmethod
-    def register(cls, name: str, dataset_id: str,
+    def register(cls, name: str, dataset_id: str, **kwargs):

         def register_wrapper(data_adapter):
             if name in BENCHMARK_MAPPINGS:
                 raise Exception(f'Benchmark {name} already registered')
             BENCHMARK_MAPPINGS[name] = BenchmarkMeta(
-                name=name, data_adapter=data_adapter,
+                name=name, data_adapter=data_adapter, dataset_id=dataset_id, **kwargs)
             return data_adapter

         return register_wrapper
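Since Benchmark.register now forwards arbitrary keyword arguments into BenchmarkMeta, the adapter modules in this release register themselves with the new string-based model_adapter, output_types and pretty_name fields. A condensed sketch of such a registration, modeled on the ARC/C-Eval adapters in this diff; the benchmark name, dataset id and adapter class are placeholders:

from evalscope.benchmarks import Benchmark, DataAdapter
from evalscope.constants import OutputType


@Benchmark.register(
    name='my_mcq_bench',                       # placeholder benchmark name
    pretty_name='My-MCQ-Bench',
    dataset_id='my-org/my-mcq-dataset',        # placeholder dataset id
    model_adapter=OutputType.MULTIPLE_CHOICE,  # default adapter is now a string constant
    output_types=[OutputType.MULTIPLE_CHOICE, OutputType.GENERATION],
    subset_list=['default'],
    metric_list=['AverageAccuracy'],
    few_shot_num=0,
    train_split=None,
    eval_split='test',
    prompt_template='{query}',
)
class MyMCQAdapter(DataAdapter):
    # A real adapter would also implement gen_prompt / get_gold_answer /
    # parse_pred_result / match, as the adapters in this diff do.

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.choices = ['A', 'B', 'C', 'D']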
evalscope/benchmarks/ceval/ceval_adapter.py
CHANGED
@@ -3,9 +3,8 @@ import csv
 import os

 from evalscope.benchmarks import Benchmark, DataAdapter
-from evalscope.constants import EvalType
+from evalscope.constants import EvalType, OutputType
 from evalscope.metrics.metrics import exact_match
-from evalscope.models import MultiChoiceModelAdapter
 from evalscope.utils import ResponseParser
 from evalscope.utils.logger import get_logger

@@ -126,8 +125,10 @@ SUBJECT_MAPPING = {

 @Benchmark.register(
     name='ceval',
+    pretty_name='C-Eval',
     dataset_id='modelscope/ceval-exam',
-    model_adapter=
+    model_adapter=OutputType.MULTIPLE_CHOICE,
+    output_types=[OutputType.MULTIPLE_CHOICE, OutputType.GENERATION],
     subset_list=SUBSET_LIST,
     metric_list=['AverageAccuracy'],
     few_shot_num=0,
@@ -137,8 +138,6 @@ SUBJECT_MAPPING = {
 )
 class CEVALAdapter(DataAdapter):

-    choices = ['A', 'B', 'C', 'D']
-
     def __init__(self, **kwargs):

         few_shot_num = kwargs.get('few_shot_num', 0)
@@ -148,6 +147,7 @@ class CEVALAdapter(DataAdapter):
         super().__init__(**kwargs)

         self.category_map = {k: v[-1] for k, v in SUBJECT_MAPPING.items()}
+        self.choices = ['A', 'B', 'C', 'D']

     def load_from_disk(self, dataset_name_or_path, subset_list, work_dir, **kwargs) -> dict:
         data_dict = {}
@@ -207,7 +207,7 @@ class CEVALAdapter(DataAdapter):
         subject_name: str = SUBJECT_MAPPING.get(subset_name)[1] if SUBJECT_MAPPING.get(subset_name) else subset_name
         full_prompt = self.prompt_template.format(subset_name=subject_name, query=query)

-        return
+        return self.gen_prompt_data(full_prompt)

     def get_gold_answer(self, input_d: dict) -> str:
         # Get the gold choice
@@ -225,22 +225,17 @@ class CEVALAdapter(DataAdapter):
         Returns:
             The parsed answer. Depending on the dataset. Usually a string for chat.
         """
-        if
+        if self.model_adapter == OutputType.MULTIPLE_CHOICE:
             return result
-        elif eval_type == EvalType.SERVICE:
-            return ResponseParser.parse_first_option_with_choices(result, self.choices)
-        elif eval_type == EvalType.CUSTOM:
-            return ResponseParser.parse_first_option_with_choices(result, self.choices)
         else:
-
+            return ResponseParser.parse_first_option_with_choices(text=result, options=self.choices)

     def match(self, gold: str, pred: str) -> float:
         return exact_match(gold=gold, pred=pred)

-
-    def _format_example(cls, input_d: dict, include_answer=True):
+    def _format_example(self, input_d: dict, include_answer=True):
         example = '问题:' + input_d['question']
-        for choice in
+        for choice in self.choices:
             example += f'\n{choice}. {input_d[f"{choice}"]}'

         if include_answer:
evalscope/benchmarks/cmmlu/cmmlu_adapter.py
CHANGED
@@ -4,9 +4,8 @@ import csv
 import os

 from evalscope.benchmarks import Benchmark, DataAdapter
-from evalscope.constants import EvalType
+from evalscope.constants import EvalType, OutputType
 from evalscope.metrics import exact_match
-from evalscope.models import MultiChoiceModelAdapter
 from evalscope.utils import ResponseParser
 from evalscope.utils.logger import get_logger

@@ -103,8 +102,10 @@ SUBJECT_MAPPING = {

 @Benchmark.register(
     name='cmmlu',
+    pretty_name='C-MMLU',
     dataset_id='modelscope/cmmlu',
-    model_adapter=
+    model_adapter=OutputType.MULTIPLE_CHOICE,
+    output_types=[OutputType.MULTIPLE_CHOICE, OutputType.GENERATION],
     subset_list=SUBSET_LIST,
     metric_list=['AverageAccuracy'],
     few_shot_num=5,
@@ -114,12 +115,11 @@ SUBJECT_MAPPING = {
 )
 class CMMLUAdapter(DataAdapter):

-    choices = ['A', 'B', 'C', 'D']
-
     def __init__(self, **kwargs):
         super().__init__(**kwargs)

         self.category_map = {k: v[-1] for k, v in SUBJECT_MAPPING.items()}
+        self.choices = ['A', 'B', 'C', 'D']

     def load_from_disk(self, dataset_name_or_path, subset_list, work_dir, **kwargs) -> dict:
         data_dict = {}
@@ -172,7 +172,7 @@ class CMMLUAdapter(DataAdapter):

         full_prompt = self.prompt_template.format(subset_name=self._format_subject(subset_name), query=context.strip())

-        return
+        return self.gen_prompt_data(full_prompt)

     def get_gold_answer(self, input_d: dict) -> str:
         # Get the gold choice
@@ -190,26 +190,21 @@ class CMMLUAdapter(DataAdapter):
         Returns:
             The parsed answer. Depending on the dataset. Usually a string for chat.
         """
-        if
+        if self.model_adapter == OutputType.MULTIPLE_CHOICE:
             return result
-        elif eval_type == EvalType.SERVICE:
-            return ResponseParser.parse_first_option_with_choices(result, self.choices)
-        elif eval_type == EvalType.CUSTOM:
-            return ResponseParser.parse_first_option_with_choices(result, self.choices)
         else:
-
+            return ResponseParser.parse_first_option_with_choices(text=result, options=self.choices)

     def match(self, gold: str, pred: str) -> float:
         return exact_match(gold=gold, pred=pred)

-
-    def _generate_prompt(cls, input_d: dict, include_answer=True) -> str:
+    def _generate_prompt(self, input_d: dict, include_answer=True) -> str:

         input_choices: list = [input_d['A'], input_d['B'], input_d['C'], input_d['D']]

         example: str = input_d['Question']
-        for j in range(len(
-        example += '\n{}. {}'.format(
+        for j in range(len(self.choices)):
+            example += '\n{}. {}'.format(self.choices[j], input_choices[j])

         example += '\nAnswer:'
         if include_answer:
evalscope/benchmarks/competition_math/competition_math_adapter.py
CHANGED
@@ -18,12 +18,12 @@ logger = get_logger()

 @Benchmark.register(
     name='competition_math',
+    pretty_name='MATH',
     dataset_id='modelscope/competition_math',
-    model_adapter=ChatGenerationModelAdapter,
     subset_list=['Level 1', 'Level 2', 'Level 3', 'Level 4', 'Level 5'],
     metric_list=['AveragePass@1'],
     few_shot_num=4,
-    train_split=
+    train_split=None,
     eval_split='test',
     prompt_template='{query}\nPlease reason step by step, and put your final answer within \\boxed{{}}.',
 )
@@ -43,7 +43,8 @@ class CompetitionMathAdapter(DataAdapter):
     def load(self, **kwargs):
         # default load all levels
         kwargs['subset_list'] = ['default']
-
+        data_dict = super().load(**kwargs)
+        return self.reformat_subset(data_dict, subset_key='level')

     def load_from_disk(self, dataset_name_or_path, subset_list, work_dir, **kwargs) -> dict:
         data_dict = defaultdict(dict)
@@ -57,27 +58,12 @@ class CompetitionMathAdapter(DataAdapter):
             split_data = []
             for file_path in split_files:
                 if os.path.exists(file_path):
-                    with open(file_path, 'r') as f:
+                    with open(file_path, 'r', encoding='utf-8') as f:
                         split_data.append(json.load(f))
             data_dict[subset_name][split_name] = split_data

         return data_dict

-    def gen_prompts(self, data_dict: dict) -> dict:
-        res_dict: dict = defaultdict(list)
-
-        # use level as subset
-        for sub_name, sub_data_dict in data_dict.items():
-            for sample_d in sub_data_dict[self.eval_split]:
-                level = sample_d['level']
-                if level not in self.subset_list:
-                    continue
-                prompt_d = self.gen_prompt(input_d=sample_d, few_shot_list=None)
-                prompt_d[AnswerKeys.RAW_INPUT] = sample_d
-                res_dict[level].append(prompt_d)
-
-        return res_dict
-
     def gen_prompt(self, input_d: dict, few_shot_list: list, **kwargs) -> dict:
         """
         Generate the prompt for the model input.
@@ -95,7 +81,7 @@ class CompetitionMathAdapter(DataAdapter):
         use_fewshot = self.few_shot_num > 0
         query = self._generate_prompt(input_d, use_fewshot=use_fewshot)
         full_prompt = self.prompt_template.format(query=query)
-        return
+        return self.gen_prompt_data(full_prompt)

     def get_gold_answer(self, input_d: dict) -> str:
         # Extract the gold answer from the input dict.