evalscope 0.11.0__py3-none-any.whl → 0.12.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of evalscope might be problematic.

Files changed (89)
  1. evalscope/arguments.py +3 -1
  2. evalscope/benchmarks/{aime24 → aime}/aime24_adapter.py +3 -3
  3. evalscope/benchmarks/aime/aime25_adapter.py +49 -0
  4. evalscope/benchmarks/arc/arc_adapter.py +14 -17
  5. evalscope/benchmarks/bbh/bbh_adapter.py +6 -11
  6. evalscope/benchmarks/benchmark.py +12 -10
  7. evalscope/benchmarks/ceval/ceval_adapter.py +10 -15
  8. evalscope/benchmarks/cmmlu/cmmlu_adapter.py +11 -16
  9. evalscope/benchmarks/competition_math/competition_math_adapter.py +6 -20
  10. evalscope/benchmarks/data_adapter.py +82 -19
  11. evalscope/benchmarks/data_collection/data_collection_adapter.py +0 -1
  12. evalscope/benchmarks/general_mcq/general_mcq_adapter.py +15 -22
  13. evalscope/benchmarks/general_qa/general_qa_adapter.py +29 -16
  14. evalscope/benchmarks/gpqa/gpqa_adapter.py +13 -8
  15. evalscope/benchmarks/gsm8k/gsm8k_adapter.py +3 -4
  16. evalscope/benchmarks/hellaswag/hellaswag_adapter.py +8 -12
  17. evalscope/benchmarks/humaneval/humaneval_adapter.py +2 -2
  18. evalscope/benchmarks/ifeval/ifeval_adapter.py +3 -4
  19. evalscope/benchmarks/iquiz/iquiz_adapter.py +9 -5
  20. evalscope/benchmarks/math_500/math_500_adapter.py +9 -4
  21. evalscope/benchmarks/mmlu/mmlu_adapter.py +11 -16
  22. evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +24 -36
  23. evalscope/benchmarks/musr/__init__.py +0 -0
  24. evalscope/benchmarks/musr/musr_adapter.py +71 -0
  25. evalscope/benchmarks/process_bench/__init__.py +0 -0
  26. evalscope/benchmarks/process_bench/critique_template.txt +13 -0
  27. evalscope/benchmarks/process_bench/process_bench_adapter.py +99 -0
  28. evalscope/benchmarks/race/race_adapter.py +12 -16
  29. evalscope/benchmarks/simple_qa/__init__.py +0 -0
  30. evalscope/benchmarks/simple_qa/simple_qa_adapter.py +20 -0
  31. evalscope/benchmarks/super_gpqa/__init__.py +0 -0
  32. evalscope/benchmarks/super_gpqa/five_shot_prompt.txt +89 -0
  33. evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py +191 -0
  34. evalscope/benchmarks/super_gpqa/utils.py +90 -0
  35. evalscope/benchmarks/super_gpqa/zero_shot_prompt.txt +3 -0
  36. evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +3 -4
  37. evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +7 -14
  38. evalscope/benchmarks/utils.py +43 -0
  39. evalscope/cli/start_app.py +4 -1
  40. evalscope/cli/start_eval.py +4 -3
  41. evalscope/cli/start_perf.py +4 -2
  42. evalscope/collections/evaluator.py +16 -1
  43. evalscope/config.py +13 -3
  44. evalscope/constants.py +7 -0
  45. evalscope/evaluator/evaluator.py +3 -1
  46. evalscope/metrics/__init__.py +2 -1
  47. evalscope/metrics/metrics.py +23 -2
  48. evalscope/metrics/named_metrics.py +1 -0
  49. evalscope/models/__init__.py +2 -1
  50. evalscope/models/base_adapter.py +32 -6
  51. evalscope/models/chat_adapter.py +4 -1
  52. evalscope/models/choice_adapter.py +4 -0
  53. evalscope/models/custom_adapter.py +2 -0
  54. evalscope/models/local_model.py +3 -2
  55. evalscope/models/register.py +28 -0
  56. evalscope/models/server_adapter.py +107 -29
  57. evalscope/perf/__init__.py +0 -1
  58. evalscope/perf/arguments.py +18 -8
  59. evalscope/perf/http_client.py +8 -6
  60. evalscope/perf/plugin/api/openai_api.py +11 -1
  61. evalscope/perf/utils/analysis_result.py +1 -1
  62. evalscope/perf/utils/benchmark_util.py +6 -2
  63. evalscope/report/app.py +15 -8
  64. evalscope/report/combinator.py +2 -2
  65. evalscope/run.py +6 -5
  66. evalscope/third_party/thinkbench/__init__.py +3 -0
  67. evalscope/third_party/thinkbench/eval.py +429 -0
  68. evalscope/third_party/thinkbench/infer.py +130 -0
  69. evalscope/third_party/thinkbench/resources/critique_template.txt +17 -0
  70. evalscope/third_party/thinkbench/resources/reformat_template.txt +31 -0
  71. evalscope/third_party/thinkbench/tools/__init__.py +0 -0
  72. evalscope/third_party/thinkbench/tools/llm.py +48 -0
  73. evalscope/third_party/thinkbench/tools/utils.py +13 -0
  74. evalscope/third_party/toolbench_static/llm/swift_infer.py +50 -20
  75. evalscope/utils/chat_service.py +1 -0
  76. evalscope/utils/filters.py +59 -0
  77. evalscope/utils/logger.py +3 -3
  78. evalscope/utils/model_utils.py +17 -1
  79. evalscope/utils/utils.py +45 -45
  80. evalscope/version.py +2 -2
  81. {evalscope-0.11.0.dist-info → evalscope-0.12.1.dist-info}/METADATA +14 -5
  82. {evalscope-0.11.0.dist-info → evalscope-0.12.1.dist-info}/RECORD +89 -65
  83. tests/cli/test_collection.py +1 -1
  84. tests/cli/test_run.py +151 -32
  85. /evalscope/benchmarks/{aime24 → aime}/__init__.py +0 -0
  86. {evalscope-0.11.0.dist-info → evalscope-0.12.1.dist-info}/LICENSE +0 -0
  87. {evalscope-0.11.0.dist-info → evalscope-0.12.1.dist-info}/WHEEL +0 -0
  88. {evalscope-0.11.0.dist-info → evalscope-0.12.1.dist-info}/entry_points.txt +0 -0
  89. {evalscope-0.11.0.dist-info → evalscope-0.12.1.dist-info}/top_level.txt +0 -0

evalscope/benchmarks/mmlu/mmlu_adapter.py
@@ -3,9 +3,8 @@ import csv
 import os
 
 from evalscope.benchmarks import Benchmark, DataAdapter
-from evalscope.constants import EvalType
+from evalscope.constants import EvalType, OutputType
 from evalscope.metrics import exact_match
-from evalscope.models import MultiChoiceModelAdapter
 from evalscope.utils import ResponseParser
 from evalscope.utils.logger import get_logger
 
@@ -136,8 +135,10 @@ SUBJECT_MAPPING = {
 
 @Benchmark.register(
     name='mmlu',
+    pretty_name='MMLU',
     dataset_id='modelscope/mmlu',
-    model_adapter=MultiChoiceModelAdapter,
+    model_adapter=OutputType.MULTIPLE_CHOICE,
+    output_types=[OutputType.MULTIPLE_CHOICE, OutputType.GENERATION],
     subset_list=SUBSET_LIST,
     metric_list=['AverageAccuracy'],
     few_shot_num=5,
@@ -147,8 +148,6 @@ SUBJECT_MAPPING = {
 )
 class MMLUAdapter(DataAdapter):
 
-    choices = ['A', 'B', 'C', 'D']
-
     def __init__(self, **kwargs):
 
         few_shot_num = kwargs.get('few_shot_num', 5)
@@ -159,6 +158,7 @@ class MMLUAdapter(DataAdapter):
         super().__init__(**kwargs)
 
         self.category_map = {k: v[-1] for k, v in SUBJECT_MAPPING.items()}
+        self.choices = ['A', 'B', 'C', 'D']
 
     def load_from_disk(self, dataset_name_or_path, subset_list, work_dir, **kwargs) -> dict:
         data_dict = {}
@@ -227,7 +227,7 @@ class MMLUAdapter(DataAdapter):
 
         full_prompt = self.prompt_template.format(subset_name=self._format_subject(subset_name), query=query)
 
-        return {'data': [full_prompt], 'multi_choices': self.choices, 'system_prompt': self.system_prompt}
+        return self.gen_prompt_data(full_prompt)
 
     def get_gold_answer(self, input_d: dict) -> str:
         # Get the gold choice
@@ -245,26 +245,21 @@ class MMLUAdapter(DataAdapter):
         Returns:
             The parsed answer. Depending on the dataset. Usually a string for chat.
         """
-        if eval_type == EvalType.CHECKPOINT:
+        if self.model_adapter == OutputType.MULTIPLE_CHOICE:
             return result
-        elif eval_type == EvalType.SERVICE:
-            return ResponseParser.parse_first_option_with_choices(result, self.choices)
-        elif eval_type == EvalType.CUSTOM:
-            return ResponseParser.parse_first_option_with_choices(result, self.choices)
         else:
-            raise ValueError(f'Invalid eval_type: {eval_type}')
+            return ResponseParser.parse_first_option(result, self.choices)
 
     def match(self, gold: str, pred: str) -> float:
         return exact_match(gold=gold, pred=pred)
 
-    @classmethod
-    def _generate_prompt(cls, input_d: dict, include_answer=True) -> str:
+    def _generate_prompt(self, input_d: dict, include_answer=True) -> str:
 
         input_choices: list = [input_d['A'], input_d['B'], input_d['C'], input_d['D']]
 
         example: str = input_d['input']
-        for j in range(len(cls.choices)):
-            example += '\n{}. {}'.format(cls.choices[j], input_choices[j])
+        for j in range(len(self.choices)):
+            example += '\n{}. {}'.format(self.choices[j], input_choices[j])
 
         example += '\nAnswer:'
         if include_answer:
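
The hunks above replace the old model-adapter classes (MultiChoiceModelAdapter, ChatGenerationModelAdapter) with OutputType constants plus a per-benchmark output_types list. The following is a minimal sketch of how a benchmark would be registered under this new pattern, assembled only from the APIs visible in this diff; the benchmark name, dataset id, and record fields ('question', 'options', 'answer') are hypothetical placeholders, not part of the release.

# Minimal sketch of the 0.12.x registration pattern; names marked as placeholders are made up.
from evalscope.benchmarks import Benchmark, DataAdapter
from evalscope.constants import EvalType, OutputType
from evalscope.metrics import exact_match
from evalscope.utils import ResponseParser


@Benchmark.register(
    name='my_mcq',                            # hypothetical benchmark name
    dataset_id='my-org/my-mcq-dataset',       # hypothetical dataset id
    model_adapter=OutputType.MULTIPLE_CHOICE, # default adapter, expressed as an OutputType constant
    output_types=[OutputType.MULTIPLE_CHOICE, OutputType.GENERATION],
    subset_list=['default'],
    metric_list=['AverageAccuracy'],
    few_shot_num=0,
    train_split=None,
    eval_split='test',
)
class MyMCQAdapter(DataAdapter):

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.choices = ['A', 'B', 'C', 'D']   # instance attribute, as in the updated adapters

    def gen_prompt(self, input_d: dict, subset_name: str, few_shot_list: list, **kwargs):
        lines = [input_d['question']]
        lines += [f'{c}. {o}' for c, o in zip(self.choices, input_d['options'])]
        lines.append('Answer:')
        return self.gen_prompt_data('\n'.join(lines))

    def get_gold_answer(self, input_d: dict) -> str:
        return input_d['answer']

    def parse_pred_result(self, result: str, raw_input_d: dict = None, eval_type: str = EvalType.CHECKPOINT) -> str:
        # The multiple-choice adapter already yields a letter; generated text must be parsed.
        if self.model_adapter == OutputType.MULTIPLE_CHOICE:
            return result
        return ResponseParser.parse_first_option(result, self.choices)

    def match(self, gold: str, pred: str) -> float:
        return exact_match(gold=gold, pred=pred)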

evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py
@@ -2,9 +2,8 @@ from collections import defaultdict
 from typing import Any, Dict
 
 from evalscope.benchmarks import Benchmark, DataAdapter
-from evalscope.constants import AnswerKeys, EvalType
+from evalscope.constants import EvalType, OutputType
 from evalscope.metrics import exact_match
-from evalscope.models import ChatGenerationModelAdapter
 from evalscope.utils.utils import ResponseParser
 
 SUBSET_LIST = [
@@ -15,8 +14,10 @@ SUBSET_LIST = [
 
 @Benchmark.register(
     name='mmlu_pro',
-    dataset_id='modelscope/mmlu-pro',
-    model_adapter=ChatGenerationModelAdapter,
+    pretty_name='MMLU-Pro',
+    dataset_id='modelscope/MMLU-Pro',
+    model_adapter=OutputType.GENERATION,
+    output_types=[OutputType.MULTIPLE_CHOICE, OutputType.GENERATION],
     subset_list=SUBSET_LIST,
     metric_list=['AverageAccuracy'],
     few_shot_num=5,
@@ -35,41 +36,25 @@ class MMLUProAdapter(DataAdapter):
     def load(self, **kwargs):
         # default load all data
         kwargs['subset_list'] = ['default']
-        return super().load(**kwargs)
+        data_dict = super().load(**kwargs)
+        return self.reformat_subset(data_dict, subset_key='category')
 
-    def gen_prompts(self, data_dict: dict, **kwargs) -> Dict[str, list]:
-        """
-        Generate model prompt from raw input, unify the prompt format for MMLU-Pro benchmark.
-        Return a dict with category as key and list of prompts as value.
-        """
-
-        data_dict = data_dict['default']  # Only one subset for MMLU-Pro
-        fewshot_prompts = self.get_fewshot_examples(data_dict)
-
-        # Use the category as key to group the prompts
-        res_dict = defaultdict(list)
-        # generate prompts for each test sample
-        for entry in data_dict[self.eval_split]:
-            subset_name = entry['category']
-            if subset_name not in self.subset_list:
-                continue
-            prefix = fewshot_prompts[subset_name]
-            query = prefix + 'Q: ' + entry['question'] + '\n' + \
-                self.__form_options(entry['options']) + '\n'
-
-            full_prompt = self.prompt_template.format(subset_name=subset_name, query=query)
-            prompt_d = {'data': [full_prompt], 'system_prompt': self.system_prompt, AnswerKeys.RAW_INPUT: entry}
+    def gen_prompt(self, input_d: Dict, subset_name: str, few_shot_list: list, **kwargs) -> Any:
+        if self.few_shot_num > 0:
+            prefix = self.format_fewshot_examples(few_shot_list)
+        else:
+            prefix = ''
+        query = prefix + 'Q: ' + input_d['question'] + '\n' + \
+            self.__form_options(input_d['options']) + '\n'
 
-            res_dict[subset_name].append(prompt_d)
-        return res_dict
+        full_prompt = self.prompt_template.format(subset_name=subset_name, query=query)
+        return self.gen_prompt_data(full_prompt)
 
-    def get_fewshot_examples(self, data_dict: dict):
+    def format_fewshot_examples(self, few_shot_list):
         # load few-shot prompts for each category
-        prompts = {c: '' for c in self.subset_list}
-        for index, d in enumerate(data_dict[self.train_split]):
-            if index >= self.few_shot_num:
-                break
-            prompts[d['category']] += 'Q:' + ' ' + d['question'] + '\n' + \
+        prompts = ''
+        for index, d in enumerate(few_shot_list):
+            prompts += 'Q: ' + d['question'] + '\n' + \
                 self.__form_options(d['options']) + '\n' + \
                 d['cot_content'] + '\n\n'
         return prompts
@@ -104,7 +89,10 @@ class MMLUProAdapter(DataAdapter):
         Returns:
             The parsed answer. Depending on the dataset. Usually a string for chat.
         """
-        return ResponseParser.parse_first_option(result)
+        if self.model_adapter == OutputType.MULTIPLE_CHOICE:
+            return result
+        else:
+            return ResponseParser.parse_first_option(result)
 
     def match(self, gold: str, pred: str) -> float:
         """

evalscope/benchmarks/musr/__init__.py (file without changes)

evalscope/benchmarks/musr/musr_adapter.py (new file)
@@ -0,0 +1,71 @@
+import ast
+from typing import Any
+
+from evalscope.benchmarks import Benchmark, DataAdapter
+from evalscope.constants import EvalType, OutputType
+from evalscope.metrics import exact_match
+from evalscope.utils.utils import ResponseParser
+
+
+@Benchmark.register(
+    name='musr',
+    pretty_name='MuSR',
+    dataset_id='AI-ModelScope/MuSR',
+    model_adapter=OutputType.GENERATION,
+    output_types=[OutputType.MULTIPLE_CHOICE, OutputType.GENERATION],
+    subset_list=['murder_mysteries', 'object_placements', 'team_allocation'],
+    metric_list=['AverageAccuracy'],
+    few_shot_num=0,
+    train_split=None,
+    eval_split='test',
+    prompt_template=
+    '{narrative}\n\n{question}\n\n{choices}\nThink step by step and then finish your answer with \"the answer is (X)\" where X is the correct letter choice.',  # noqa: E501
+)
+class MuSRAdapter(DataAdapter):
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+        self.choices = ['A', 'B', 'C', 'D', 'E', 'F']
+
+    def load(self, **kwargs):
+        # default load all levels
+        kwargs['split_as_subset'] = True
+        data_dict = super().load(**kwargs)
+        return data_dict
+
+    def gen_prompt(self, input_d: dict, subset_name: str, few_shot_list: list, **kwargs) -> Any:
+
+        choices = self.format_choice(ast.literal_eval(input_d['choices']))
+
+        full_prompt = self.prompt_template.format(
+            narrative=input_d['narrative'], question=input_d['question'], choices=choices)
+
+        return self.gen_prompt_data(full_prompt)
+
+    def format_choice(self, options: list):
+        option_str = ''
+        for opt, choice in zip(options, self.choices):
+            option_str += f'({choice}): {opt}\n'
+        return option_str
+
+    def get_gold_answer(self, input_d: dict) -> str:
+        """
+        Parse the raw input labels (gold).
+        """
+        return self.choices[input_d['answer_index']]
+
+    def parse_pred_result(self, result: str, raw_input_d: dict = None, eval_type: str = EvalType.CHECKPOINT) -> str:
+        """
+        Parse the predicted result and extract proper answer.
+        """
+        if self.model_adapter == OutputType.MULTIPLE_CHOICE:
+            return result
+        else:
+            return ResponseParser.parse_first_option(result)
+
+    def match(self, gold: str, pred: str) -> float:
+        """
+        Match the gold answer and the predicted answer.
+        """
+        return exact_match(gold=gold, pred=pred)
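
As a quick illustration of what format_choice and get_gold_answer produce, here is a toy walk-through outside the adapter. The record below is made up and far shorter than real MuSR data.

# Toy walk-through (not part of the diff) of MuSR choice rendering and gold-label lookup.
import ast

record = {
    'choices': "['The gardener', 'The butler', 'The chef']",  # stored as a stringified list
    'answer_index': 1,
}

options = ast.literal_eval(record['choices'])   # -> ['The gardener', 'The butler', 'The chef']
letters = ['A', 'B', 'C', 'D', 'E', 'F']

# Mirrors format_choice: "(A): The gardener\n(B): The butler\n(C): The chef\n"
choice_block = ''.join(f'({letter}): {opt}\n' for opt, letter in zip(options, letters))

# Mirrors get_gold_answer: answer_index 1 -> 'B'
gold = letters[record['answer_index']]
print(choice_block, gold)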

evalscope/benchmarks/process_bench/__init__.py (file without changes)

evalscope/benchmarks/process_bench/critique_template.txt (new file)
@@ -0,0 +1,13 @@
+The following is a math problem and a solution (split into paragraphs, enclosed with tags and indexed from 0):
+
+[Math Problem]
+
+{problem}
+
+[Solution]
+
+{tagged_response}
+
+Your task is to review and critique the solution paragraph by paragraph. Once you identify an error in a paragraph, return the index of the paragraph where the earliest error occurs. Otherwise, return the index of -1 (which typically denotes "not found").
+
+Please put your final answer (i.e., the index) in \boxed{{}}.

evalscope/benchmarks/process_bench/process_bench_adapter.py (new file)
@@ -0,0 +1,99 @@
+import os
+import re
+from typing import Any, List
+
+from evalscope.benchmarks import Benchmark, DataAdapter
+from evalscope.constants import AnswerKeys, EvalType
+from evalscope.metrics import Metric, mean, metric_registry, simple_f1_score
+
+cur_path = os.path.dirname(os.path.abspath(__file__))
+
+
+@Benchmark.register(
+    name='process_bench',
+    pretty_name='ProcessBench',
+    dataset_id='Qwen/ProcessBench',
+    subset_list=['gsm8k', 'math', 'olympiadbench', 'omnimath'],
+    metric_list=['error_acc', 'correct_acc', 'simple_f1_score'],
+    few_shot_num=0,
+    train_split=None,
+    eval_split='test',
+)
+class ProcessBenchAdapter(DataAdapter):
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+        self.prompt_template = open(os.path.join(cur_path, 'critique_template.txt'), encoding='utf-8').read()
+
+        # register metrics
+        metric_registry.register(Metric(name='error_acc', object=mean))
+        metric_registry.register(Metric(name='correct_acc', object=mean))
+        metric_registry.register(Metric(name='simple_f1_score', object=simple_f1_score))
+
+    def load(self, **kwargs):
+        # default load all levels
+        kwargs['split_as_subset'] = True
+        data_dict = super().load(**kwargs)
+        return data_dict
+
+    def gen_prompt(self, input_d: dict, subset_name: str, few_shot_list: list, **kwargs) -> Any:
+
+        problem = input_d['problem']
+        steps = input_d['steps']
+        tagged_response = ''
+        for sdx, step in enumerate(steps):
+            tagged_response += f'<paragraph_{sdx}>\n{step}\n</paragraph_{sdx}>\n\n'
+        tagged_response = tagged_response.strip()
+
+        full_prompt = self.prompt_template.format(problem=problem, tagged_response=tagged_response)
+
+        return self.gen_prompt_data(full_prompt)
+
+    def get_gold_answer(self, input_d: dict) -> str:
+        """
+        Parse the raw input labels (gold).
+        """
+        return int(input_d['label'])
+
+    def parse_pred_result(self, result: str, raw_input_d: dict = None, eval_type: str = EvalType.CHECKPOINT) -> str:
+        """
+        Parse the predicted result and extract proper answer.
+        """
+        pred = ProcessBenchAdapter.extract_answer(result)
+        try:
+            pred = int(pred)
+        except Exception:
+            pred = None
+        return pred
+
+    def match(self, gold: int, pred: int) -> float:
+        """
+        Match the gold answer and the predicted answer.
+        """
+        return gold == pred
+
+    def compute_metric(self, review_res_list: list, **kwargs) -> List[dict]:
+        reviews_list = kwargs['reviews_list']
+        error_data = []
+        correct_data = []
+        for res, raw in zip(review_res_list, reviews_list):
+            if raw[AnswerKeys.RAW_INPUT]['label'] == -1:
+                correct_data.append(res)
+            else:
+                error_data.append(res)
+        data = {}
+        if len(correct_data) != 0:
+            data.update({'correct_acc': correct_data})
+        if len(error_data) != 0:
+            data.update({'error_acc': error_data})
+        data.update({'simple_f1_score': (correct_data, error_data)})
+        return super().compute_metric(data)
+
+    @staticmethod
+    def extract_answer(solution_text: str):
+        boxed_pattern = r'\\boxed\{([^}]*)\}'
+        matches = re.findall(boxed_pattern, solution_text)
+        if matches:
+            return matches[-1].strip()
+        return None
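
The judge's verdict is recovered from the last \boxed{...} in its response. A small standalone check of that extraction logic follows; the sample responses are made up and not part of the diff.

# Standalone check of the boxed-index extraction used by ProcessBenchAdapter.extract_answer.
import re

def extract_answer(solution_text: str):
    matches = re.findall(r'\\boxed\{([^}]*)\}', solution_text)
    return matches[-1].strip() if matches else None

print(extract_answer('Paragraph 2 drops a factor of 2, so the earliest error is \\boxed{2}.'))  # '2'
print(extract_answer('Every step checks out: \\boxed{-1}.'))  # '-1', i.e. no error found
# parse_pred_result() then casts these strings to int, or returns None if extraction fails.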

evalscope/benchmarks/race/race_adapter.py
@@ -3,9 +3,8 @@
 import os
 
 from evalscope.benchmarks import Benchmark, DataAdapter
-from evalscope.constants import EvalType
+from evalscope.constants import EvalType, OutputType
 from evalscope.metrics import exact_match
-from evalscope.models import MultiChoiceModelAdapter
 from evalscope.utils import ResponseParser
 from evalscope.utils.io_utils import jsonl_to_list
 from evalscope.utils.logger import get_logger
@@ -17,8 +16,10 @@ logger = get_logger()
 
 @Benchmark.register(
     name='race',
+    pretty_name='RACE',
     dataset_id='modelscope/race',
-    model_adapter=MultiChoiceModelAdapter,
+    model_adapter=OutputType.MULTIPLE_CHOICE,
+    output_types=[OutputType.MULTIPLE_CHOICE, OutputType.GENERATION],
     subset_list=['high', 'middle'],
     metric_list=['AverageAccuracy'],
     few_shot_num=3,
@@ -27,8 +28,6 @@ logger = get_logger()
 )
 class RACEAdapter(DataAdapter):
 
-    choices = ['A', 'B', 'C', 'D']
-
     def __init__(self, **kwargs):
         few_shot_num = kwargs.get('few_shot_num', 3)
         if few_shot_num > 3:
@@ -37,6 +36,8 @@ class RACEAdapter(DataAdapter):
 
         super().__init__(**kwargs)
 
+        self.choices = ['A', 'B', 'C', 'D']
+
     def load_from_disk(self, dataset_name_or_path, subset_list, work_dir, **kwargs) -> dict:
         data_dict = {}
         for subset_name in subset_list:
@@ -82,7 +83,7 @@ class RACEAdapter(DataAdapter):
 
         full_prompt: str = context.strip() + self._generate_prompt(input_d=input_d, include_answer=False)
 
-        return {'data': [full_prompt], 'multi_choices': self.choices, 'system_prompt': self.system_prompt}
+        return self.gen_prompt_data(full_prompt)
 
     def get_gold_answer(self, input_d: dict) -> str:
         # Get the gold choice
@@ -100,26 +101,21 @@ class RACEAdapter(DataAdapter):
         Returns:
             The parsed answer. Depending on the dataset. Usually a string for chat.
         """
-        if eval_type == EvalType.CHECKPOINT:
+        if self.model_adapter == OutputType.MULTIPLE_CHOICE:
            return result
-        elif eval_type == EvalType.SERVICE:
-            return ResponseParser.parse_first_option_with_choices(result, self.choices)  # TODO: to be checked !
-        elif eval_type == EvalType.CUSTOM:
-            return ResponseParser.parse_first_option_with_choices(result, self.choices)  # TODO: to be checked !
        else:
-            raise ValueError(f'Unknown eval_type: {eval_type}')
+            return ResponseParser.parse_first_option_with_choices(result, self.choices)
 
     def match(self, gold: str, pred: str) -> float:
         return exact_match(gold=gold, pred=pred)
 
-    @classmethod
-    def _generate_prompt(cls, input_d: dict, include_answer=True) -> str:
+    def _generate_prompt(self, input_d: dict, include_answer=True) -> str:
 
         input_choices: list = input_d['options']
 
         example: str = 'Article:\n{}\nQuestion:\n{}'.format(input_d['article'], input_d['question'])
-        for j in range(len(cls.choices)):
-            example += '\n{}. {}'.format(cls.choices[j], input_choices[j])
+        for j in range(len(self.choices)):
+            example += '\n{}. {}'.format(self.choices[j], input_choices[j])
 
         example += '\nAnswer:'
         if include_answer:

evalscope/benchmarks/simple_qa/__init__.py (file without changes)

evalscope/benchmarks/simple_qa/simple_qa_adapter.py (new file)
@@ -0,0 +1,20 @@
+from evalscope.benchmarks import Benchmark, DataAdapter
+from evalscope.utils.logger import get_logger
+
+# flake8: noqa
+
+logger = get_logger()
+
+
+@Benchmark.register(
+    name='simple_qa',
+    pretty_name='SimpleQA',
+    dataset_id='AI-ModelScope/SimpleQA',
+    metric_list=['AverageAccuracy'],
+    few_shot_num=0,
+    train_split=None,
+    eval_split='test')
+class SimpleQAAdapter(DataAdapter):
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)

evalscope/benchmarks/super_gpqa/__init__.py (file without changes)

evalscope/benchmarks/super_gpqa/five_shot_prompt.txt (new file)
@@ -0,0 +1,89 @@
+Answer the following multiple choice question. There is only one correct answer. The last line of your response should be in the format 'Answer: $LETTER' (without quotes), where LETTER is one of A, B, C, D, E, F, G, H, I, or J.
+
+Question:
+A refracting telescope consists of two converging lenses separated by 100 cm. The eye-piece lens has a focal length of 20 cm. The angular magnification of the telescope is
+A) 10
+B) 40
+C) 6
+D) 25
+E) 15
+F) 50
+G) 30
+H) 4
+I) 5
+J) 20
+
+Answer: Let's think step by step. In a refracting telescope, if both lenses are converging, the focus of both lenses must be between the two lenses, and thus the focal lengths of the two lenses must add up to their separation. Since the focal length of one lens is 20 cm, the focal length of the other must be 80 cm. The magnification is the ratio of these two focal lengths, or 4.
+Answer: H.
+
+Question:
+Say the pupil of your eye has a diameter of 5 mm and you have a telescope with an aperture of 50 cm. How much more light can the telescope gather than your eye?
+A) 1000 times more
+B) 50 times more
+C) 5000 times more
+D) 500 times more
+E) 10000 times more
+F) 20000 times more
+G) 2000 times more
+H) 100 times more
+I) 10 times more
+J) N/A
+
+Answer: Let's think step by step. The amount of light a telescope can gather compared to the human eye is proportional to the area of its apertures. The area of a circle is given by the formula $A = \pi \left(\frac{{D}}{{2}}\right)^2$, where $D$ is the diameter. Therefore, the relative light-gathering power is calculated as:
+\[
+\frac{{\left(\frac{{50 \text{{ cm}}}}{{2}}\right)^2}}{{\left(\frac{{5 \text{{ mm}}}}{{2}}\right)^2}} = \frac{{\left(\frac{{50 \text{{ cm}}}}{{0.1 \text{{ cm}}}}\right)^2}}{{\left(\frac{{5 \text{{ mm}}}}{{0.1 \text{{ cm}}}}\right)^2}} = \frac{{500^2}}{{5^2}} = 10000.
+\]
+Answer: E.
+
+Question:
+Where do most short-period comets come from and how do we know?
+A) The Kuiper belt; short period comets tend to be in the plane of the solar system like the Kuiper belt.
+B) The asteroid belt; short period comets tend to come from random directions indicating a spherical distribution of comets called the asteroid belt.
+C) The asteroid belt; short period comets tend to be in the plane of the solar system just like the asteroid belt.
+D) The Oort cloud; short period comets have orbital periods similar to asteroids like Vesta and are found in the plane of the solar system just like the Oort cloud.
+E) The Oort Cloud; short period comets tend to come from random directions indicating a spherical distribution of comets called the Oort Cloud.
+F) The Oort cloud; short period comets tend to be in the plane of the solar system just like the Oort cloud.
+G) The asteroid belt; short period comets have orbital periods similar to asteroids like Vesta and are found in the plane of the solar system just like the asteroid belt.
+Answer: Let's think step by step. Most short-period comets originate from the Kuiper belt. This is deduced from the observation that these comets tend to follow orbits that lie in the plane of the solar system, similar to the distribution of objects in the Kuiper belt itself. Thus, the alignment of these cometary orbits with the ecliptic plane points to their Kuiper belt origin.
+Answer: A.
+
+Question:
+Colors in a soap bubble result from light
+A) dispersion
+B) deflection
+C) refraction
+D) reflection
+E) interference
+F) converted to a different frequency
+G) polarization
+H) absorption
+I) diffraction
+J) transmission
+
+Answer: Let's think step by step. The colorful patterns observed in a soap bubble are caused by the phenomenon of light interference. This occurs when light waves bounce between the two surfaces of the soap film, combining constructively or destructively based on their phase differences and the varying thickness of the film. These interactions result in vibrant color patterns due to variations in the intensity of different wavelengths of light.
+Answer: E.
+
+Question:
+A microwave oven is connected to an outlet, 120 V, and draws a current of 2 amps. At what rate is energy being used by the microwave oven?
+A) 240 W
+B) 120 W
+C) 10 W
+D) 480 W
+E) 360 W
+F) 200 W
+G) 30 W
+H) 150 W
+I) 60 W
+J) 300 W
+
+Answer: Let's think step by step. The rate of energy usage, known as power, in an electrical circuit is calculated by the product of voltage and current. For a microwave oven connected to a 120 V outlet and drawing a current of 2 amps, the power consumption can be calculated as follows:
+\[
+\text{{Power}} = \text{{Voltage}} \times \text{{Current}} = 120 \, \text{{V}} \times 2 \, \text{{A}} = 240 \, \text{{W}}.
+\]
+Therefore, the microwave oven uses energy at a rate of 240 watts.
+Answer: A.
+
+Question:
+{query}
+
+Answer: Let's think step by step.
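
The benchmark names registered by the new adapters shown above ('musr', 'process_bench', 'simple_qa') can be selected like any other dataset. A minimal sketch, assuming evalscope's documented TaskConfig/run_task entry points; the model id is a placeholder.

# Minimal sketch of running the benchmarks added in this release.
from evalscope import TaskConfig, run_task

task_cfg = TaskConfig(
    model='Qwen/Qwen2.5-7B-Instruct',                 # placeholder model id
    datasets=['musr', 'process_bench', 'simple_qa'],  # names registered by the new adapters
    limit=5,                                          # small smoke-test sample per dataset
)
run_task(task_cfg=task_cfg)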