evalscope-0.5.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (165)
  1. evalscope/__init__.py +3 -0
  2. evalscope/backend/__init__.py +3 -0
  3. evalscope/backend/base.py +27 -0
  4. evalscope/backend/opencompass/__init__.py +3 -0
  5. evalscope/backend/opencompass/api_meta_template.py +64 -0
  6. evalscope/backend/opencompass/backend_manager.py +247 -0
  7. evalscope/backend/opencompass/tasks/__init__.py +1 -0
  8. evalscope/backend/opencompass/tasks/eval_api.py +30 -0
  9. evalscope/backend/opencompass/tasks/eval_datasets.py +71 -0
  10. evalscope/backend/vlm_eval_kit/__init__.py +1 -0
  11. evalscope/backend/vlm_eval_kit/backend_manager.py +153 -0
  12. evalscope/benchmarks/__init__.py +4 -0
  13. evalscope/benchmarks/arc/__init__.py +5 -0
  14. evalscope/benchmarks/arc/ai2_arc.py +148 -0
  15. evalscope/benchmarks/arc/arc_adapter.py +231 -0
  16. evalscope/benchmarks/bbh/__init__.py +6 -0
  17. evalscope/benchmarks/bbh/bbh_adapter.py +308 -0
  18. evalscope/benchmarks/bbh/cot_prompts/boolean_expressions.txt +23 -0
  19. evalscope/benchmarks/bbh/cot_prompts/causal_judgement.txt +25 -0
  20. evalscope/benchmarks/bbh/cot_prompts/date_understanding.txt +33 -0
  21. evalscope/benchmarks/bbh/cot_prompts/disambiguation_qa.txt +37 -0
  22. evalscope/benchmarks/bbh/cot_prompts/dyck_languages.txt +72 -0
  23. evalscope/benchmarks/bbh/cot_prompts/formal_fallacies.txt +44 -0
  24. evalscope/benchmarks/bbh/cot_prompts/geometric_shapes.txt +78 -0
  25. evalscope/benchmarks/bbh/cot_prompts/hyperbaton.txt +28 -0
  26. evalscope/benchmarks/bbh/cot_prompts/logical_deduction_five_objects.txt +37 -0
  27. evalscope/benchmarks/bbh/cot_prompts/logical_deduction_seven_objects.txt +37 -0
  28. evalscope/benchmarks/bbh/cot_prompts/logical_deduction_three_objects.txt +37 -0
  29. evalscope/benchmarks/bbh/cot_prompts/movie_recommendation.txt +42 -0
  30. evalscope/benchmarks/bbh/cot_prompts/multistep_arithmetic_two.txt +25 -0
  31. evalscope/benchmarks/bbh/cot_prompts/navigate.txt +43 -0
  32. evalscope/benchmarks/bbh/cot_prompts/object_counting.txt +37 -0
  33. evalscope/benchmarks/bbh/cot_prompts/penguins_in_a_table.txt +41 -0
  34. evalscope/benchmarks/bbh/cot_prompts/reasoning_about_colored_objects.txt +63 -0
  35. evalscope/benchmarks/bbh/cot_prompts/ruin_names.txt +44 -0
  36. evalscope/benchmarks/bbh/cot_prompts/salient_translation_error_detection.txt +40 -0
  37. evalscope/benchmarks/bbh/cot_prompts/snarks.txt +30 -0
  38. evalscope/benchmarks/bbh/cot_prompts/sports_understanding.txt +10 -0
  39. evalscope/benchmarks/bbh/cot_prompts/temporal_sequences.txt +77 -0
  40. evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_five_objects.txt +40 -0
  41. evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_seven_objects.txt +40 -0
  42. evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_three_objects.txt +40 -0
  43. evalscope/benchmarks/bbh/cot_prompts/web_of_lies.txt +28 -0
  44. evalscope/benchmarks/bbh/cot_prompts/word_sorting.txt +17 -0
  45. evalscope/benchmarks/benchmark.py +65 -0
  46. evalscope/benchmarks/ceval/__init__.py +5 -0
  47. evalscope/benchmarks/ceval/ceval_adapter.py +340 -0
  48. evalscope/benchmarks/ceval/ceval_exam.py +159 -0
  49. evalscope/benchmarks/cmmlu/__init__.py +5 -0
  50. evalscope/benchmarks/cmmlu/cmmlu.py +166 -0
  51. evalscope/benchmarks/cmmlu/cmmlu_adapter.py +369 -0
  52. evalscope/benchmarks/competition_math/__init__.py +5 -0
  53. evalscope/benchmarks/competition_math/competition_math.py +88 -0
  54. evalscope/benchmarks/competition_math/competition_math_adapter.py +470 -0
  55. evalscope/benchmarks/data_adapter.py +263 -0
  56. evalscope/benchmarks/general_qa/__init__.py +5 -0
  57. evalscope/benchmarks/general_qa/general_qa_adapter.py +186 -0
  58. evalscope/benchmarks/gsm8k/__init__.py +5 -0
  59. evalscope/benchmarks/gsm8k/gsm8k.py +127 -0
  60. evalscope/benchmarks/gsm8k/gsm8k_adapter.py +236 -0
  61. evalscope/benchmarks/hellaswag/__init__.py +5 -0
  62. evalscope/benchmarks/hellaswag/hellaswag.py +116 -0
  63. evalscope/benchmarks/hellaswag/hellaswag_adapter.py +222 -0
  64. evalscope/benchmarks/humaneval/__init__.py +5 -0
  65. evalscope/benchmarks/humaneval/humaneval.py +82 -0
  66. evalscope/benchmarks/humaneval/humaneval_adapter.py +21 -0
  67. evalscope/benchmarks/mmlu/__init__.py +5 -0
  68. evalscope/benchmarks/mmlu/mmlu.py +174 -0
  69. evalscope/benchmarks/mmlu/mmlu_adapter.py +375 -0
  70. evalscope/benchmarks/race/__init__.py +5 -0
  71. evalscope/benchmarks/race/race.py +118 -0
  72. evalscope/benchmarks/race/race_adapter.py +229 -0
  73. evalscope/benchmarks/trivia_qa/__init__.py +5 -0
  74. evalscope/benchmarks/trivia_qa/trivia_qa.py +104 -0
  75. evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +207 -0
  76. evalscope/benchmarks/truthful_qa/__init__.py +5 -0
  77. evalscope/benchmarks/truthful_qa/truthful_qa.py +167 -0
  78. evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +351 -0
  79. evalscope/cache.py +98 -0
  80. evalscope/cli/__init__.py +1 -0
  81. evalscope/cli/base.py +20 -0
  82. evalscope/cli/cli.py +26 -0
  83. evalscope/cli/start_perf.py +37 -0
  84. evalscope/cli/start_server.py +138 -0
  85. evalscope/config.py +165 -0
  86. evalscope/constants.py +150 -0
  87. evalscope/evaluator/__init__.py +3 -0
  88. evalscope/evaluator/evaluator.py +689 -0
  89. evalscope/evaluator/rating_eval.py +178 -0
  90. evalscope/evaluator/reviewer/__init__.py +1 -0
  91. evalscope/evaluator/reviewer/auto_reviewer.py +411 -0
  92. evalscope/metrics/__init__.py +1 -0
  93. evalscope/metrics/bundled_rouge_score/__init__.py +14 -0
  94. evalscope/metrics/bundled_rouge_score/rouge_scorer.py +342 -0
  95. evalscope/metrics/code_metric.py +104 -0
  96. evalscope/metrics/math_accuracy.py +60 -0
  97. evalscope/metrics/metrics.py +405 -0
  98. evalscope/metrics/rouge_metric.py +129 -0
  99. evalscope/models/__init__.py +4 -0
  100. evalscope/models/custom/__init__.py +4 -0
  101. evalscope/models/custom/custom_model.py +53 -0
  102. evalscope/models/dummy_chat_model.py +50 -0
  103. evalscope/models/model.py +88 -0
  104. evalscope/models/model_adapter.py +586 -0
  105. evalscope/models/openai_model.py +103 -0
  106. evalscope/models/template.py +1446 -0
  107. evalscope/perf/__init__.py +0 -0
  108. evalscope/perf/_logging.py +32 -0
  109. evalscope/perf/api_plugin_base.py +60 -0
  110. evalscope/perf/custom_api.py +87 -0
  111. evalscope/perf/dashscope_api.py +84 -0
  112. evalscope/perf/dataset_plugin_base.py +64 -0
  113. evalscope/perf/datasets/__init__.py +0 -0
  114. evalscope/perf/datasets/line_by_line.py +18 -0
  115. evalscope/perf/datasets/longalpaca_12k.py +20 -0
  116. evalscope/perf/datasets/openqa.py +22 -0
  117. evalscope/perf/how_to_analysis_result.py +24 -0
  118. evalscope/perf/http_client.py +756 -0
  119. evalscope/perf/openai_api.py +130 -0
  120. evalscope/perf/plugin_registry.py +35 -0
  121. evalscope/perf/query_parameters.py +42 -0
  122. evalscope/perf/server_sent_event.py +43 -0
  123. evalscope/preprocess/__init__.py +1 -0
  124. evalscope/preprocess/tokenizers/__init__.py +0 -0
  125. evalscope/preprocess/tokenizers/gpt2_tokenizer.py +221 -0
  126. evalscope/registry/__init__.py +1 -0
  127. evalscope/registry/tasks/arc.yaml +29 -0
  128. evalscope/registry/tasks/bbh.yaml +27 -0
  129. evalscope/registry/tasks/bbh_mini.yaml +27 -0
  130. evalscope/registry/tasks/ceval.yaml +27 -0
  131. evalscope/registry/tasks/ceval_mini.yaml +27 -0
  132. evalscope/registry/tasks/cmmlu.yaml +27 -0
  133. evalscope/registry/tasks/eval_qwen-7b-chat_v100.yaml +28 -0
  134. evalscope/registry/tasks/general_qa.yaml +27 -0
  135. evalscope/registry/tasks/gsm8k.yaml +29 -0
  136. evalscope/registry/tasks/mmlu.yaml +29 -0
  137. evalscope/registry/tasks/mmlu_mini.yaml +27 -0
  138. evalscope/run.py +404 -0
  139. evalscope/run_arena.py +204 -0
  140. evalscope/run_ms.py +140 -0
  141. evalscope/summarizer.py +144 -0
  142. evalscope/third_party/__init__.py +1 -0
  143. evalscope/third_party/toolbench_static/__init__.py +3 -0
  144. evalscope/third_party/toolbench_static/eval.py +219 -0
  145. evalscope/third_party/toolbench_static/infer.py +278 -0
  146. evalscope/third_party/toolbench_static/llm/__init__.py +1 -0
  147. evalscope/third_party/toolbench_static/llm/swift_infer.py +45 -0
  148. evalscope/third_party/toolbench_static/toolbench_static.py +50 -0
  149. evalscope/tools/__init__.py +1 -0
  150. evalscope/tools/combine_reports.py +140 -0
  151. evalscope/tools/gen_mmlu_subject_mapping.py +90 -0
  152. evalscope/tools/rewrite_eval_results.py +95 -0
  153. evalscope/utils/__init__.py +4 -0
  154. evalscope/utils/arena_utils.py +247 -0
  155. evalscope/utils/completion_parsers.py +87 -0
  156. evalscope/utils/logger.py +64 -0
  157. evalscope/utils/task_cfg_parser.py +10 -0
  158. evalscope/utils/task_utils.py +19 -0
  159. evalscope/utils/utils.py +625 -0
  160. evalscope/version.py +4 -0
  161. evalscope-0.5.0.dist-info/METADATA +566 -0
  162. evalscope-0.5.0.dist-info/RECORD +165 -0
  163. evalscope-0.5.0.dist-info/WHEEL +5 -0
  164. evalscope-0.5.0.dist-info/entry_points.txt +3 -0
  165. evalscope-0.5.0.dist-info/top_level.txt +1 -0
evalscope/benchmarks/arc/ai2_arc.py
@@ -0,0 +1,148 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+# Copyright (c) Allen Institute, and its affiliates.
+# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+
+"""AI2 ARC (Abstraction and Reasoning Corpus) for General Artificial Intelligence Benchmark."""
+
+"""AUTO GENERATED, DO NOT EDIT"""
+
+import json
+import os
+import datasets
+
+# flake8: noqa
+
+
+_CITATION = """\
+@article{allenai:arc,
+    author = {Peter Clark and Isaac Cowhey and Oren Etzioni and Tushar Khot and
+              Ashish Sabharwal and Carissa Schoenick and Oyvind Tafjord},
+    title = {Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge},
+    journal = {arXiv:1803.05457v1},
+    year = {2018},
+}
+"""
+
+_DESCRIPTION = """\
+A new dataset of 7,787 genuine grade-school level, multiple-choice science questions, assembled to encourage research in
+advanced question-answering. The dataset is partitioned into a Challenge Set and an Easy Set, where the former contains
+only questions answered incorrectly by both a retrieval-based algorithm and a word co-occurrence algorithm. We are also
+including a corpus of over 14 million science sentences relevant to the task,
+and an implementation of three neural baseline models for this dataset. We pose ARC as a challenge to the community.
+
+ARC-Easy:
+    train: 2251
+    test: 2376
+    validation: 570
+
+ARC-Challenge:
+    train: 1119
+    test: 1172
+    validation: 299
+"""
+
+_URL = 'https://modelscope.oss-cn-beijing.aliyuncs.com/open_data/arc/ARC-V1-Feb2018.zip'
+
+# tasks: ['ARC-Easy', 'ARC-Challenge']
+
+
+class Ai2ArcConfig(datasets.BuilderConfig):
+    """BuilderConfig for Ai2ARC."""
+
+    def __init__(self, **kwargs):
+        """BuilderConfig for Ai2Arc.
+
+        Args:
+            **kwargs: keyword arguments forwarded to super.
+        """
+        super(Ai2ArcConfig, self).__init__(version=datasets.Version('1.0.0', ''), **kwargs)
+
+
+class Ai2Arc(datasets.GeneratorBasedBuilder):
+    """
+    The AI2 Reasoning Challenge (ARC) dataset.
+    Subset: ARC-Easy, ARC-Challenge.
+    """
+
+    VERSION = datasets.Version('1.0.0')
+    BUILDER_CONFIGS = [
+        Ai2ArcConfig(
+            name='ARC-Challenge',
+            description="""\
+            Challenge Set of 2590 “hard” questions (those that both a retrieval and a co-occurrence method fail to answer correctly)
+            """,
+        ),
+        Ai2ArcConfig(
+            name='ARC-Easy',
+            description="""\
+            Easy Set of 5197 questions
+            """,
+        ),
+    ]
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            # This is the description that will appear on the datasets page.
+            description=_DESCRIPTION,
+            # datasets.features.FeatureConnectors
+            features=datasets.Features(
+                {
+                    'id': datasets.Value('string'),
+                    'question': datasets.Value('string'),
+                    'choices': datasets.features.Sequence(
+                        {'text': datasets.Value('string'), 'label': datasets.Value('string')}
+                    ),
+                    'answerKey': datasets.Value('string')
+                    # These are the features of your dataset like images, labels ...
+                }
+            ),
+            # If there's a common (input, target) tuple from the features,
+            # specify them here. They'll be used if as_supervised=True in
+            # builder.as_dataset.
+            supervised_keys=None,
+            # Homepage of the dataset for documentation
+            homepage='https://allenai.org/data/arc',
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+        # dl_manager is a datasets.download.DownloadManager that can be used to
+        # download and extract URLs
+        dl_dir = dl_manager.download_and_extract(_URL)
+        data_dir = os.path.join(dl_dir, 'ARC-V1-Feb2018-2')
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={'filepath': os.path.join(data_dir, self.config.name, self.config.name + '-Train.jsonl')},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={'filepath': os.path.join(data_dir, self.config.name, self.config.name + '-Test.jsonl')},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={'filepath': os.path.join(data_dir, self.config.name, self.config.name + '-Dev.jsonl')},
+            ),
+        ]
+
+    def _generate_examples(self, filepath):
+        """Yields examples."""
+        with open(filepath, encoding='utf-8') as f:
+            for row in f:
+                data = json.loads(row)
+                answerkey = data['answerKey']
+                id_ = data['id']
+                question = data['question']['stem']
+                choices = data['question']['choices']
+                text_choices = [choice['text'] for choice in choices]
+                label_choices = [choice['label'] for choice in choices]
+                yield id_, {
+                    'id': id_,
+                    'answerKey': answerkey,
+                    'question': question,
+                    'choices': {'text': text_choices, 'label': label_choices},
+                }
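The builder above is a standard Hugging Face `datasets` script, so it can be exercised directly for a quick sanity check. A minimal sketch, assuming a local copy of the script, a `datasets` release that still accepts script-based loading (recent versions may require trust_remote_code=True), and network access to the ModelScope mirror URL; the script path below is a placeholder:

import datasets

# Placeholder path to a local copy of the ai2_arc.py builder shown above.
arc = datasets.load_dataset('path/to/ai2_arc.py', 'ARC-Challenge', split='validation')

sample = arc[0]
# Each record follows the features declared in _info():
#   {'id': str, 'question': str, 'choices': {'text': [...], 'label': [...]}, 'answerKey': str}
print(sample['question'])
print(list(zip(sample['choices']['label'], sample['choices']['text'])))
print('gold:', sample['answerKey'])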
evalscope/benchmarks/arc/arc_adapter.py
@@ -0,0 +1,231 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+
+import os
+import json
+from evalscope.benchmarks.data_adapter import DataAdapter
+from evalscope.metrics.metrics import exact_match, weighted_mean
+from evalscope.utils import normalize_score, ResponseParser
+from evalscope.utils.logger import get_logger
+
+# flake8: noqa
+
+logger = get_logger()
+
+DATASET_ID = 'modelscope/ai2_arc'
+
+# task_list = ['ARC-Easy', 'ARC-Challenge']
+SUBSET_LIST = ['ARC-Challenge']
+
+
+class ARCAdapter(DataAdapter):
+
+    choices = ['A', 'B', 'C', 'D']
+
+    def __init__(self,
+                 subset_list: list = None,
+                 metric_list: list = None,
+                 few_shot_num: int = None,
+                 train_split: str = 'train',
+                 eval_split: str = 'test',
+                 prompt_template: str = '',
+                 **kwargs):
+
+        if subset_list is None:
+            subset_list = SUBSET_LIST
+
+        if metric_list is None:
+            metric_list = [{'name': 'WeightedAverageAccuracy', 'object': weighted_mean}]
+
+        if few_shot_num is None:
+            # Use 0-shot by default
+            logger.info(f'Set 0-shot examples by system for ARC.')
+            few_shot_num = 0
+
+        if few_shot_num != 0:
+            logger.warning(f'few_shot_num is recommended to set 0 for ARC, got {few_shot_num}.')
+
+        super().__init__(subset_list=subset_list,
+                         metric_list=metric_list,
+                         few_shot_num=few_shot_num,
+                         train_split=train_split,
+                         eval_split=eval_split,
+                         prompt_template=prompt_template,
+                         **kwargs)
+
+    def load_from_disk(self, dataset_name_or_path, subset_list, work_dir, **kwargs) -> dict:
+        """
+        Load the dataset from local disk.
+
+        dataset_name_or_path: str, the dataset id or path. e.g. 'arc'
+        subset_list: list, the subset list to load. e.g. ['ARC-Easy', 'ARC-Challenge']
+        work_dir: str, the local root data directory. e.g. '/path/to/data'
+        kwargs: dict, other arguments.
+        """
+        data_dict = {}
+        for subset_name in subset_list:
+            if os.path.exists(dataset_name_or_path):
+                subset_path = os.path.join(dataset_name_or_path, subset_name)
+            else:
+                subset_path = os.path.join(work_dir, dataset_name_or_path, subset_name)
+            for split_name in ['Train', 'Test']:
+                split_path = os.path.join(subset_path, f'{subset_name}-{split_name}.jsonl')
+                if os.path.exists(split_path):
+                    with open(split_path, 'r', errors='ignore') as in_f:
+                        rows = []
+                        for line in in_f:
+                            item = json.loads(line.strip())
+                            raw_choices = item['question']['choices']
+                            rows.append({
+                                'id': item['id'],
+                                'question': item['question']['stem'],
+                                'choices': {'text': [d['text'] for d in raw_choices],
+                                            'label': [d['label'] for d in raw_choices]},
+                                'answerKey': item['answerKey'],
+                            })
+
+                    if subset_name in data_dict:
+                        data_dict[subset_name].update({split_name.lower(): rows})
+                    else:
+                        data_dict[subset_name] = {split_name.lower(): rows}
+
+        return data_dict
+
+    def gen_prompt(self, input_d: dict, few_shot_list: list, **kwargs) -> dict:
+        """
+        Generate model prompt from raw data, unify the prompt format for ARC benchmark.
+
+        Args:
+            input_d (dict): The raw input. A single data format of the ARC:
+
+            {
+                'id': 'Mercury_7220990',
+                'question': 'Which factor will most likely cause a person to develop a fever?',
+                'choices':
+                {
+                    'text':['a leg muscle relaxing after exercise',
+                            'a bacterial population in the bloodstream',
+                            'several viral particles on the skin',
+                            'carbohydrates being digested in the stomach'],
+                    'label': ['A', 'B', 'C', 'D']
+                },
+                'answerKey': 'B'
+            }
+
+        Returns:
+            {'data': ['xxx'], 'multi_choices': ['A', 'B', 'C', 'D']}
+        """
+        few_shot_prompts = [self._generate_prompt(input_d=sample, include_answer=True) for sample in few_shot_list]
+        context: str = '\n'.join(few_shot_prompts)
+
+        context = f'{self.prompt_template}\n{context}' if self.prompt_template else context
+
+        # context = f'The following are multiple choice questions, please output correct answer in the form of A or B or C or D, do not output explanation:\n {context}'
+        full_prompt: str = context + self._generate_prompt(input_d=input_d, include_answer=False)
+
+        return {'data': [full_prompt], 'multi_choices': self.choices}
+
+    def get_gold_answer(self, input_d: dict) -> str:
+        # Get the gold choice
+        return input_d.get('answerKey', '')
+
+    def parse_pred_result(self, result: str, raw_input_d: dict = None, eval_type: str = 'checkpoint') -> str:
+        """
+        Parse the model output to get the answer. Could be the best choice index.
+
+        Args:
+            result: Predicted answer from the model. Usually a string for chat.
+            raw_input_d (dict): The raw input. Depending on the dataset.
+            eval_type: 'checkpoint' or 'service' or `custom`, default: 'checkpoint'
+
+        Returns:
+            The parsed answer. Depending on the dataset. Usually a string for chat.
+        """
+        if eval_type == 'checkpoint':
+            return result
+        elif eval_type == 'service':
+            return ResponseParser.parse_first_option_with_choices(text=result, options=self.choices)  # TODO: to be checked !
+        elif eval_type == 'custom':
+            return ResponseParser.parse_first_option_with_choices(text=result, options=self.choices)  # TODO: to be checked !
+        else:
+            raise ValueError(f'Invalid eval_type: {eval_type}')
+
+    def match(self, gold: str, pred: str) -> float:
+        return exact_match(gold=gold, pred=pred)
+
+    def compute_metric(self, review_res_list: list) -> float:
+        """
+        Compute evaluation result by specific metric.
+
+        Args:
+            review_res_list: review score list, e.g. [0, 1, 1, 0, ...]
+
+        Returns:
+            The metric score.
+        """
+        items = [(score, 1.0) for score in review_res_list]
+        return weighted_mean(items)
+
+    def gen_report(self, subset_score_map: dict, report_name: str = None) -> dict:
+        """
+        Generate the report for the model output.
+
+        Args:
+            subset_score_map: The subset-score mapping. e.g. {subset_name: (score, num), ...}
+            report_name: The user-defined report name.
+
+        Returns: A dict of metric calculation results. The format is like:
+        {
+            "name":"ARC",
+            "metric":"WeightedAverageAccuracy",
+            "score":0.3389,
+            "category":[
+                {
+                    "name":"DEFAULT",
+                    "score":0.4128,
+                    "subset":[
+                        {
+                            "name":"ARC-Easy",
+                            "score":0.5632
+                        },
+                        {
+                            "name":"ARC-Challenge",
+                            "score":0.3157
+                        }
+                    ]
+                }
+            ],
+            "total_num":7800
+        }
+        """
+        total_num: int = sum([num for _, num in subset_score_map.values()])
+        weighted_avg_acc: float = sum([score * num for score, num in subset_score_map.values()]) / total_num
+        weighted_avg_acc = normalize_score(score=weighted_avg_acc)
+        cate_avg_list = [{'name': subset_name, 'score': normalize_score(score=score)} for subset_name, (score, _) in subset_score_map.items()]
+
+        category_d = dict(name='DEFAULT',
+                          score=weighted_avg_acc,
+                          subset=cate_avg_list)
+
+        res_map = dict(name=report_name or 'arc',
+                       metric=self.metric_list[0]['name'],
+                       score=weighted_avg_acc,
+                       category=[category_d],
+                       total_num=total_num)
+
+        return res_map
+
+    @classmethod
+    def _generate_prompt(cls, input_d: dict, include_answer=True) -> str:
+
+        example: str = input_d['question']
+
+        choices_texts: list = input_d['choices']['text']
+        choices_labels: list = input_d['choices']['label']
+        choices_prompts: str = '\n'.join([label + '. ' + text for text, label in zip(choices_texts, choices_labels)])
+        example += '\n' + choices_prompts
+
+        example += '\nAnswer:'
+        if include_answer:
+            example += ' {}\n\n'.format(input_d['answerKey'])
+
+        return example
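Taken together, the adapter methods above define a prompt → parse → match → report loop. A minimal sketch of that loop for one record (the example from the gen_prompt docstring); the model response is a stand-in string, not output from a real model:

from evalscope.benchmarks.arc.arc_adapter import ARCAdapter

adapter = ARCAdapter()  # defaults: ARC-Challenge subset, 0-shot, WeightedAverageAccuracy

sample = {
    'id': 'Mercury_7220990',
    'question': 'Which factor will most likely cause a person to develop a fever?',
    'choices': {'text': ['a leg muscle relaxing after exercise',
                         'a bacterial population in the bloodstream',
                         'several viral particles on the skin',
                         'carbohydrates being digested in the stomach'],
                'label': ['A', 'B', 'C', 'D']},
    'answerKey': 'B',
}

prompt_d = adapter.gen_prompt(input_d=sample, few_shot_list=[])
# prompt_d['data'][0] is the question plus lettered choices, ending with 'Answer:'

model_output = 'B'  # stand-in for a real model completion
pred = adapter.parse_pred_result(model_output, raw_input_d=sample, eval_type='service')
gold = adapter.get_gold_answer(sample)       # 'B'
score = adapter.match(gold=gold, pred=pred)  # exact_match should yield 1.0 on a hit, 0.0 otherwise

# Aggregate per-subset (score, num_samples) pairs into the report structure documented above.
report = adapter.gen_report({'ARC-Challenge': (score, 1)})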
evalscope/benchmarks/bbh/__init__.py
@@ -0,0 +1,6 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+
+
+from evalscope.benchmarks.bbh.bbh_adapter import DATASET_ID, SUBSET_LIST
+from evalscope.benchmarks.bbh.bbh_adapter import BBHAdapter as DataAdapterClass
+from evalscope.models.model_adapter import ChatGenerationModelAdapter as ModelAdapterClass # noqa
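This __init__ exposes the DATASET_ID and SUBSET_LIST constants plus a DataAdapterClass/ModelAdapterClass pair, which appears to be how evalscope wires a benchmark to its paired model adapter. A short illustration (the generic usage is an assumption, not evalscope's actual runner code):

from evalscope.benchmarks import bbh

data_adapter = bbh.DataAdapterClass()        # BBHAdapter, defined in bbh_adapter.py below
print(bbh.DATASET_ID, len(bbh.SUBSET_LIST))  # 'modelscope/bbh', 27 sub-tasks
# bbh.ModelAdapterClass (ChatGenerationModelAdapter) is the generation-style model wrapper
# expected to be paired with this data adapter.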
evalscope/benchmarks/bbh/bbh_adapter.py
@@ -0,0 +1,308 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+
+import os
+import re
+import random
+import json
+
+from evalscope.benchmarks.data_adapter import DataAdapter
+from evalscope.constants import AnswerKeys
+from evalscope.metrics.metrics import exact_match, weighted_mean
+from evalscope.utils import normalize_score, ResponseParser
+from evalscope.utils.logger import get_logger
+# flake8: noqa
+
+logger = get_logger()
+
+DATASET_ID = 'modelscope/bbh'
+
+
+# BBH multiple choice subset list
+MULTIPLE_CHOICE = 'multiple_choice'
+MULTIPLE_CHOICE_LIST = [
+    'temporal_sequences',
+    'disambiguation_qa',
+    'date_understanding',
+    'tracking_shuffled_objects_three_objects',
+    'penguins_in_a_table',
+    'geometric_shapes',
+    'snarks',
+    'ruin_names',
+    'tracking_shuffled_objects_seven_objects',
+    'tracking_shuffled_objects_five_objects',
+    'logical_deduction_three_objects',
+    'hyperbaton',
+    'logical_deduction_five_objects',
+    'logical_deduction_seven_objects',
+    'movie_recommendation',
+    'salient_translation_error_detection',
+    'reasoning_about_colored_objects',
+]
+
+# The free form subset list of BBH dataset
+FREE_FORM = 'free_form'
+FREE_FORM_LIST = [
+    'multistep_arithmetic_two',
+    'navigate',
+    'dyck_languages',
+    'word_sorting',
+    'sports_understanding',
+    'boolean_expressions',
+    'object_counting',
+    'formal_fallacies',
+    'causal_judgement',
+    'web_of_lies',
+]
+
+# BBH sub-task type
+TASK_TYPE = 'task_type'
+SUBSET_LIST = MULTIPLE_CHOICE_LIST + FREE_FORM_LIST
+
+
+class BBHAdapter(DataAdapter):
+    """
+    Adapter for BBH free-form and multiple-choices sub-tasks.
+    """
+
+    def __init__(self,
+                 subset_list: list = None,
+                 metric_list: list = None,
+                 few_shot_num: int = None,
+                 train_split: str = None,
+                 eval_split: str = 'test',
+                 **kwargs):
+
+        if subset_list is None:
+            subset_list = SUBSET_LIST
+
+        if metric_list is None:
+            metric_list = [{'name': 'WeightedAverageAccuracy', 'object': weighted_mean}]
+
+        if few_shot_num is None:
+            logger.info(f'Set 3-shot examples by system for BBH.')
+            few_shot_num = 3
+
+        if few_shot_num != 3 and few_shot_num != 0:
+            logger.error(f'BBH uses 3-shot examples with CoT or 0-shot by system, but got {few_shot_num}. '
+                         f'Use 3-shot by default.')
+            few_shot_num = 3
+
+        super().__init__(subset_list=subset_list,
+                         metric_list=metric_list,
+                         few_shot_num=few_shot_num,
+                         train_split=train_split,
+                         eval_split=eval_split,
+                         **kwargs)
+
+    def load_from_disk(self, dataset_name_or_path, subset_list, work_dir, **kwargs) -> dict:
+        data_dict = {}
+        for subset_name in subset_list:
+            for split_name in [self.eval_split]:
+                if os.path.exists(dataset_name_or_path):
+                    file_path = os.path.join(dataset_name_or_path, f'{subset_name}.json')
+                else:
+                    file_path: str = os.path.join(work_dir, dataset_name_or_path, f'{subset_name}.json')
+                if os.path.exists(file_path):
+                    with open(file_path, 'r') as f:
+                        examples = json.load(f)['examples']
+                        if subset_name in data_dict:
+                            data_dict[subset_name].update({split_name: examples})
+                        else:
+                            data_dict[subset_name] = {split_name: examples}
+
+        return data_dict
+
+    def gen_prompt(self, input_d: dict, few_shot_list: list, **kwargs) -> dict:
+        """
+        Generate model prompt from raw data, unify the prompt format for bbh(multiple choice) benchmark.
+
+        Args:
+            input_d (dict): The raw input. A single data format of the BBH:
+
+            {
+                'input': '((-1 + 2 + 9 * 5) - (-2 + -4 + -4 * -7)) =',
+                'target': '24',
+            }
+
+        Returns:
+            {'data': ['xxx']}
+        """
+        # few_shot_list: should be ['xxxx']
+        cot_prompts: str = few_shot_list[0] if len(few_shot_list) > 0 else ''
+        full_prompt: str = f"Follow the given examples and answer the question.\n{cot_prompts}\n\nQ: {input_d['input']}\nA: Let's think step by step."
+
+        return {'data': [full_prompt]}
+
+    def gen_prompts(self, data_dict: dict) -> dict:
+        """
+        Generate dataset prompts from raw input, unify the prompt format for different datasets.
+
+        Args:
+            data_dict: Refer to the output of load method: evalscope.benchmarks.benchmark.Benchmark.load
+
+        Returns:
+            {'subset_name': [prompt_d_1, prompt_d_2, ...]}
+            prompt_d_i (dict): refer to the output of gen_prompt method.
+
+        e.g. train -- few-shot data, test -- target dataset to evaluate.
+        """
+        res_dict: dict = {}
+
+        if self.few_shot_num < 0:
+            raise ValueError(f'Invalid shot_num: {self.few_shot_num} for few-shot evaluation.')
+
+        logger.info(f'\n** Use default settings: \n'
+                    f'>few_shot_num: {self.few_shot_num}, '
+                    f'>few_shot_split: {self.train_split}, '
+                    f'>target_eval_split: {self.eval_split}')
+
+        for sub_name, sub_data_dict in data_dict.items():
+            few_shot_data = []
+            if self.few_shot_num > 0:
+                with open(os.path.join(os.path.dirname(__file__), 'cot_prompts', f'{sub_name}.txt'), 'r') as f:
+                    cot_prompt_str = f.read()
+                few_shot_data = [cot_prompt_str]
+
+            res_dict[sub_name] = []
+            for sample_d in sub_data_dict[self.eval_split]:
+                prompt_d = self.gen_prompt(input_d=sample_d, few_shot_list=few_shot_data)
+                sample_d_new = sample_d.copy()
+                if sub_name in MULTIPLE_CHOICE_LIST:
+                    sample_d_new[TASK_TYPE] = MULTIPLE_CHOICE
+                elif sub_name in FREE_FORM_LIST:
+                    sample_d_new[TASK_TYPE] = FREE_FORM
+                else:
+                    raise ValueError(f'Invalid subset name: {sub_name}')
+
+                prompt_d[AnswerKeys.RAW_INPUT] = sample_d_new
+                res_dict[sub_name].append(prompt_d)
+
+        rnd = random.Random()
+        rnd.seed(42)
+        for k, v in res_dict.items():
+            rnd.shuffle(v)
+
+        return res_dict
+
+    def get_gold_answer(self, input_d: dict) -> str:
+        # Get the gold choice
+        gold = input_d.get('target')
+        if gold is None:
+            logger.error(f'BBHAdapter: gold is None.')
+        return gold
+
+    def parse_pred_result(self, result: str, raw_input_d: dict = None, eval_type: str = 'checkpoint') -> str:
+        """
+        Parse the model output to get the answer. Could be the best choice index.
+
+        Args:
+            result: Predicted answer from the model. Usually a string for chat.
+            raw_input_d (dict): The raw input. Depending on the dataset.
+            eval_type: 'checkpoint' or 'service' or `custom`, default: 'checkpoint'
+
+        Returns:
+            The parsed answer. Depending on the dataset. Usually a string for chat.
+        """
+        # Note: to use same extraction method for both of checkpoint/service/custom.
+        task_type: str = raw_input_d.get(TASK_TYPE)
+
+        if task_type == MULTIPLE_CHOICE:
+            return self._extract_mc_answer(result)
+        elif task_type == FREE_FORM:
+            return self._extract_ff_answer(result)
+        else:
+            raise ValueError(f'Invalid task type: {task_type}')
+
+    def match(self, gold: str, pred: str) -> float:
+        return exact_match(gold=gold, pred=pred)
+
+    def compute_metric(self, review_res_list: list) -> float:
+        """
+        Compute evaluation result by specific metric.
+
+        Args:
+            review_res_list: review score list, e.g. [0, 1, 1, 0, ...]
+
+        Returns:
+            The metric score.
+        """
+        items = [(score, 1.0) for score in review_res_list]
+        return weighted_mean(items)
+
+    def gen_report(self, subset_score_map: dict, report_name: str = None) -> dict:
+        """
+        Generate the report for the model output.
+
+        Args:
+            subset_score_map: The subset-score mapping. e.g. {subset_name: (score, num), ...}
+            report_name: The user-defined report name.
+
+        Returns: A dict of metric calculation results. The format is like:
+        {
+            "name":"BBH",
+            "metric":"WeightedAverageAccuracy",
+            "score":0.3389,
+            "category":[
+                {
+                    "name":"DEFAULT",
+                    "score":0.3389,
+                    "subset":[
+                        {
+                            "name":"BBH",
+                            "score":0.3389
+                        },
+                    ]
+                }
+            ],
+            "total_num":100
+        }
+        """
+        total_num: int = sum([num for _, num in subset_score_map.values()])
+        weighted_avg_acc: float = sum([score * num for score, num in subset_score_map.values()]) / total_num
+        weighted_avg_acc = normalize_score(score=weighted_avg_acc)
+        cate_avg_list = [{'name': subset_name, 'score': normalize_score(score=score)} for subset_name, (score, _) in subset_score_map.items()]
+
+        category_d = dict(name='DEFAULT',
+                          score=weighted_avg_acc,
+                          subset=cate_avg_list)
+
+        res_map = dict(name=report_name or 'bbh',
+                       metric=self.metric_list[0]['name'],
+                       score=weighted_avg_acc,
+                       category=[category_d],
+                       total_num=total_num)
+
+        return res_map
+
+    @classmethod
+    def _extract_mc_answer(cls, ans: str) -> str:
+        """
+        Extract the answer from the model output for Multiple choice task.
+        """
+        ans_line = ans.split('answer is ')
+        if len(ans_line) != 1:
+            ans = ans_line[1].strip()
+        match = re.search(r'\(([A-Z])\)*', ans)
+        if match:
+            return match.group(1)
+        match = re.search(r'([A-Z])', ans)
+        if match:
+            return match.group(1)
+        return ans
+
+    @classmethod
+    def _extract_ff_answer(cls, ans: str):
+        """
+        Extract the answer from the model output for Free-form task.
+        """
+        res = ResponseParser.parse_first_option(ans)
+        if res:
+            return res
+
+        ans_line = ans.split('answer is ')
+        if len(ans_line) != 1:
+            ans = ans_line[1].strip()
+        ans = ans.split('\n')[0]
+        if ans.endswith('.'):
+            ans = ans[:-1]
+        return ans
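As a brief illustration of how the extractors above behave on chain-of-thought completions (the completion strings are illustrative, and the free-form path also depends on ResponseParser.parse_first_option from evalscope.utils):

from evalscope.benchmarks.bbh.bbh_adapter import BBHAdapter

adapter = BBHAdapter()  # defaults: all 27 sub-tasks, 3-shot CoT prompts, 'test' split

# Multiple-choice sub-tasks: look for "answer is (X)" first, then fall back to the
# first capital letter found in the completion.
mc_completion = "Let's reason it out ... So the answer is (B)."
print(BBHAdapter._extract_mc_answer(mc_completion))  # expected: 'B'

# Free-form sub-tasks: try ResponseParser.parse_first_option, otherwise take the text
# after "answer is " up to the end of the line, stripping a trailing period.
ff_completion = "Adding the terms step by step gives 24. So the answer is 24."
print(BBHAdapter._extract_ff_answer(ff_completion))  # expected to reduce to '24'

# Scoring then follows the same exact-match + weighted-mean path as the ARC adapter:
report = adapter.gen_report({'multistep_arithmetic_two': (0.5, 2), 'word_sorting': (1.0, 1)})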