evalscope 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- evalscope/__init__.py +3 -0
- evalscope/backend/__init__.py +3 -0
- evalscope/backend/base.py +27 -0
- evalscope/backend/opencompass/__init__.py +3 -0
- evalscope/backend/opencompass/api_meta_template.py +64 -0
- evalscope/backend/opencompass/backend_manager.py +247 -0
- evalscope/backend/opencompass/tasks/__init__.py +1 -0
- evalscope/backend/opencompass/tasks/eval_api.py +30 -0
- evalscope/backend/opencompass/tasks/eval_datasets.py +71 -0
- evalscope/backend/vlm_eval_kit/__init__.py +1 -0
- evalscope/backend/vlm_eval_kit/backend_manager.py +153 -0
- evalscope/benchmarks/__init__.py +4 -0
- evalscope/benchmarks/arc/__init__.py +5 -0
- evalscope/benchmarks/arc/ai2_arc.py +148 -0
- evalscope/benchmarks/arc/arc_adapter.py +231 -0
- evalscope/benchmarks/bbh/__init__.py +6 -0
- evalscope/benchmarks/bbh/bbh_adapter.py +308 -0
- evalscope/benchmarks/bbh/cot_prompts/boolean_expressions.txt +23 -0
- evalscope/benchmarks/bbh/cot_prompts/causal_judgement.txt +25 -0
- evalscope/benchmarks/bbh/cot_prompts/date_understanding.txt +33 -0
- evalscope/benchmarks/bbh/cot_prompts/disambiguation_qa.txt +37 -0
- evalscope/benchmarks/bbh/cot_prompts/dyck_languages.txt +72 -0
- evalscope/benchmarks/bbh/cot_prompts/formal_fallacies.txt +44 -0
- evalscope/benchmarks/bbh/cot_prompts/geometric_shapes.txt +78 -0
- evalscope/benchmarks/bbh/cot_prompts/hyperbaton.txt +28 -0
- evalscope/benchmarks/bbh/cot_prompts/logical_deduction_five_objects.txt +37 -0
- evalscope/benchmarks/bbh/cot_prompts/logical_deduction_seven_objects.txt +37 -0
- evalscope/benchmarks/bbh/cot_prompts/logical_deduction_three_objects.txt +37 -0
- evalscope/benchmarks/bbh/cot_prompts/movie_recommendation.txt +42 -0
- evalscope/benchmarks/bbh/cot_prompts/multistep_arithmetic_two.txt +25 -0
- evalscope/benchmarks/bbh/cot_prompts/navigate.txt +43 -0
- evalscope/benchmarks/bbh/cot_prompts/object_counting.txt +37 -0
- evalscope/benchmarks/bbh/cot_prompts/penguins_in_a_table.txt +41 -0
- evalscope/benchmarks/bbh/cot_prompts/reasoning_about_colored_objects.txt +63 -0
- evalscope/benchmarks/bbh/cot_prompts/ruin_names.txt +44 -0
- evalscope/benchmarks/bbh/cot_prompts/salient_translation_error_detection.txt +40 -0
- evalscope/benchmarks/bbh/cot_prompts/snarks.txt +30 -0
- evalscope/benchmarks/bbh/cot_prompts/sports_understanding.txt +10 -0
- evalscope/benchmarks/bbh/cot_prompts/temporal_sequences.txt +77 -0
- evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_five_objects.txt +40 -0
- evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_seven_objects.txt +40 -0
- evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_three_objects.txt +40 -0
- evalscope/benchmarks/bbh/cot_prompts/web_of_lies.txt +28 -0
- evalscope/benchmarks/bbh/cot_prompts/word_sorting.txt +17 -0
- evalscope/benchmarks/benchmark.py +65 -0
- evalscope/benchmarks/ceval/__init__.py +5 -0
- evalscope/benchmarks/ceval/ceval_adapter.py +340 -0
- evalscope/benchmarks/ceval/ceval_exam.py +159 -0
- evalscope/benchmarks/cmmlu/__init__.py +5 -0
- evalscope/benchmarks/cmmlu/cmmlu.py +166 -0
- evalscope/benchmarks/cmmlu/cmmlu_adapter.py +369 -0
- evalscope/benchmarks/competition_math/__init__.py +5 -0
- evalscope/benchmarks/competition_math/competition_math.py +88 -0
- evalscope/benchmarks/competition_math/competition_math_adapter.py +470 -0
- evalscope/benchmarks/data_adapter.py +263 -0
- evalscope/benchmarks/general_qa/__init__.py +5 -0
- evalscope/benchmarks/general_qa/general_qa_adapter.py +186 -0
- evalscope/benchmarks/gsm8k/__init__.py +5 -0
- evalscope/benchmarks/gsm8k/gsm8k.py +127 -0
- evalscope/benchmarks/gsm8k/gsm8k_adapter.py +236 -0
- evalscope/benchmarks/hellaswag/__init__.py +5 -0
- evalscope/benchmarks/hellaswag/hellaswag.py +116 -0
- evalscope/benchmarks/hellaswag/hellaswag_adapter.py +222 -0
- evalscope/benchmarks/humaneval/__init__.py +5 -0
- evalscope/benchmarks/humaneval/humaneval.py +82 -0
- evalscope/benchmarks/humaneval/humaneval_adapter.py +21 -0
- evalscope/benchmarks/mmlu/__init__.py +5 -0
- evalscope/benchmarks/mmlu/mmlu.py +174 -0
- evalscope/benchmarks/mmlu/mmlu_adapter.py +375 -0
- evalscope/benchmarks/race/__init__.py +5 -0
- evalscope/benchmarks/race/race.py +118 -0
- evalscope/benchmarks/race/race_adapter.py +229 -0
- evalscope/benchmarks/trivia_qa/__init__.py +5 -0
- evalscope/benchmarks/trivia_qa/trivia_qa.py +104 -0
- evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +207 -0
- evalscope/benchmarks/truthful_qa/__init__.py +5 -0
- evalscope/benchmarks/truthful_qa/truthful_qa.py +167 -0
- evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +351 -0
- evalscope/cache.py +98 -0
- evalscope/cli/__init__.py +1 -0
- evalscope/cli/base.py +20 -0
- evalscope/cli/cli.py +26 -0
- evalscope/cli/start_perf.py +37 -0
- evalscope/cli/start_server.py +138 -0
- evalscope/config.py +165 -0
- evalscope/constants.py +150 -0
- evalscope/evaluator/__init__.py +3 -0
- evalscope/evaluator/evaluator.py +689 -0
- evalscope/evaluator/rating_eval.py +178 -0
- evalscope/evaluator/reviewer/__init__.py +1 -0
- evalscope/evaluator/reviewer/auto_reviewer.py +411 -0
- evalscope/metrics/__init__.py +1 -0
- evalscope/metrics/bundled_rouge_score/__init__.py +14 -0
- evalscope/metrics/bundled_rouge_score/rouge_scorer.py +342 -0
- evalscope/metrics/code_metric.py +104 -0
- evalscope/metrics/math_accuracy.py +60 -0
- evalscope/metrics/metrics.py +405 -0
- evalscope/metrics/rouge_metric.py +129 -0
- evalscope/models/__init__.py +4 -0
- evalscope/models/custom/__init__.py +4 -0
- evalscope/models/custom/custom_model.py +53 -0
- evalscope/models/dummy_chat_model.py +50 -0
- evalscope/models/model.py +88 -0
- evalscope/models/model_adapter.py +586 -0
- evalscope/models/openai_model.py +103 -0
- evalscope/models/template.py +1446 -0
- evalscope/perf/__init__.py +0 -0
- evalscope/perf/_logging.py +32 -0
- evalscope/perf/api_plugin_base.py +60 -0
- evalscope/perf/custom_api.py +87 -0
- evalscope/perf/dashscope_api.py +84 -0
- evalscope/perf/dataset_plugin_base.py +64 -0
- evalscope/perf/datasets/__init__.py +0 -0
- evalscope/perf/datasets/line_by_line.py +18 -0
- evalscope/perf/datasets/longalpaca_12k.py +20 -0
- evalscope/perf/datasets/openqa.py +22 -0
- evalscope/perf/how_to_analysis_result.py +24 -0
- evalscope/perf/http_client.py +756 -0
- evalscope/perf/openai_api.py +130 -0
- evalscope/perf/plugin_registry.py +35 -0
- evalscope/perf/query_parameters.py +42 -0
- evalscope/perf/server_sent_event.py +43 -0
- evalscope/preprocess/__init__.py +1 -0
- evalscope/preprocess/tokenizers/__init__.py +0 -0
- evalscope/preprocess/tokenizers/gpt2_tokenizer.py +221 -0
- evalscope/registry/__init__.py +1 -0
- evalscope/registry/tasks/arc.yaml +29 -0
- evalscope/registry/tasks/bbh.yaml +27 -0
- evalscope/registry/tasks/bbh_mini.yaml +27 -0
- evalscope/registry/tasks/ceval.yaml +27 -0
- evalscope/registry/tasks/ceval_mini.yaml +27 -0
- evalscope/registry/tasks/cmmlu.yaml +27 -0
- evalscope/registry/tasks/eval_qwen-7b-chat_v100.yaml +28 -0
- evalscope/registry/tasks/general_qa.yaml +27 -0
- evalscope/registry/tasks/gsm8k.yaml +29 -0
- evalscope/registry/tasks/mmlu.yaml +29 -0
- evalscope/registry/tasks/mmlu_mini.yaml +27 -0
- evalscope/run.py +404 -0
- evalscope/run_arena.py +204 -0
- evalscope/run_ms.py +140 -0
- evalscope/summarizer.py +144 -0
- evalscope/third_party/__init__.py +1 -0
- evalscope/third_party/toolbench_static/__init__.py +3 -0
- evalscope/third_party/toolbench_static/eval.py +219 -0
- evalscope/third_party/toolbench_static/infer.py +278 -0
- evalscope/third_party/toolbench_static/llm/__init__.py +1 -0
- evalscope/third_party/toolbench_static/llm/swift_infer.py +45 -0
- evalscope/third_party/toolbench_static/toolbench_static.py +50 -0
- evalscope/tools/__init__.py +1 -0
- evalscope/tools/combine_reports.py +140 -0
- evalscope/tools/gen_mmlu_subject_mapping.py +90 -0
- evalscope/tools/rewrite_eval_results.py +95 -0
- evalscope/utils/__init__.py +4 -0
- evalscope/utils/arena_utils.py +247 -0
- evalscope/utils/completion_parsers.py +87 -0
- evalscope/utils/logger.py +64 -0
- evalscope/utils/task_cfg_parser.py +10 -0
- evalscope/utils/task_utils.py +19 -0
- evalscope/utils/utils.py +625 -0
- evalscope/version.py +4 -0
- evalscope-0.5.0.dist-info/METADATA +566 -0
- evalscope-0.5.0.dist-info/RECORD +165 -0
- evalscope-0.5.0.dist-info/WHEEL +5 -0
- evalscope-0.5.0.dist-info/entry_points.txt +3 -0
- evalscope-0.5.0.dist-info/top_level.txt +1 -0
evalscope/benchmarks/ceval/ceval_adapter.py
@@ -0,0 +1,340 @@
# Copyright (c) Alibaba, Inc. and its affiliates.
import csv
import os
from evalscope.benchmarks.data_adapter import DataAdapter
from evalscope.metrics.metrics import exact_match, weighted_mean
from evalscope.utils import normalize_score, ResponseParser
from evalscope.utils.logger import get_logger

# flake8: noqa

logger = get_logger()

DATASET_ID = 'modelscope/ceval-exam'

SUBSET_LIST = [
    'computer_network',
    'operating_system',
    'computer_architecture',
    'college_programming',
    'college_physics',
    'college_chemistry',
    'advanced_mathematics',
    'probability_and_statistics',
    'discrete_mathematics',
    'electrical_engineer',
    'metrology_engineer',
    'high_school_mathematics',
    'high_school_physics',
    'high_school_chemistry',
    'high_school_biology',
    'middle_school_mathematics',
    'middle_school_biology',
    'middle_school_physics',
    'middle_school_chemistry',
    'veterinary_medicine',
    'college_economics',
    'business_administration',
    'marxism',
    'mao_zedong_thought',
    'education_science',
    'teacher_qualification',
    'high_school_politics',
    'high_school_geography',
    'middle_school_politics',
    'middle_school_geography',
    'modern_chinese_history',
    'ideological_and_moral_cultivation',
    'logic',
    'law',
    'chinese_language_and_literature',
    'art_studies',
    'professional_tour_guide',
    'legal_professional',
    'high_school_chinese',
    'high_school_history',
    'middle_school_history',
    'civil_servant',
    'sports_science',
    'plant_protection',
    'basic_medicine',
    'clinical_medicine',
    'urban_and_rural_planner',
    'accountant',
    'fire_engineer',
    'environmental_impact_assessment_engineer',
    'tax_accountant',
    'physician',
]

SUBJECT_MAPPING = {'computer_network': ['Computer Network', '计算机网络', 'STEM'],
                   'operating_system': ['Operating System', '操作系统', 'STEM'],
                   'computer_architecture': ['Computer Architecture', '计算机组成', 'STEM'],
                   'college_programming': ['College Programming', '大学编程', 'STEM'],
                   'college_physics': ['College Physics', '大学物理', 'STEM'],
                   'college_chemistry': ['College Chemistry', '大学化学', 'STEM'],
                   'advanced_mathematics': ['Advanced Mathematics', '高等数学', 'STEM'],
                   'probability_and_statistics': ['Probability and Statistics', '概率统计', 'STEM'],
                   'discrete_mathematics': ['Discrete Mathematics', '离散数学', 'STEM'],
                   'electrical_engineer': ['Electrical Engineer', '注册电气工程师', 'STEM'],
                   'metrology_engineer': ['Metrology Engineer', '注册计量师', 'STEM'],
                   'high_school_mathematics': ['High School Mathematics', '高中数学', 'STEM'],
                   'high_school_physics': ['High School Physics', '高中物理', 'STEM'],
                   'high_school_chemistry': ['High School Chemistry', '高中化学', 'STEM'],
                   'high_school_biology': ['High School Biology', '高中生物', 'STEM'],
                   'middle_school_mathematics': ['Middle School Mathematics', '初中数学', 'STEM'],
                   'middle_school_biology': ['Middle School Biology', '初中生物', 'STEM'],
                   'middle_school_physics': ['Middle School Physics', '初中物理', 'STEM'],
                   'middle_school_chemistry': ['Middle School Chemistry', '初中化学', 'STEM'],
                   'veterinary_medicine': ['Veterinary Medicine', '兽医学', 'STEM'],
                   'college_economics': ['College Economics', '大学经济学', 'Social Science'],
                   'business_administration': ['Business Administration', '工商管理', 'Social Science'],
                   'marxism': ['Marxism', '马克思主义基本原理', 'Social Science'],
                   'mao_zedong_thought': ['Mao Zedong Thought', '毛泽东思想和中国特色社会主义理论体系概论', 'Social Science'],
                   'education_science': ['Education Science', '教育学', 'Social Science'],
                   'teacher_qualification': ['Teacher Qualification', '教师资格', 'Social Science'],
                   'high_school_politics': ['High School Politics', '高中政治', 'Social Science'],
                   'high_school_geography': ['High School Geography', '高中地理', 'Social Science'],
                   'middle_school_politics': ['Middle School Politics', '初中政治', 'Social Science'],
                   'middle_school_geography': ['Middle School Geography', '初中地理', 'Social Science'],
                   'modern_chinese_history': ['Modern Chinese History', '近代史纲要', 'Humanities'],
                   'ideological_and_moral_cultivation': ['Ideological and Moral Cultivation', '思想道德修养与法律基础', 'Humanities'],
                   'logic': ['Logic', '逻辑学', 'Humanities'],
                   'law': ['Law', '法学', 'Humanities'],
                   'chinese_language_and_literature': ['Chinese Language and Literature', '中国语言文学', 'Humanities'],
                   'art_studies': ['Art Studies', '艺术学', 'Humanities'],
                   'professional_tour_guide': ['Professional Tour Guide', '导游资格', 'Humanities'],
                   'legal_professional': ['Legal Professional', '法律职业资格', 'Humanities'],
                   'high_school_chinese': ['High School Chinese', '高中语文', 'Humanities'],
                   'high_school_history': ['High School History', '高中历史', 'Humanities'],
                   'middle_school_history': ['Middle School History', '初中历史', 'Humanities'],
                   'civil_servant': ['Civil Servant', '公务员', 'Other'],
                   'sports_science': ['Sports Science', '体育学', 'Other'],
                   'plant_protection': ['Plant Protection', '植物保护', 'Other'],
                   'basic_medicine': ['Basic Medicine', '基础医学', 'Other'],
                   'clinical_medicine': ['Clinical Medicine', '临床医学', 'Other'],
                   'urban_and_rural_planner': ['Urban and Rural Planner', '注册城乡规划师', 'Other'],
                   'accountant': ['Accountant', '注册会计师', 'Other'],
                   'fire_engineer': ['Fire Engineer', '注册消防工程师', 'Other'],
                   'environmental_impact_assessment_engineer': ['Environmental Impact Assessment Engineer', '环境影响评价工程师', 'Other'],
                   'tax_accountant': ['Tax Accountant', '税务师', 'Other'],
                   'physician': ['Physician', '医师资格', 'Other']}


class CEVALAdapter(DataAdapter):

    choices = ['A', 'B', 'C', 'D']

    def __init__(self,
                 subset_list: list = None,
                 metric_list: list = None,
                 few_shot_num: int = None,
                 train_split: str = 'dev',
                 eval_split: str = 'val',
                 **kwargs):

        if subset_list is None:
            subset_list = SUBSET_LIST

        if metric_list is None:
            metric_list = [{'name': 'WeightedAverageAccuracy', 'object': weighted_mean}]

        if few_shot_num is None:
            # Use 0-shot by default
            logger.info('Set 0-shot examples by default for C-Eval.')
            few_shot_num = 0

        if few_shot_num > 5:
            logger.warning(f'few_shot_num <= 5 for C-Eval, but got {few_shot_num}. Use 5-shot by default.')
            few_shot_num = 5

        super().__init__(subset_list=subset_list,
                         metric_list=metric_list,
                         few_shot_num=few_shot_num,
                         train_split=train_split,
                         eval_split=eval_split,
                         **kwargs)

    def load_from_disk(self, dataset_name_or_path, subset_list, work_dir, **kwargs) -> dict:
        data_dict = {}
        for subset_name in subset_list:
            for split_name in [self.train_split, self.eval_split]:
                if os.path.exists(dataset_name_or_path):
                    file_path = os.path.join(dataset_name_or_path, f'{subset_name}_{split_name}.csv')
                else:
                    file_path = os.path.join(work_dir, dataset_name_or_path, f'{subset_name}_{split_name}.csv')
                if os.path.exists(file_path):
                    with open(file_path, encoding='utf-8') as f:
                        rows = []
                        reader = csv.reader(f)
                        header = next(reader)
                        for row in reader:
                            item = dict(zip(header, row))
                            item.setdefault('explanation', '')
                            item.setdefault('answer', '')
                            rows.append(item)

                        if subset_name in data_dict:
                            data_dict[subset_name].update({split_name: rows})
                        else:
                            data_dict[subset_name] = {split_name: rows}

        return data_dict

    def gen_prompt(self, input_d: dict, subset_name: str, few_shot_list: list, **kwargs) -> dict:
        """
        Generate model prompt from raw input, unify the prompt format for the C-Eval benchmark.

        Args:
            input_d (dict): The raw input. A single data item of C-Eval:

            {'id': 0,
             'question': '下列关于税法基本原则的表述中,不正确的是____。',
             'A': '税收法定原则包括税收要件法定原则和税务合法性原则',
             'B': '税收公平原则源于法律上的平等性原则',
             'C': '税收效率原则包含经济效率和行政效率两个方面',
             'D': '税务机关按法定程序依法征税,可以自由做出减征、停征或免征税款的决定',
             'answer': 'D',
             'explanation': ''}

        Returns:
            {'data': ['prompt ...']}
        """

        few_shot_prompts = [self._format_example(input_d=sample, include_answer=True) for sample in few_shot_list]

        if len(few_shot_prompts) > 0:
            context: str = '\n'.join(few_shot_prompts) + '\n'
        else:
            context = ''

        full_prompt: str = context.strip() + self._format_example(input_d=input_d, include_answer=False)

        subject_name: str = SUBJECT_MAPPING.get(subset_name)[1] if SUBJECT_MAPPING.get(subset_name) else subset_name
        full_prompt = f"以下是中国关于{subject_name}考试的单项选择题,请选出其中的正确答案。\n" + full_prompt

        return {'data': [full_prompt], 'multi_choices': self.choices}

    def get_gold_answer(self, input_d: dict) -> str:
        # Get the gold choice
        return input_d.get('answer', '')

    def parse_pred_result(self, result: str, raw_input_d: dict = None, eval_type: str = 'checkpoint') -> str:
        """
        Parse the model output to get the answer. Could be the best choice index.

        Args:
            result: Predicted answer from the model. Usually a string for chat.
            raw_input_d (dict): The raw input. Depending on the dataset.
            eval_type: `checkpoint` or `service` or `custom`. Default is `checkpoint`.

        Returns:
            The parsed answer. Depending on the dataset. Usually a string for chat.
        """
        if eval_type == 'checkpoint':
            return result
        elif eval_type == 'service':
            return ResponseParser.parse_first_option_with_choices(result, self.choices)  # TODO: to be checked !
        elif eval_type == 'custom':
            return ResponseParser.parse_first_option_with_choices(result, self.choices)  # TODO: to be checked !
        else:
            raise ValueError(f'Invalid eval_type: {eval_type}')

    def match(self, gold: str, pred: str) -> float:
        return exact_match(gold=gold, pred=pred)

    def compute_metric(self, review_res_list: list) -> float:
        """
        Compute evaluation result by specific metric.

        Args:
            review_res_list: review score list, e.g. [0, 1, 1, 0, ...]

        Returns:
            The metric score.
        """
        items = [(score, 1.0) for score in review_res_list]
        return weighted_mean(items)

    def gen_report(self, subset_score_map: dict, report_name: str = None) -> dict:
        """
        Generate report for the evaluation.

        Args:
            subset_score_map: The subset-score mapping. e.g. {subset_name: (score, num), ...}
            report_name: The user-defined report name.

        Returns:
            {
                "name": "C-Eval",
                "metric": "WeightedAverageAccuracy",
                "score": 0.3389,
                "category": [
                    {
                        "name": "STEM",
                        "score": 0.2528,
                        "subset": [
                            {"name": "computer_network", "score": 0.2632},
                            {"name": "operating_system", "score": 0.3157},
                            {"name": "computer_architecture", "score": 0.4285}
                        ]
                    }
                ],
                "total_num": 59
            }
        """
        total_num: int = sum([num for _, num in subset_score_map.values()])
        weighted_avg_acc: float = sum([score * num for score, num in subset_score_map.values()]) / total_num
        weighted_avg_acc = normalize_score(score=weighted_avg_acc)

        # Get domain-subject mapping
        subject_review_map = {}
        for subset_name, (subset_score, num) in subset_score_map.items():
            domain_name: str = SUBJECT_MAPPING.get(subset_name)[2] if SUBJECT_MAPPING.get(subset_name) else 'DEFAULT'
            if domain_name in subject_review_map:
                subject_review_map[domain_name].append((subset_name, subset_score, num))
            else:
                subject_review_map[domain_name] = [(subset_name, subset_score, num)]

        # Get domain score
        category_list = []
        for domain_name, domain_res_list in subject_review_map.items():
            domain_weighted_avg_acc = sum([score * num for _, score, num in domain_res_list]) / \
                sum([num for _, _, num in domain_res_list])
            domain_weighted_avg_acc = normalize_score(score=domain_weighted_avg_acc)
            category_list.append({'name': domain_name,
                                  'score': domain_weighted_avg_acc,
                                  'subset': [{'name': subset_name, 'score': normalize_score(score=subset_score)}
                                             for subset_name, subset_score, _ in domain_res_list]})

        category_list = sorted(category_list, key=lambda x: x['name'])

        # Get final dict of report
        res_map = dict(name=report_name or 'ceval',
                       metric=self.metric_list[0]['name'],
                       score=weighted_avg_acc,
                       category=category_list,
                       total_num=total_num)

        return res_map

    @classmethod
    def _format_example(cls, input_d: dict, include_answer=True):
        example = '问题:' + input_d['question']
        for choice in cls.choices:
            example += f'\n{choice}. {input_d[f"{choice}"]}'

        if include_answer:
            example += '\n答案: ' + input_d['answer'] + '\n\n'
        else:
            example += '\n答案: '
        return example
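For orientation, here is a minimal usage sketch of the adapter above. It is an assumption about direct use rather than the package's documented entry point (evaluation is normally driven by evalscope/run.py); the sample item simply mirrors the format shown in the gen_prompt docstring, and the subset name 'tax_accountant' is one entry of SUBSET_LIST.

# Hedged sketch: exercise CEVALAdapter directly (assumes evalscope==0.5.0 is installed).
from evalscope.benchmarks.ceval.ceval_adapter import CEVALAdapter

sample = {
    'id': 0,
    'question': '下列关于税法基本原则的表述中,不正确的是____。',
    'A': '税收法定原则包括税收要件法定原则和税务合法性原则',
    'B': '税收公平原则源于法律上的平等性原则',
    'C': '税收效率原则包含经济效率和行政效率两个方面',
    'D': '税务机关按法定程序依法征税,可以自由做出减征、停征或免征税款的决定',
    'answer': 'D',
    'explanation': '',
}

adapter = CEVALAdapter()  # defaults: 0-shot, train_split='dev', eval_split='val'
prompt = adapter.gen_prompt(input_d=sample, subset_name='tax_accountant', few_shot_list=[])
print(prompt['data'][0])                                 # zero-shot prompt text
pred = adapter.parse_pred_result('D', eval_type='service')  # pick the option letter out of a raw response
print(adapter.match(gold=sample['answer'], pred=pred))   # 1.0 when the strings match exactly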
evalscope/benchmarks/ceval/ceval_exam.py
@@ -0,0 +1,159 @@
# Copyright (c) Alibaba, Inc. and its affiliates.
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License
import os

import datasets
import pandas as pd
# flake8: noqa

"""DO NOT EDIT unless you are contributing a new dataset."""

_CITATION = """\
@article{huang2023ceval,
    title={C-Eval: A Multi-Level Multi-Discipline Chinese Evaluation Suite for Foundation Models},
    author={Huang, Yuzhen and Bai, Yuzhuo and Zhu, Zhihao and Zhang, Junlei and Zhang, Jinghan and Su, Tangjun and Liu, Junteng and Lv, Chuancheng and Zhang, Yikai and Lei, Jiayi and Fu, Yao and Sun, Maosong and He, Junxian},
    journal={arXiv preprint arXiv:2305.08322},
    year={2023}
}
"""

_DESCRIPTION = """\
C-Eval is a comprehensive Chinese evaluation suite for foundation models. It consists of 13948 multi-choice questions spanning 52 diverse disciplines and four difficulty levels.
"""

_HOMEPAGE = 'https://cevalbenchmark.com'

_LICENSE = 'Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License'

_URL = r'https://modelscope.oss-cn-beijing.aliyuncs.com/open_data/c-eval/ceval-exam.zip'


task_list = [
    'computer_network',
    'operating_system',
    'computer_architecture',
    'college_programming',
    'college_physics',
    'college_chemistry',
    'advanced_mathematics',
    'probability_and_statistics',
    'discrete_mathematics',
    'electrical_engineer',
    'metrology_engineer',
    'high_school_mathematics',
    'high_school_physics',
    'high_school_chemistry',
    'high_school_biology',
    'middle_school_mathematics',
    'middle_school_biology',
    'middle_school_physics',
    'middle_school_chemistry',
    'veterinary_medicine',
    'college_economics',
    'business_administration',
    'marxism',
    'mao_zedong_thought',
    'education_science',
    'teacher_qualification',
    'high_school_politics',
    'high_school_geography',
    'middle_school_politics',
    'middle_school_geography',
    'modern_chinese_history',
    'ideological_and_moral_cultivation',
    'logic',
    'law',
    'chinese_language_and_literature',
    'art_studies',
    'professional_tour_guide',
    'legal_professional',
    'high_school_chinese',
    'high_school_history',
    'middle_school_history',
    'civil_servant',
    'sports_science',
    'plant_protection',
    'basic_medicine',
    'clinical_medicine',
    'urban_and_rural_planner',
    'accountant',
    'fire_engineer',
    'environmental_impact_assessment_engineer',
    'tax_accountant',
    'physician',
]

class CevalExamConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        super().__init__(version=datasets.Version('1.0.0'), **kwargs)


class CevalExam(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        CevalExamConfig(
            name=task_name,
        )
        for task_name in task_list
    ]

    def _info(self):
        features = datasets.Features(
            {
                'id': datasets.Value('int32'),
                'question': datasets.Value('string'),
                'A': datasets.Value('string'),
                'B': datasets.Value('string'),
                'C': datasets.Value('string'),
                'D': datasets.Value('string'),
                'answer': datasets.Value('string'),
                'explanation': datasets.Value('string'),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URL)
        task_name = self.config.name
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    'filepath': os.path.join(
                        data_dir, 'test', f'{task_name}_test.csv'
                    ),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split('val'),
                gen_kwargs={
                    'filepath': os.path.join(
                        data_dir, 'val', f'{task_name}_val.csv'
                    ),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split('dev'),
                gen_kwargs={
                    'filepath': os.path.join(
                        data_dir, 'dev', f'{task_name}_dev.csv'
                    ),
                },
            ),
        ]

    def _generate_examples(self, filepath):
        df = pd.read_csv(filepath, encoding='utf-8')
        for i, instance in enumerate(df.to_dict(orient='records')):
            if 'answer' not in instance.keys():
                instance['answer'] = ''
            if 'explanation' not in instance.keys():
                instance['explanation'] = ''
            yield i, instance
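Since ceval_exam.py is a standard Hugging Face datasets builder script, it can also be loaded on its own. The sketch below is an assumption about typical usage: the local script path is illustrative, and it presumes a datasets release that still accepts local builder scripts.

# Hedged sketch: load one C-Eval subject through the builder script above.
from datasets import load_dataset

ds = load_dataset(
    'path/to/evalscope/benchmarks/ceval/ceval_exam.py',  # illustrative path to the script
    name='computer_network',                             # any entry of task_list
    split='val',
)
print(ds[0])  # {'id': ..., 'question': ..., 'A'..'D': ..., 'answer': ..., 'explanation': ...}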
evalscope/benchmarks/cmmlu/__init__.py
@@ -0,0 +1,5 @@
# Copyright (c) Alibaba, Inc. and its affiliates.

from evalscope.benchmarks.cmmlu.cmmlu_adapter import DATASET_ID, SUBJECT_MAPPING, SUBSET_LIST, CMMLUAdapter
from evalscope.benchmarks.cmmlu.cmmlu_adapter import CMMLUAdapter as DataAdapterClass
from evalscope.models.model_adapter import MultiChoiceModelAdapter as ModelAdapterClass  # noqa
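The __init__.py above follows the package's per-benchmark convention of re-exporting a DataAdapterClass/ModelAdapterClass pair. The snippet below is a hypothetical illustration of how a generic runner could resolve that pair from a module name; it is not taken from evalscope's own run.py.

# Hypothetical illustration only (assumes evalscope==0.5.0 is installed).
import importlib

benchmark = importlib.import_module('evalscope.benchmarks.cmmlu')
data_adapter_cls = benchmark.DataAdapterClass     # CMMLUAdapter
model_adapter_cls = benchmark.ModelAdapterClass   # MultiChoiceModelAdapter
print(data_adapter_cls.__name__, model_adapter_cls.__name__)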
evalscope/benchmarks/cmmlu/cmmlu.py
@@ -0,0 +1,166 @@
# Copyright (c) Alibaba, Inc. and its affiliates.
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa

import os
import datasets
import pandas as pd


_CITATION = """\
@misc{li2023cmmlu,
    title={CMMLU: Measuring massive multitask language understanding in Chinese},
    author={Haonan Li and Yixuan Zhang and Fajri Koto and Yifei Yang and Hai Zhao and Yeyun Gong and Nan Duan and Timothy Baldwin},
    year={2023},
    eprint={2306.09212},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
CMMLU is a comprehensive Chinese assessment suite specifically designed to evaluate the advanced knowledge and reasoning abilities of LLMs within the Chinese language and cultural context.
"""

_HOMEPAGE = "https://modelscope.cn/datasets/modelscope/cmmlu/summary"


# _URL = r"https://huggingface.co/datasets/haonan-li/cmmlu/resolve/main/cmmlu_v1_0_1.zip"
_URL = r"https://modelscope.cn/api/v1/datasets/modelscope/cmmlu/repo?Revision=master&FilePath=cmmlu_v1_0_1.zip"

# contains 67 sub-tasks
task_list = [
    'agronomy',
    'anatomy',
    'ancient_chinese',
    'arts',
    'astronomy',
    'business_ethics',
    'chinese_civil_service_exam',
    'chinese_driving_rule',
    'chinese_food_culture',
    'chinese_foreign_policy',
    'chinese_history',
    'chinese_literature',
    'chinese_teacher_qualification',
    'clinical_knowledge',
    'college_actuarial_science',
    'college_education',
    'college_engineering_hydrology',
    'college_law',
    'college_mathematics',
    'college_medical_statistics',
    'college_medicine',
    'computer_science',
    'computer_security',
    'conceptual_physics',
    'construction_project_management',
    'economics',
    'education',
    'electrical_engineering',
    'elementary_chinese',
    'elementary_commonsense',
    'elementary_information_and_technology',
    'elementary_mathematics',
    'ethnology',
    'food_science',
    'genetics',
    'global_facts',
    'high_school_biology',
    'high_school_chemistry',
    'high_school_geography',
    'high_school_mathematics',
    'high_school_physics',
    'high_school_politics',
    'human_sexuality',
    'international_law',
    'journalism',
    'jurisprudence',
    'legal_and_moral_basis',
    'logical',
    'machine_learning',
    'management',
    'marketing',
    'marxist_theory',
    'modern_chinese',
    'nutrition',
    'philosophy',
    'professional_accounting',
    'professional_law',
    'professional_medicine',
    'professional_psychology',
    'public_relations',
    'security_study',
    'sociology',
    'sports_science',
    'traditional_chinese_medicine',
    'virology',
    'world_history',
    'world_religions',
]


class CMMLUConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        super().__init__(version=datasets.Version("1.0.1"), **kwargs)
        # V1.0.1 Fix: One comma missing in word_religions.csv
        # V1.0.0 Init version


class CMMLU(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        CMMLUConfig(name=task_name) for task_name in task_list
    ]

    def _info(self):
        features = datasets.Features(
            {
                "Question": datasets.Value("string"),
                "A": datasets.Value("string"),
                "B": datasets.Value("string"),
                "C": datasets.Value("string"),
                "D": datasets.Value("string"),
                "Answer": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URL)
        task_name = self.config.name
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, f"test/{task_name}.csv"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split("dev"),
                gen_kwargs={
                    "filepath": os.path.join(data_dir, f"dev/{task_name}.csv"),
                },
            ),
        ]

    def _generate_examples(self, filepath):
        df = pd.read_csv(filepath, header=0, index_col=0, encoding="utf-8")
        for i, instance in enumerate(df.to_dict(orient="records")):
            yield i, instance
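Unlike the C-Eval script, the CMMLU builder reads CSVs with an index column and capitalized headers, and it exposes only test and dev splits. A small illustrative sketch of what _generate_examples yields for one extracted file follows; the local path and task name are assumptions for illustration.

# Illustrative only: read one extracted CMMLU CSV the same way _generate_examples does.
import pandas as pd

df = pd.read_csv('cmmlu_v1_0_1/test/agronomy.csv', header=0, index_col=0, encoding='utf-8')
for i, instance in enumerate(df.to_dict(orient='records')):
    # keys are 'Question', 'A', 'B', 'C', 'D', 'Answer' (capitalized, unlike C-Eval)
    print(i, instance['Answer'])
    break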