evalscope 0.7.2__py3-none-any.whl → 0.8.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- evalscope/__init__.py +1 -1
- evalscope/arguments.py +73 -0
- evalscope/backend/base.py +5 -1
- evalscope/backend/opencompass/api_meta_template.py +8 -14
- evalscope/backend/opencompass/backend_manager.py +24 -15
- evalscope/backend/opencompass/tasks/eval_api.py +1 -6
- evalscope/backend/opencompass/tasks/eval_datasets.py +26 -28
- evalscope/backend/rag_eval/__init__.py +3 -3
- evalscope/backend/rag_eval/backend_manager.py +21 -25
- evalscope/backend/rag_eval/clip_benchmark/__init__.py +1 -1
- evalscope/backend/rag_eval/clip_benchmark/arguments.py +6 -6
- evalscope/backend/rag_eval/clip_benchmark/dataset_builder.py +62 -79
- evalscope/backend/rag_eval/clip_benchmark/task_template.py +29 -43
- evalscope/backend/rag_eval/clip_benchmark/tasks/image_caption.py +20 -22
- evalscope/backend/rag_eval/clip_benchmark/tasks/zeroshot_classification.py +16 -23
- evalscope/backend/rag_eval/clip_benchmark/tasks/zeroshot_retrieval.py +14 -35
- evalscope/backend/rag_eval/clip_benchmark/utils/webdataset_convert.py +69 -90
- evalscope/backend/rag_eval/cmteb/__init__.py +3 -3
- evalscope/backend/rag_eval/cmteb/arguments.py +25 -27
- evalscope/backend/rag_eval/cmteb/base.py +22 -23
- evalscope/backend/rag_eval/cmteb/task_template.py +15 -17
- evalscope/backend/rag_eval/cmteb/tasks/Classification.py +98 -79
- evalscope/backend/rag_eval/cmteb/tasks/Clustering.py +17 -22
- evalscope/backend/rag_eval/cmteb/tasks/CustomTask.py +17 -19
- evalscope/backend/rag_eval/cmteb/tasks/PairClassification.py +35 -29
- evalscope/backend/rag_eval/cmteb/tasks/Reranking.py +18 -5
- evalscope/backend/rag_eval/cmteb/tasks/Retrieval.py +163 -163
- evalscope/backend/rag_eval/cmteb/tasks/STS.py +126 -104
- evalscope/backend/rag_eval/cmteb/tasks/__init__.py +33 -34
- evalscope/backend/rag_eval/ragas/__init__.py +2 -2
- evalscope/backend/rag_eval/ragas/arguments.py +3 -8
- evalscope/backend/rag_eval/ragas/prompts/chinese/AnswerCorrectness/correctness_prompt_chinese.json +9 -9
- evalscope/backend/rag_eval/ragas/prompts/chinese/AnswerCorrectness/long_form_answer_prompt_chinese.json +2 -2
- evalscope/backend/rag_eval/ragas/prompts/chinese/AnswerRelevancy/question_generation_chinese.json +3 -3
- evalscope/backend/rag_eval/ragas/prompts/chinese/ContextPrecision/context_precision_prompt_chinese.json +5 -5
- evalscope/backend/rag_eval/ragas/prompts/chinese/CustomNodeFilter/scoring_prompt_chinese.json +7 -0
- evalscope/backend/rag_eval/ragas/prompts/chinese/Faithfulness/nli_statements_message_chinese.json +8 -8
- evalscope/backend/rag_eval/ragas/prompts/chinese/Faithfulness/statement_prompt_chinese.json +5 -5
- evalscope/backend/rag_eval/ragas/prompts/chinese/HeadlinesExtractor/prompt_chinese.json +7 -5
- evalscope/backend/rag_eval/ragas/prompts/chinese/MultiHopAbstractQuerySynthesizer/concept_combination_prompt_chinese.json +2 -2
- evalscope/backend/rag_eval/ragas/prompts/chinese/MultiHopAbstractQuerySynthesizer/generate_query_reference_prompt_chinese.json +27 -4
- evalscope/backend/rag_eval/ragas/prompts/chinese/MultiHopAbstractQuerySynthesizer/theme_persona_matching_prompt_chinese.json +2 -2
- evalscope/backend/rag_eval/ragas/prompts/chinese/MultiHopSpecificQuerySynthesizer/generate_query_reference_prompt_chinese.json +27 -4
- evalscope/backend/rag_eval/ragas/prompts/chinese/MultiHopSpecificQuerySynthesizer/theme_persona_matching_prompt_chinese.json +2 -2
- evalscope/backend/rag_eval/ragas/prompts/chinese/MultiModalFaithfulness/faithfulness_prompt_chinese.json +2 -2
- evalscope/backend/rag_eval/ragas/prompts/chinese/MultiModalRelevance/relevance_prompt_chinese.json +5 -5
- evalscope/backend/rag_eval/ragas/prompts/chinese/NERExtractor/prompt_chinese.json +3 -3
- evalscope/backend/rag_eval/ragas/prompts/chinese/SingleHopSpecificQuerySynthesizer/generate_query_reference_prompt_chinese.json +21 -4
- evalscope/backend/rag_eval/ragas/prompts/chinese/SingleHopSpecificQuerySynthesizer/theme_persona_matching_prompt_chinese.json +3 -3
- evalscope/backend/rag_eval/ragas/prompts/chinese/SummaryExtractor/prompt_chinese.json +4 -4
- evalscope/backend/rag_eval/ragas/prompts/chinese/ThemesExtractor/prompt_chinese.json +2 -2
- evalscope/backend/rag_eval/ragas/prompts/persona_prompt.py +0 -1
- evalscope/backend/rag_eval/ragas/task_template.py +10 -15
- evalscope/backend/rag_eval/ragas/tasks/__init__.py +1 -1
- evalscope/backend/rag_eval/ragas/tasks/build_distribution.py +45 -0
- evalscope/backend/rag_eval/ragas/tasks/build_transform.py +135 -0
- evalscope/backend/rag_eval/ragas/tasks/testset_generation.py +17 -133
- evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py +8 -18
- evalscope/backend/rag_eval/utils/clip.py +46 -50
- evalscope/backend/rag_eval/utils/embedding.py +12 -11
- evalscope/backend/rag_eval/utils/llm.py +8 -6
- evalscope/backend/rag_eval/utils/tools.py +12 -11
- evalscope/backend/vlm_eval_kit/__init__.py +1 -1
- evalscope/backend/vlm_eval_kit/custom_dataset.py +7 -8
- evalscope/benchmarks/arc/__init__.py +3 -2
- evalscope/benchmarks/arc/ai2_arc.py +19 -16
- evalscope/benchmarks/arc/arc_adapter.py +32 -24
- evalscope/benchmarks/bbh/__init__.py +1 -2
- evalscope/benchmarks/bbh/bbh_adapter.py +28 -25
- evalscope/benchmarks/bbh/cot_prompts/boolean_expressions.txt +1 -1
- evalscope/benchmarks/bbh/cot_prompts/causal_judgement.txt +1 -1
- evalscope/benchmarks/bbh/cot_prompts/date_understanding.txt +1 -1
- evalscope/benchmarks/bbh/cot_prompts/disambiguation_qa.txt +1 -1
- evalscope/benchmarks/bbh/cot_prompts/dyck_languages.txt +1 -1
- evalscope/benchmarks/bbh/cot_prompts/formal_fallacies.txt +1 -1
- evalscope/benchmarks/bbh/cot_prompts/geometric_shapes.txt +1 -1
- evalscope/benchmarks/bbh/cot_prompts/hyperbaton.txt +1 -1
- evalscope/benchmarks/bbh/cot_prompts/logical_deduction_five_objects.txt +1 -1
- evalscope/benchmarks/bbh/cot_prompts/logical_deduction_seven_objects.txt +1 -1
- evalscope/benchmarks/bbh/cot_prompts/logical_deduction_three_objects.txt +1 -1
- evalscope/benchmarks/bbh/cot_prompts/movie_recommendation.txt +1 -1
- evalscope/benchmarks/bbh/cot_prompts/multistep_arithmetic_two.txt +1 -1
- evalscope/benchmarks/bbh/cot_prompts/navigate.txt +1 -1
- evalscope/benchmarks/bbh/cot_prompts/object_counting.txt +1 -1
- evalscope/benchmarks/bbh/cot_prompts/penguins_in_a_table.txt +1 -1
- evalscope/benchmarks/bbh/cot_prompts/reasoning_about_colored_objects.txt +1 -1
- evalscope/benchmarks/bbh/cot_prompts/ruin_names.txt +1 -1
- evalscope/benchmarks/bbh/cot_prompts/salient_translation_error_detection.txt +1 -1
- evalscope/benchmarks/bbh/cot_prompts/snarks.txt +1 -1
- evalscope/benchmarks/bbh/cot_prompts/sports_understanding.txt +1 -1
- evalscope/benchmarks/bbh/cot_prompts/temporal_sequences.txt +1 -1
- evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_five_objects.txt +1 -1
- evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_seven_objects.txt +1 -1
- evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_three_objects.txt +1 -1
- evalscope/benchmarks/bbh/cot_prompts/web_of_lies.txt +1 -1
- evalscope/benchmarks/bbh/cot_prompts/word_sorting.txt +1 -1
- evalscope/benchmarks/benchmark.py +16 -16
- evalscope/benchmarks/ceval/__init__.py +3 -2
- evalscope/benchmarks/ceval/ceval_adapter.py +80 -69
- evalscope/benchmarks/ceval/ceval_exam.py +18 -31
- evalscope/benchmarks/cmmlu/__init__.py +3 -2
- evalscope/benchmarks/cmmlu/cmmlu.py +87 -92
- evalscope/benchmarks/cmmlu/cmmlu_adapter.py +109 -155
- evalscope/benchmarks/cmmlu/samples.jsonl +1 -1
- evalscope/benchmarks/competition_math/__init__.py +3 -2
- evalscope/benchmarks/competition_math/competition_math.py +7 -16
- evalscope/benchmarks/competition_math/competition_math_adapter.py +32 -34
- evalscope/benchmarks/data_adapter.py +24 -24
- evalscope/benchmarks/general_qa/__init__.py +3 -2
- evalscope/benchmarks/general_qa/general_qa_adapter.py +34 -38
- evalscope/benchmarks/gsm8k/__init__.py +1 -1
- evalscope/benchmarks/gsm8k/gsm8k.py +6 -12
- evalscope/benchmarks/gsm8k/gsm8k_adapter.py +26 -24
- evalscope/benchmarks/hellaswag/__init__.py +3 -2
- evalscope/benchmarks/hellaswag/hellaswag.py +15 -19
- evalscope/benchmarks/hellaswag/hellaswag_adapter.py +27 -23
- evalscope/benchmarks/humaneval/__init__.py +1 -1
- evalscope/benchmarks/humaneval/humaneval.py +15 -18
- evalscope/benchmarks/humaneval/humaneval_adapter.py +0 -1
- evalscope/benchmarks/mmlu/__init__.py +3 -2
- evalscope/benchmarks/mmlu/mmlu.py +15 -29
- evalscope/benchmarks/mmlu/mmlu_adapter.py +85 -77
- evalscope/benchmarks/race/__init__.py +3 -2
- evalscope/benchmarks/race/race.py +21 -35
- evalscope/benchmarks/race/race_adapter.py +32 -29
- evalscope/benchmarks/race/samples.jsonl +1 -1
- evalscope/benchmarks/trivia_qa/__init__.py +3 -2
- evalscope/benchmarks/trivia_qa/samples.jsonl +1 -1
- evalscope/benchmarks/trivia_qa/trivia_qa.py +19 -34
- evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +27 -22
- evalscope/benchmarks/truthful_qa/__init__.py +3 -2
- evalscope/benchmarks/truthful_qa/truthful_qa.py +25 -29
- evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +36 -37
- evalscope/cli/cli.py +6 -5
- evalscope/cli/start_eval.py +31 -0
- evalscope/cli/start_perf.py +0 -3
- evalscope/cli/start_server.py +27 -41
- evalscope/config.py +119 -95
- evalscope/constants.py +61 -29
- evalscope/evaluator/__init__.py +1 -0
- evalscope/evaluator/evaluator.py +96 -377
- evalscope/evaluator/humaneval_evaluator.py +158 -0
- evalscope/evaluator/rating_eval.py +12 -33
- evalscope/evaluator/reviewer/auto_reviewer.py +47 -76
- evalscope/metrics/bundled_rouge_score/rouge_scorer.py +10 -20
- evalscope/metrics/code_metric.py +3 -9
- evalscope/metrics/math_accuracy.py +3 -6
- evalscope/metrics/metrics.py +21 -21
- evalscope/metrics/rouge_metric.py +11 -25
- evalscope/models/__init__.py +1 -2
- evalscope/models/api/openai_api.py +40 -29
- evalscope/models/custom/__init__.py +0 -1
- evalscope/models/custom/custom_model.py +3 -3
- evalscope/models/dummy_chat_model.py +7 -8
- evalscope/models/model_adapter.py +89 -156
- evalscope/models/openai_model.py +20 -20
- evalscope/perf/arguments.py +15 -3
- evalscope/perf/benchmark.py +7 -9
- evalscope/perf/http_client.py +3 -8
- evalscope/perf/main.py +10 -0
- evalscope/perf/plugin/api/custom_api.py +1 -2
- evalscope/perf/plugin/api/dashscope_api.py +1 -2
- evalscope/perf/plugin/api/openai_api.py +2 -3
- evalscope/perf/plugin/datasets/base.py +1 -2
- evalscope/perf/plugin/datasets/flickr8k.py +1 -2
- evalscope/perf/plugin/datasets/longalpaca.py +1 -2
- evalscope/perf/plugin/datasets/openqa.py +1 -2
- evalscope/perf/utils/analysis_result.py +1 -2
- evalscope/perf/utils/benchmark_util.py +1 -2
- evalscope/perf/utils/db_util.py +11 -8
- evalscope/perf/utils/local_server.py +19 -13
- evalscope/registry/config/cfg_arena_zhihu.yaml +1 -1
- evalscope/registry/tasks/arc.yaml +2 -3
- evalscope/registry/tasks/bbh.yaml +3 -4
- evalscope/registry/tasks/bbh_mini.yaml +3 -4
- evalscope/registry/tasks/ceval.yaml +3 -3
- evalscope/registry/tasks/ceval_mini.yaml +3 -4
- evalscope/registry/tasks/cmmlu.yaml +3 -3
- evalscope/registry/tasks/eval_qwen-7b-chat_v100.yaml +1 -1
- evalscope/registry/tasks/general_qa.yaml +1 -1
- evalscope/registry/tasks/gsm8k.yaml +2 -2
- evalscope/registry/tasks/mmlu.yaml +3 -3
- evalscope/registry/tasks/mmlu_mini.yaml +3 -3
- evalscope/run.py +184 -375
- evalscope/run_arena.py +20 -25
- evalscope/summarizer.py +16 -17
- evalscope/third_party/longbench_write/README.md +99 -42
- evalscope/third_party/longbench_write/default_task.json +1 -1
- evalscope/third_party/longbench_write/default_task.yaml +8 -7
- evalscope/third_party/longbench_write/eval.py +29 -28
- evalscope/third_party/longbench_write/infer.py +16 -104
- evalscope/third_party/longbench_write/longbench_write.py +5 -5
- evalscope/third_party/longbench_write/resources/judge.txt +1 -1
- evalscope/third_party/longbench_write/tools/data_etl.py +4 -5
- evalscope/third_party/longbench_write/utils.py +0 -1
- evalscope/third_party/toolbench_static/eval.py +14 -15
- evalscope/third_party/toolbench_static/infer.py +48 -69
- evalscope/third_party/toolbench_static/llm/swift_infer.py +4 -12
- evalscope/third_party/toolbench_static/requirements.txt +1 -1
- evalscope/third_party/toolbench_static/toolbench_static.py +3 -3
- evalscope/tools/combine_reports.py +25 -30
- evalscope/tools/rewrite_eval_results.py +14 -46
- evalscope/utils/__init__.py +0 -1
- evalscope/utils/arena_utils.py +18 -48
- evalscope/{perf/utils → utils}/chat_service.py +3 -4
- evalscope/utils/completion_parsers.py +3 -8
- evalscope/utils/logger.py +9 -7
- evalscope/utils/model_utils.py +11 -0
- evalscope/utils/utils.py +12 -138
- evalscope/version.py +2 -2
- {evalscope-0.7.2.dist-info → evalscope-0.8.0.dist-info}/METADATA +123 -118
- evalscope-0.8.0.dist-info/RECORD +285 -0
- tests/cli/test_run.py +54 -15
- tests/perf/test_perf.py +4 -0
- tests/rag/test_clip_benchmark.py +38 -38
- tests/rag/test_mteb.py +3 -2
- tests/rag/test_ragas.py +5 -5
- tests/swift/test_run_swift_eval.py +2 -3
- tests/swift/test_run_swift_vlm_eval.py +2 -3
- tests/swift/test_run_swift_vlm_jugde_eval.py +2 -3
- evalscope/backend/rag_eval/ragas/metrics/__init__.py +0 -2
- evalscope/backend/rag_eval/ragas/metrics/multi_modal_faithfulness.py +0 -91
- evalscope/backend/rag_eval/ragas/metrics/multi_modal_relevance.py +0 -99
- evalscope/cache.py +0 -98
- evalscope/models/template.py +0 -1446
- evalscope/run_ms.py +0 -140
- evalscope/utils/task_cfg_parser.py +0 -10
- evalscope/utils/task_utils.py +0 -22
- evalscope-0.7.2.dist-info/RECORD +0 -286
- {evalscope-0.7.2.dist-info → evalscope-0.8.0.dist-info}/LICENSE +0 -0
- {evalscope-0.7.2.dist-info → evalscope-0.8.0.dist-info}/WHEEL +0 -0
- {evalscope-0.7.2.dist-info → evalscope-0.8.0.dist-info}/entry_points.txt +0 -0
- {evalscope-0.7.2.dist-info → evalscope-0.8.0.dist-info}/top_level.txt +0 -0

evalscope/benchmarks/ceval/ceval_adapter.py
@@ -1,9 +1,10 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
 import csv
 import os
+
 from evalscope.benchmarks.data_adapter import DataAdapter
 from evalscope.metrics.metrics import exact_match, weighted_mean
-from evalscope.utils import
+from evalscope.utils import ResponseParser, normalize_score
 from evalscope.utils.logger import get_logger
 
 # flake8: noqa
@@ -67,58 +68,60 @@ SUBSET_LIST = [
     'physician',
 ]
 
-SUBJECT_MAPPING = {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+SUBJECT_MAPPING = {
+    'computer_network': ['Computer Network', '计算机网络', 'STEM'],
+    'operating_system': ['Operating System', '操作系统', 'STEM'],
+    'computer_architecture': ['Computer Architecture', '计算机组成', 'STEM'],
+    'college_programming': ['College Programming', '大学编程', 'STEM'],
+    'college_physics': ['College Physics', '大学物理', 'STEM'],
+    'college_chemistry': ['College Chemistry', '大学化学', 'STEM'],
+    'advanced_mathematics': ['Advanced Mathematics', '高等数学', 'STEM'],
+    'probability_and_statistics': ['Probability and Statistics', '概率统计', 'STEM'],
+    'discrete_mathematics': ['Discrete Mathematics', '离散数学', 'STEM'],
+    'electrical_engineer': ['Electrical Engineer', '注册电气工程师', 'STEM'],
+    'metrology_engineer': ['Metrology Engineer', '注册计量师', 'STEM'],
+    'high_school_mathematics': ['High School Mathematics', '高中数学', 'STEM'],
+    'high_school_physics': ['High School Physics', '高中物理', 'STEM'],
+    'high_school_chemistry': ['High School Chemistry', '高中化学', 'STEM'],
+    'high_school_biology': ['High School Biology', '高中生物', 'STEM'],
+    'middle_school_mathematics': ['Middle School Mathematics', '初中数学', 'STEM'],
+    'middle_school_biology': ['Middle School Biology', '初中生物', 'STEM'],
+    'middle_school_physics': ['Middle School Physics', '初中物理', 'STEM'],
+    'middle_school_chemistry': ['Middle School Chemistry', '初中化学', 'STEM'],
+    'veterinary_medicine': ['Veterinary Medicine', '兽医学', 'STEM'],
+    'college_economics': ['College Economics', '大学经济学', 'Social Science'],
+    'business_administration': ['Business Administration', '工商管理', 'Social Science'],
+    'marxism': ['Marxism', '马克思主义基本原理', 'Social Science'],
+    'mao_zedong_thought': ['Mao Zedong Thought', '毛泽东思想和中国特色社会主义理论体系概论', 'Social Science'],
+    'education_science': ['Education Science', '教育学', 'Social Science'],
+    'teacher_qualification': ['Teacher Qualification', '教师资格', 'Social Science'],
+    'high_school_politics': ['High School Politics', '高中政治', 'Social Science'],
+    'high_school_geography': ['High School Geography', '高中地理', 'Social Science'],
+    'middle_school_politics': ['Middle School Politics', '初中政治', 'Social Science'],
+    'middle_school_geography': ['Middle School Geography', '初中地理', 'Social Science'],
+    'modern_chinese_history': ['Modern Chinese History', '近代史纲要', 'Humanities'],
+    'ideological_and_moral_cultivation': ['Ideological and Moral Cultivation', '思想道德修养与法律基础', 'Humanities'],
+    'logic': ['Logic', '逻辑学', 'Humanities'],
+    'law': ['Law', '法学', 'Humanities'],
+    'chinese_language_and_literature': ['Chinese Language and Literature', '中国语言文学', 'Humanities'],
+    'art_studies': ['Art Studies', '艺术学', 'Humanities'],
+    'professional_tour_guide': ['Professional Tour Guide', '导游资格', 'Humanities'],
+    'legal_professional': ['Legal Professional', '法律职业资格', 'Humanities'],
+    'high_school_chinese': ['High School Chinese', '高中语文', 'Humanities'],
+    'high_school_history': ['High School History', '高中历史', 'Humanities'],
+    'middle_school_history': ['Middle School History', '初中历史', 'Humanities'],
+    'civil_servant': ['Civil Servant', '公务员', 'Other'],
+    'sports_science': ['Sports Science', '体育学', 'Other'],
+    'plant_protection': ['Plant Protection', '植物保护', 'Other'],
+    'basic_medicine': ['Basic Medicine', '基础医学', 'Other'],
+    'clinical_medicine': ['Clinical Medicine', '临床医学', 'Other'],
+    'urban_and_rural_planner': ['Urban and Rural Planner', '注册城乡规划师', 'Other'],
+    'accountant': ['Accountant', '注册会计师', 'Other'],
+    'fire_engineer': ['Fire Engineer', '注册消防工程师', 'Other'],
+    'environmental_impact_assessment_engineer': ['Environmental Impact Assessment Engineer', '环境影响评价工程师', 'Other'],
+    'tax_accountant': ['Tax Accountant', '税务师', 'Other'],
+    'physician': ['Physician', '医师资格', 'Other']
+}
 
 
 class CEVALAdapter(DataAdapter):
@@ -148,12 +151,13 @@ class CEVALAdapter(DataAdapter):
             logger.warning(f'few_shot_num <= 5 for C-Eval, but got {few_shot_num}. Use 5-shot by default.')
             few_shot_num = 5
 
-        super().__init__(
-
-
-
-
-
+        super().__init__(
+            subset_list=subset_list,
+            metric_list=metric_list,
+            few_shot_num=few_shot_num,
+            train_split=train_split,
+            eval_split=eval_split,
+            **kwargs)
 
     def load_from_disk(self, dataset_name_or_path, subset_list, work_dir, **kwargs) -> dict:
         data_dict = {}
@@ -211,7 +215,7 @@ class CEVALAdapter(DataAdapter):
         full_prompt: str = context.strip() + self._format_example(input_d=input_d, include_answer=False)
 
         subject_name: str = SUBJECT_MAPPING.get(subset_name)[1] if SUBJECT_MAPPING.get(subset_name) else subset_name
-        full_prompt = f
+        full_prompt = f'以下是中国关于{subject_name}考试的单项选择题,请选出其中的正确答案。\n' + full_prompt
 
         return {'data': [full_prompt], 'multi_choices': self.choices}
 
@@ -311,19 +315,26 @@ class CEVALAdapter(DataAdapter):
             domain_weighted_avg_acc = sum([score * num for _, score, num in domain_res_list]) / \
                 sum([num for _, _, num in domain_res_list])
             domain_weighted_avg_acc = normalize_score(score=domain_weighted_avg_acc)
-            category_list.append({
-
-
-
+            category_list.append({
+                'name':
+                domain_name,
+                'score':
+                domain_weighted_avg_acc,
+                'subset': [{
+                    'name': subset_name,
+                    'score': normalize_score(score=subset_score)
+                } for subset_name, subset_score, _ in domain_res_list]
+            })
 
         category_list = sorted(category_list, key=lambda x: x['name'])
 
         # Get final dict of report
-        res_map = dict(
-
-
-
-
+        res_map = dict(
+            name=report_name or 'ceval',
+            metric=self.metric_list[0]['name'],
+            score=weighted_avg_acc,
+            category=category_list,
+            total_num=total_num)
 
         return res_map
 

evalscope/benchmarks/ceval/ceval_exam.py
@@ -2,12 +2,11 @@
 # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
 #
 # Licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License
-import os
-
 import datasets
+import os
 import pandas as pd
-# flake8: noqa
 
+# flake8: noqa
 """DO NOT EDIT unless you are contributing a new dataset."""
 
 _CITATION = """\
@@ -29,7 +28,6 @@ _LICENSE = 'Creative Commons Attribution-NonCommercial-ShareAlike 4.0 Internatio
 
 _URL = r'https://modelscope.oss-cn-beijing.aliyuncs.com/open_data/c-eval/ceval-exam.zip'
 
-
 task_list = [
     'computer_network',
     'operating_system',
@@ -85,32 +83,27 @@ task_list = [
     'physician',
 ]
 
+
 class CevalExamConfig(datasets.BuilderConfig):
+
     def __init__(self, **kwargs):
         super().__init__(version=datasets.Version('1.0.0'), **kwargs)
 
 
 class CevalExam(datasets.GeneratorBasedBuilder):
-    BUILDER_CONFIGS = [
-        CevalExamConfig(
-            name=task_name,
-        )
-        for task_name in task_list
-    ]
+    BUILDER_CONFIGS = [CevalExamConfig(name=task_name, ) for task_name in task_list]
 
     def _info(self):
-        features = datasets.Features(
-
-
-
-
-
-
-
-
-
-            }
-        )
+        features = datasets.Features({
+            'id': datasets.Value('int32'),
+            'question': datasets.Value('string'),
+            'A': datasets.Value('string'),
+            'B': datasets.Value('string'),
+            'C': datasets.Value('string'),
+            'D': datasets.Value('string'),
+            'answer': datasets.Value('string'),
+            'explanation': datasets.Value('string'),
+        })
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
             features=features,
@@ -126,25 +119,19 @@ class CevalExam(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    'filepath': os.path.join(
-                        data_dir, 'test', f'{task_name}_test.csv'
-                    ),
+                    'filepath': os.path.join(data_dir, 'test', f'{task_name}_test.csv'),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split('val'),
                 gen_kwargs={
-                    'filepath': os.path.join(
-                        data_dir, 'val', f'{task_name}_val.csv'
-                    ),
+                    'filepath': os.path.join(data_dir, 'val', f'{task_name}_val.csv'),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split('dev'),
                 gen_kwargs={
-                    'filepath': os.path.join(
-                        data_dir, 'dev', f'{task_name}_dev.csv'
-                    ),
+                    'filepath': os.path.join(data_dir, 'dev', f'{task_name}_dev.csv'),
                 },
             ),
         ]

evalscope/benchmarks/cmmlu/__init__.py
@@ -1,5 +1,6 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
 
-from evalscope.benchmarks.cmmlu.cmmlu_adapter import DATASET_ID, SUBJECT_MAPPING, SUBSET_LIST
+from evalscope.benchmarks.cmmlu.cmmlu_adapter import DATASET_ID, SUBJECT_MAPPING, SUBSET_LIST
+from evalscope.benchmarks.cmmlu.cmmlu_adapter import CMMLUAdapter
 from evalscope.benchmarks.cmmlu.cmmlu_adapter import CMMLUAdapter as DataAdapterClass
-from evalscope.models.model_adapter import MultiChoiceModelAdapter as ModelAdapterClass
+from evalscope.models.model_adapter import MultiChoiceModelAdapter as ModelAdapterClass  # noqa

evalscope/benchmarks/cmmlu/cmmlu.py
@@ -14,11 +14,10 @@
 # limitations under the License.
 # flake8: noqa
 
-import os
 import datasets
+import os
 import pandas as pd
 
-
 _CITATION = """\
 @misc{li2023cmmlu,
       title={CMMLU: Measuring massive multitask language understanding in Chinese},
@@ -34,107 +33,103 @@ _DESCRIPTION = """\
 CMMLU is a comprehensive Chinese assessment suite specifically designed to evaluate the advanced knowledge and reasoning abilities of LLMs within the Chinese language and cultural context.
 """
 
-_HOMEPAGE =
-
+_HOMEPAGE = 'https://modelscope.cn/datasets/modelscope/cmmlu/summary'
 
 # _URL = r"https://huggingface.co/datasets/haonan-li/cmmlu/resolve/main/cmmlu_v1_0_1.zip"
-_URL = r
+_URL = r'https://modelscope.cn/api/v1/datasets/modelscope/cmmlu/repo?Revision=master&FilePath=cmmlu_v1_0_1.zip'
 
 # contains 67 sub-tasks
 task_list = [
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    'agronomy',
+    'anatomy',
+    'ancient_chinese',
+    'arts',
+    'astronomy',
+    'business_ethics',
+    'chinese_civil_service_exam',
+    'chinese_driving_rule',
+    'chinese_food_culture',
+    'chinese_foreign_policy',
+    'chinese_history',
+    'chinese_literature',
+    'chinese_teacher_qualification',
+    'clinical_knowledge',
+    'college_actuarial_science',
+    'college_education',
+    'college_engineering_hydrology',
+    'college_law',
+    'college_mathematics',
+    'college_medical_statistics',
+    'college_medicine',
+    'computer_science',
+    'computer_security',
+    'conceptual_physics',
+    'construction_project_management',
+    'economics',
+    'education',
+    'electrical_engineering',
+    'elementary_chinese',
+    'elementary_commonsense',
+    'elementary_information_and_technology',
+    'elementary_mathematics',
+    'ethnology',
+    'food_science',
+    'genetics',
+    'global_facts',
+    'high_school_biology',
+    'high_school_chemistry',
+    'high_school_geography',
+    'high_school_mathematics',
+    'high_school_physics',
+    'high_school_politics',
+    'human_sexuality',
+    'international_law',
+    'journalism',
+    'jurisprudence',
+    'legal_and_moral_basis',
+    'logical',
+    'machine_learning',
+    'management',
+    'marketing',
+    'marxist_theory',
+    'modern_chinese',
+    'nutrition',
+    'philosophy',
+    'professional_accounting',
+    'professional_law',
+    'professional_medicine',
+    'professional_psychology',
+    'public_relations',
+    'security_study',
+    'sociology',
+    'sports_science',
+    'traditional_chinese_medicine',
+    'virology',
+    'world_history',
+    'world_religions',
 ]
 
 
 class CMMLUConfig(datasets.BuilderConfig):
+
     def __init__(self, **kwargs):
-        super().__init__(version=datasets.Version(
+        super().__init__(version=datasets.Version('1.0.1'), **kwargs)
         # V1.0.1 Fix: One comma missing in word_religions.csv
         # V1.0.0 Init version
 
 
 class CMMLU(datasets.GeneratorBasedBuilder):
-    BUILDER_CONFIGS = [
-        CMMLUConfig(name=task_name) for task_name in task_list
-    ]
+    BUILDER_CONFIGS = [CMMLUConfig(name=task_name) for task_name in task_list]
 
     def _info(self):
-        features = datasets.Features(
-
-
-
-
-
-
-
-            }
-        )
+        features = datasets.Features({
+            'Question': datasets.Value('string'),
+            'A': datasets.Value('string'),
+            'B': datasets.Value('string'),
+            'C': datasets.Value('string'),
+            'D': datasets.Value('string'),
+            'Answer': datasets.Value('string'),
+        })
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
             features=features,
@@ -149,18 +144,18 @@ class CMMLU(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-
+                    'filepath': os.path.join(data_dir, f'test/{task_name}.csv'),
                 },
             ),
             datasets.SplitGenerator(
-                name=datasets.Split(
+                name=datasets.Split('dev'),
                 gen_kwargs={
-
+                    'filepath': os.path.join(data_dir, f'dev/{task_name}.csv'),
                 },
            ),
         ]
 
     def _generate_examples(self, filepath):
-        df = pd.read_csv(filepath, header=0, index_col=0, encoding=
-        for i, instance in enumerate(df.to_dict(orient=
-            yield i, instance
+        df = pd.read_csv(filepath, header=0, index_col=0, encoding='utf-8')
+        for i, instance in enumerate(df.to_dict(orient='records')):
+            yield i, instance