evalscope 0.17.1__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (273)
  1. evalscope/__init__.py +4 -1
  2. evalscope/api/__init__.py +0 -0
  3. evalscope/api/benchmark/__init__.py +3 -0
  4. evalscope/api/benchmark/adapters/__init__.py +3 -0
  5. evalscope/api/benchmark/adapters/default_data_adapter.py +683 -0
  6. evalscope/api/benchmark/adapters/multi_choice_adapter.py +83 -0
  7. evalscope/api/benchmark/adapters/text2image_adapter.py +155 -0
  8. evalscope/api/benchmark/benchmark.py +321 -0
  9. evalscope/api/benchmark/meta.py +115 -0
  10. evalscope/api/dataset/__init__.py +2 -0
  11. evalscope/api/dataset/dataset.py +349 -0
  12. evalscope/api/dataset/loader.py +261 -0
  13. evalscope/api/dataset/utils.py +143 -0
  14. evalscope/api/evaluator/__init__.py +3 -0
  15. evalscope/api/evaluator/cache.py +355 -0
  16. evalscope/api/evaluator/evaluator.py +56 -0
  17. evalscope/api/evaluator/state.py +264 -0
  18. evalscope/api/filter/__init__.py +1 -0
  19. evalscope/api/filter/filter.py +72 -0
  20. evalscope/api/messages/__init__.py +11 -0
  21. evalscope/api/messages/chat_message.py +198 -0
  22. evalscope/api/messages/content.py +102 -0
  23. evalscope/api/messages/utils.py +35 -0
  24. evalscope/api/metric/__init__.py +2 -0
  25. evalscope/api/metric/metric.py +55 -0
  26. evalscope/api/metric/scorer.py +105 -0
  27. evalscope/api/mixin/__init__.py +2 -0
  28. evalscope/api/mixin/dataset_mixin.py +105 -0
  29. evalscope/api/mixin/llm_judge_mixin.py +168 -0
  30. evalscope/api/model/__init__.py +12 -0
  31. evalscope/api/model/generate_config.py +157 -0
  32. evalscope/api/model/model.py +383 -0
  33. evalscope/api/model/model_output.py +285 -0
  34. evalscope/api/registry.py +182 -0
  35. evalscope/api/tool/__init__.py +3 -0
  36. evalscope/api/tool/tool_call.py +101 -0
  37. evalscope/api/tool/tool_info.py +173 -0
  38. evalscope/api/tool/utils.py +64 -0
  39. evalscope/app/ui/app_ui.py +2 -1
  40. evalscope/app/ui/multi_model.py +50 -25
  41. evalscope/app/ui/single_model.py +23 -11
  42. evalscope/app/utils/data_utils.py +42 -26
  43. evalscope/app/utils/text_utils.py +0 -2
  44. evalscope/app/utils/visualization.py +9 -4
  45. evalscope/arguments.py +6 -7
  46. evalscope/backend/opencompass/api_meta_template.py +2 -1
  47. evalscope/backend/opencompass/backend_manager.py +6 -3
  48. evalscope/backend/rag_eval/clip_benchmark/dataset_builder.py +10 -10
  49. evalscope/backend/rag_eval/clip_benchmark/task_template.py +8 -4
  50. evalscope/backend/rag_eval/ragas/task_template.py +2 -1
  51. evalscope/backend/rag_eval/ragas/tasks/build_distribution.py +2 -1
  52. evalscope/backend/rag_eval/ragas/tasks/build_transform.py +7 -4
  53. evalscope/backend/rag_eval/ragas/tasks/testset_generation.py +2 -1
  54. evalscope/backend/rag_eval/ragas/tasks/translate_prompt.py +2 -1
  55. evalscope/backend/rag_eval/utils/embedding.py +2 -1
  56. evalscope/backend/rag_eval/utils/llm.py +13 -12
  57. evalscope/benchmarks/__init__.py +0 -2
  58. evalscope/benchmarks/aigc/i2i/__init__.py +0 -0
  59. evalscope/benchmarks/aigc/i2i/general_i2i_adapter.py +44 -0
  60. evalscope/benchmarks/aigc/t2i/evalmuse_adapter.py +53 -55
  61. evalscope/benchmarks/aigc/t2i/genai_bench_adapter.py +41 -46
  62. evalscope/benchmarks/aigc/t2i/general_t2i_adapter.py +29 -45
  63. evalscope/benchmarks/aigc/t2i/hpdv2_adapter.py +34 -44
  64. evalscope/benchmarks/aigc/t2i/tifa_adapter.py +16 -27
  65. evalscope/benchmarks/aime/aime24_adapter.py +38 -40
  66. evalscope/benchmarks/aime/aime25_adapter.py +34 -40
  67. evalscope/benchmarks/alpaca_eval/alpaca_eval_adapter.py +86 -60
  68. evalscope/benchmarks/arc/arc_adapter.py +34 -147
  69. evalscope/benchmarks/arena_hard/arena_hard_adapter.py +96 -70
  70. evalscope/benchmarks/arena_hard/utils.py +37 -1
  71. evalscope/benchmarks/bbh/bbh_adapter.py +72 -144
  72. evalscope/benchmarks/bfcl/bfcl_adapter.py +181 -160
  73. evalscope/benchmarks/bfcl/generation.py +222 -0
  74. evalscope/benchmarks/ceval/ceval_adapter.py +94 -162
  75. evalscope/benchmarks/chinese_simple_qa/csimple_qa_adapter.py +85 -82
  76. evalscope/benchmarks/cmmlu/cmmlu_adapter.py +34 -125
  77. evalscope/benchmarks/competition_math/competition_math_adapter.py +56 -108
  78. evalscope/benchmarks/data_collection/data_collection_adapter.py +183 -45
  79. evalscope/benchmarks/docmath/docmath_adapter.py +109 -51
  80. evalscope/benchmarks/docmath/utils.py +4 -5
  81. evalscope/benchmarks/drop/drop_adapter.py +88 -40
  82. evalscope/benchmarks/frames/frames_adapter.py +135 -52
  83. evalscope/benchmarks/general_arena/general_arena_adapter.py +136 -98
  84. evalscope/benchmarks/general_arena/utils.py +23 -27
  85. evalscope/benchmarks/general_mcq/general_mcq_adapter.py +40 -101
  86. evalscope/benchmarks/general_qa/general_qa_adapter.py +73 -134
  87. evalscope/benchmarks/gpqa/gpqa_adapter.py +61 -100
  88. evalscope/benchmarks/gpqa/{chain_of_thought.txt → prompt.py} +12 -5
  89. evalscope/benchmarks/gsm8k/gsm8k_adapter.py +62 -142
  90. evalscope/benchmarks/hellaswag/hellaswag_adapter.py +35 -124
  91. evalscope/benchmarks/hle/hle_adapter.py +127 -93
  92. evalscope/benchmarks/humaneval/humaneval_adapter.py +86 -55
  93. evalscope/benchmarks/ifeval/ifeval_adapter.py +69 -40
  94. evalscope/benchmarks/ifeval/instructions.py +109 -64
  95. evalscope/benchmarks/ifeval/instructions_registry.py +1 -1
  96. evalscope/benchmarks/ifeval/utils.py +6 -7
  97. evalscope/benchmarks/iquiz/iquiz_adapter.py +30 -65
  98. evalscope/benchmarks/live_code_bench/evaluate_utils.py +2 -2
  99. evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +121 -71
  100. evalscope/benchmarks/live_code_bench/load_utils.py +13 -21
  101. evalscope/benchmarks/live_code_bench/testing_util.py +6 -2
  102. evalscope/benchmarks/maritime_bench/maritime_bench_adapter.py +49 -75
  103. evalscope/benchmarks/math_500/math_500_adapter.py +41 -48
  104. evalscope/benchmarks/mmlu/mmlu_adapter.py +32 -205
  105. evalscope/benchmarks/mmlu_pro/mmlu_pro_adapter.py +80 -99
  106. evalscope/benchmarks/mmlu_redux/mmlu_redux_adapter.py +64 -110
  107. evalscope/benchmarks/musr/musr_adapter.py +33 -64
  108. evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +192 -152
  109. evalscope/benchmarks/process_bench/process_bench_adapter.py +144 -76
  110. evalscope/benchmarks/race/race_adapter.py +33 -119
  111. evalscope/benchmarks/simple_qa/simple_qa_adapter.py +72 -70
  112. evalscope/benchmarks/super_gpqa/{five_shot_prompt.txt → prompt.py} +14 -16
  113. evalscope/benchmarks/super_gpqa/super_gpqa_adapter.py +73 -117
  114. evalscope/benchmarks/super_gpqa/utils.py +2 -1
  115. evalscope/benchmarks/tau_bench/generation.py +147 -0
  116. evalscope/benchmarks/tau_bench/tau_bench_adapter.py +112 -54
  117. evalscope/benchmarks/tool_bench/tool_bench_adapter.py +91 -70
  118. evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +56 -124
  119. evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +70 -265
  120. evalscope/benchmarks/winogrande/winogrande_adapter.py +28 -54
  121. evalscope/cli/cli.py +2 -0
  122. evalscope/cli/start_server.py +6 -3
  123. evalscope/collections/__init__.py +2 -10
  124. evalscope/collections/sampler.py +10 -10
  125. evalscope/collections/schema.py +13 -11
  126. evalscope/config.py +95 -54
  127. evalscope/constants.py +29 -61
  128. evalscope/evaluator/__init__.py +1 -1
  129. evalscope/evaluator/evaluator.py +277 -423
  130. evalscope/filters/__init__.py +2 -0
  131. evalscope/filters/extraction.py +126 -0
  132. evalscope/filters/selection.py +57 -0
  133. evalscope/metrics/__init__.py +13 -13
  134. evalscope/metrics/llm_judge.py +32 -30
  135. evalscope/metrics/math_parser.py +27 -22
  136. evalscope/metrics/metric.py +307 -0
  137. evalscope/metrics/metrics.py +22 -18
  138. evalscope/metrics/t2v_metrics/__init__.py +0 -52
  139. evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/clip_model.py +4 -2
  140. evalscope/metrics/t2v_metrics/models/clipscore_models/build_mps_model/cross_modeling.py +9 -13
  141. evalscope/metrics/t2v_metrics/models/clipscore_models/clip_model.py +2 -1
  142. evalscope/metrics/t2v_metrics/models/clipscore_models/hpsv2_model.py +3 -2
  143. evalscope/metrics/t2v_metrics/models/clipscore_models/mps_model.py +2 -1
  144. evalscope/metrics/t2v_metrics/models/clipscore_models/pickscore_model.py +2 -2
  145. evalscope/metrics/t2v_metrics/models/itmscore_models/blip2_itm_model.py +2 -1
  146. evalscope/metrics/t2v_metrics/models/itmscore_models/fga_blip2_model.py +4 -2
  147. evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/ImageReward.py +10 -5
  148. evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward/blip_pretrain.py +4 -2
  149. evalscope/metrics/t2v_metrics/models/itmscore_models/image_reward_model.py +2 -1
  150. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/language_model/clip_t5.py +15 -9
  151. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5/model/multimodal_encoder/clip_encoder.py +4 -2
  152. evalscope/metrics/t2v_metrics/models/vqascore_models/clip_t5_model.py +15 -10
  153. evalscope/metrics/t2v_metrics/models/vqascore_models/gpt4v_model.py +9 -6
  154. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +2 -2
  155. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +4 -2
  156. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +4 -2
  157. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +3 -9
  158. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +16 -10
  159. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +3 -2
  160. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +4 -2
  161. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/__init__.py +8 -4
  162. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +47 -25
  163. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_qformer.py +12 -7
  164. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5.py +23 -17
  165. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/blip2_t5_instruct.py +33 -23
  166. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/fga_blip2.py +2 -1
  167. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_llama.py +46 -30
  168. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/modeling_t5.py +69 -37
  169. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/__init__.py +7 -5
  170. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip.py +6 -4
  171. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_image_text_matching.py +7 -5
  172. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_nlvr.py +3 -2
  173. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_outputs.py +5 -2
  174. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/blip_vqa.py +17 -13
  175. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +35 -19
  176. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/clip_vit.py +14 -12
  177. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/eva_vit.py +63 -52
  178. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +63 -38
  179. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/vit.py +6 -3
  180. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/__init__.py +6 -2
  181. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/processors/randaugment.py +3 -2
  182. evalscope/metrics/t2v_metrics/models/vqascore_models/mm_utils.py +15 -13
  183. evalscope/metrics/t2v_metrics/models/vqascore_models/vqa_model.py +3 -2
  184. evalscope/models/__init__.py +6 -29
  185. evalscope/models/mockllm.py +65 -0
  186. evalscope/models/model_apis.py +47 -0
  187. evalscope/models/modelscope.py +455 -0
  188. evalscope/models/openai_compatible.py +123 -0
  189. evalscope/models/text2image_model.py +124 -0
  190. evalscope/models/utils/openai.py +698 -0
  191. evalscope/perf/benchmark.py +2 -1
  192. evalscope/perf/http_client.py +4 -2
  193. evalscope/perf/plugin/api/custom_api.py +5 -4
  194. evalscope/perf/plugin/api/openai_api.py +11 -9
  195. evalscope/perf/plugin/datasets/custom.py +2 -1
  196. evalscope/perf/plugin/datasets/flickr8k.py +1 -1
  197. evalscope/perf/plugin/datasets/kontext_bench.py +1 -1
  198. evalscope/perf/plugin/datasets/line_by_line.py +2 -1
  199. evalscope/perf/plugin/datasets/longalpaca.py +2 -1
  200. evalscope/perf/plugin/datasets/openqa.py +4 -2
  201. evalscope/perf/utils/benchmark_util.py +7 -5
  202. evalscope/perf/utils/db_util.py +9 -6
  203. evalscope/perf/utils/local_server.py +8 -3
  204. evalscope/perf/utils/rich_display.py +16 -10
  205. evalscope/report/__init__.py +2 -2
  206. evalscope/report/combinator.py +18 -12
  207. evalscope/report/generator.py +101 -6
  208. evalscope/report/{utils.py → report.py} +8 -6
  209. evalscope/run.py +26 -44
  210. evalscope/summarizer.py +1 -1
  211. evalscope/utils/__init__.py +21 -2
  212. evalscope/utils/chat_service.py +2 -1
  213. evalscope/utils/deprecation_utils.py +12 -1
  214. evalscope/utils/function_utils.py +29 -0
  215. evalscope/utils/io_utils.py +100 -5
  216. evalscope/utils/json_schema.py +208 -0
  217. evalscope/utils/logger.py +51 -12
  218. evalscope/utils/model_utils.py +10 -7
  219. evalscope/utils/multi_choices.py +271 -0
  220. evalscope/utils/url_utils.py +65 -0
  221. evalscope/version.py +2 -2
  222. {evalscope-0.17.1.dist-info → evalscope-1.0.0.dist-info}/METADATA +98 -49
  223. {evalscope-0.17.1.dist-info → evalscope-1.0.0.dist-info}/RECORD +234 -216
  224. tests/aigc/test_t2i.py +22 -4
  225. tests/benchmark/__init__.py +1 -0
  226. tests/benchmark/test_eval.py +386 -0
  227. tests/cli/test_all.py +3 -5
  228. tests/cli/test_collection.py +13 -4
  229. tests/cli/test_custom.py +22 -15
  230. tests/rag/test_clip_benchmark.py +1 -0
  231. evalscope/benchmarks/aigc/t2i/base.py +0 -56
  232. evalscope/benchmarks/arc/ai2_arc.py +0 -151
  233. evalscope/benchmarks/benchmark.py +0 -81
  234. evalscope/benchmarks/ceval/ceval_exam.py +0 -146
  235. evalscope/benchmarks/cmmlu/cmmlu.py +0 -161
  236. evalscope/benchmarks/cmmlu/samples.jsonl +0 -5
  237. evalscope/benchmarks/competition_math/competition_math.py +0 -79
  238. evalscope/benchmarks/data_adapter.py +0 -528
  239. evalscope/benchmarks/filters.py +0 -59
  240. evalscope/benchmarks/gsm8k/gsm8k.py +0 -121
  241. evalscope/benchmarks/hellaswag/hellaswag.py +0 -112
  242. evalscope/benchmarks/humaneval/humaneval.py +0 -79
  243. evalscope/benchmarks/mmlu/mmlu.py +0 -160
  244. evalscope/benchmarks/mmlu/samples.jsonl +0 -5
  245. evalscope/benchmarks/process_bench/critique_template.txt +0 -13
  246. evalscope/benchmarks/race/race.py +0 -104
  247. evalscope/benchmarks/race/samples.jsonl +0 -5
  248. evalscope/benchmarks/super_gpqa/zero_shot_prompt.txt +0 -4
  249. evalscope/benchmarks/trivia_qa/trivia_qa.py +0 -89
  250. evalscope/benchmarks/truthful_qa/truthful_qa.py +0 -163
  251. evalscope/benchmarks/utils.py +0 -60
  252. evalscope/collections/evaluator.py +0 -375
  253. evalscope/metrics/completion_parsers.py +0 -227
  254. evalscope/metrics/named_metrics.py +0 -55
  255. evalscope/models/adapters/__init__.py +0 -14
  256. evalscope/models/adapters/base_adapter.py +0 -84
  257. evalscope/models/adapters/bfcl_adapter.py +0 -246
  258. evalscope/models/adapters/chat_adapter.py +0 -207
  259. evalscope/models/adapters/choice_adapter.py +0 -222
  260. evalscope/models/adapters/custom_adapter.py +0 -71
  261. evalscope/models/adapters/server_adapter.py +0 -236
  262. evalscope/models/adapters/t2i_adapter.py +0 -79
  263. evalscope/models/adapters/tau_bench_adapter.py +0 -189
  264. evalscope/models/custom/__init__.py +0 -4
  265. evalscope/models/custom/custom_model.py +0 -50
  266. evalscope/models/custom/dummy_model.py +0 -99
  267. evalscope/models/local_model.py +0 -128
  268. evalscope/models/register.py +0 -41
  269. tests/cli/test_run.py +0 -489
  270. {evalscope-0.17.1.dist-info → evalscope-1.0.0.dist-info}/LICENSE +0 -0
  271. {evalscope-0.17.1.dist-info → evalscope-1.0.0.dist-info}/WHEEL +0 -0
  272. {evalscope-0.17.1.dist-info → evalscope-1.0.0.dist-info}/entry_points.txt +0 -0
  273. {evalscope-0.17.1.dist-info → evalscope-1.0.0.dist-info}/top_level.txt +0 -0
evalscope/benchmarks/arc/ai2_arc.py
@@ -1,151 +0,0 @@
- # Copyright (c) Alibaba, Inc. and its affiliates.
- # Copyright (c) Allen Institute, and its affiliates.
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- """AI2 ARC (Abstraction and Reasoning Corpus) for General Artificial Intelligence Benchmark."""
- """AUTO GENERATED, DO NOT EDIT"""
-
- import datasets
- import json
- import os
-
- # flake8: noqa
-
- _CITATION = """\
- @article{allenai:arc,
-     author = {Peter Clark and Isaac Cowhey and Oren Etzioni and Tushar Khot and
-               Ashish Sabharwal and Carissa Schoenick and Oyvind Tafjord},
-     title = {Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge},
-     journal = {arXiv:1803.05457v1},
-     year = {2018},
- }
- """
-
- _DESCRIPTION = """\
- A new dataset of 7,787 genuine grade-school level, multiple-choice science questions, assembled to encourage research in
- advanced question-answering. The dataset is partitioned into a Challenge Set and an Easy Set, where the former contains
- only questions answered incorrectly by both a retrieval-based algorithm and a word co-occurrence algorithm. We are also
- including a corpus of over 14 million science sentences relevant to the task,
- and an implementation of three neural baseline models for this dataset. We pose ARC as a challenge to the community.
-
- ARC-Easy:
-     train: 2251
-     test: 2376
-     validation: 570
-
- ARC-Challenge:
-     train: 1119
-     test: 1172
-     validation: 299
- """
-
- _URL = 'https://modelscope.oss-cn-beijing.aliyuncs.com/open_data/arc/ARC-V1-Feb2018.zip'
-
- # tasks: ['ARC-Easy', 'ARC-Challenge']
-
-
- class Ai2ArcConfig(datasets.BuilderConfig):
-     """BuilderConfig for Ai2ARC."""
-
-     def __init__(self, **kwargs):
-         """BuilderConfig for Ai2Arc.
-
-         Args:
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super(Ai2ArcConfig, self).__init__(version=datasets.Version('1.0.0', ''), **kwargs)
-
-
- class Ai2Arc(datasets.GeneratorBasedBuilder):
-     """
-     The AI2 Reasoning Challenge (ARC) dataset.
-     Subset: ARC-Easy, ARC-Challenge.
-     """
-
-     VERSION = datasets.Version('1.0.0')
-     BUILDER_CONFIGS = [
-         Ai2ArcConfig(
-             name='ARC-Challenge',
-             description="""\
-           Challenge Set of 2590 “hard” questions (those that both a retrieval and a co-occurrence method fail to answer correctly)
-           """,
-         ),
-         Ai2ArcConfig(
-             name='ARC-Easy',
-             description="""\
-           Easy Set of 5197 questions
-           """,
-         ),
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # datasets.features.FeatureConnectors
-             features=datasets.Features({
-                 'id':
-                 datasets.Value('string'),
-                 'question':
-                 datasets.Value('string'),
-                 'choices':
-                 datasets.features.Sequence({
-                     'text': datasets.Value('string'),
-                     'label': datasets.Value('string')
-                 }),
-                 'answerKey':
-                 datasets.Value('string')
-                 # These are the features of your dataset like images, labels ...
-             }),
-             # If there's a common (input, target) tuple from the features,
-             # specify them here. They'll be used if as_supervised=True in
-             # builder.as_dataset.
-             supervised_keys=None,
-             # Homepage of the dataset for documentation
-             homepage='https://allenai.org/data/arc',
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         # dl_manager is a datasets.download.DownloadManager that can be used to
-         # download and extract URLs
-         dl_dir = dl_manager.download_and_extract(_URL)
-         data_dir = os.path.join(dl_dir, 'ARC-V1-Feb2018-2')
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={'filepath': os.path.join(data_dir, self.config.name, self.config.name + '-Train.jsonl')},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={'filepath': os.path.join(data_dir, self.config.name, self.config.name + '-Test.jsonl')},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={'filepath': os.path.join(data_dir, self.config.name, self.config.name + '-Dev.jsonl')},
-             ),
-         ]
-
-     def _generate_examples(self, filepath):
-         """Yields examples."""
-         with open(filepath, encoding='utf-8') as f:
-             for row in f:
-                 data = json.loads(row)
-                 answerkey = data['answerKey']
-                 id_ = data['id']
-                 question = data['question']['stem']
-                 choices = data['question']['choices']
-                 text_choices = [choice['text'] for choice in choices]
-                 label_choices = [choice['label'] for choice in choices]
-                 yield id_, {
-                     'id': id_,
-                     'answerKey': answerkey,
-                     'question': question,
-                     'choices': {
-                         'text': text_choices,
-                         'label': label_choices
-                     },
-                 }
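Before 1.0.0 this script was a standard Hugging Face `datasets` builder. A minimal sketch of how such a script-based dataset is loaded directly (the local path is illustrative, and recent `datasets` releases additionally require `trust_remote_code=True` for script datasets):

    import datasets

    # Load the builder script by path and pick a config; each example carries
    # 'id', 'question', 'choices' and 'answerKey' per the features above.
    ds = datasets.load_dataset(
        'evalscope/benchmarks/arc/ai2_arc.py', 'ARC-Challenge',
        split='test', trust_remote_code=True)
    print(ds[0]['question'], ds[0]['answerKey'])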
evalscope/benchmarks/benchmark.py
@@ -1,81 +0,0 @@
- import copy
- from collections import OrderedDict
- from dataclasses import dataclass, field, fields
- from typing import TYPE_CHECKING, Dict, List, Optional
-
- from evalscope.constants import OutputType
-
- if TYPE_CHECKING:
-     from evalscope.benchmarks import DataAdapter
-
- BENCHMARK_MAPPINGS = {}
-
-
- @dataclass
- class BenchmarkMeta:
-     name: str
-     dataset_id: str
-     data_adapter: 'DataAdapter'
-     model_adapter: Optional[str] = OutputType.GENERATION
-     output_types: Optional[List[str]] = field(default_factory=lambda: [OutputType.GENERATION])
-     subset_list: List[str] = field(default_factory=lambda: ['default'])
-     metric_list: List[str] = field(default_factory=list)
-     few_shot_num: int = 0
-     few_shot_random: bool = False
-     train_split: Optional[str] = None
-     eval_split: Optional[str] = None
-     prompt_template: Optional[str] = None
-     system_prompt: Optional[str] = None
-     query_template: Optional[str] = None
-     pretty_name: Optional[str] = None
-     description: Optional[str] = None
-     tags: Optional[List[str]] = field(default_factory=list)
-     filters: Optional[OrderedDict] = None
-     extra_params: Optional[Dict] = field(default_factory=dict)
-
-     def _update(self, args: dict):
-         if args.get('local_path'):
-             self.dataset_id = args['local_path']
-             del args['local_path']
-         self.__dict__.update(args)
-
-     def to_dict(self) -> dict:
-         return self.__dict__
-
-     def to_string_dict(self) -> dict:
-         cur_dict = copy.deepcopy(self.to_dict())
-         # cur_dict['data_adapter'] = self.data_adapter.__name__
-         del cur_dict['data_adapter']
-         return cur_dict
-
-     def get_data_adapter(self, config: dict = {}) -> 'DataAdapter':
-         if config:
-             self._update(config)
-
-         data_adapter = self.data_adapter(**self.to_dict())
-         return data_adapter
-
-
- class Benchmark:
-
-     def __init__(self):
-         pass
-
-     @classmethod
-     def get(cls, name: str) -> 'BenchmarkMeta':
-         if name not in BENCHMARK_MAPPINGS:
-             raise Exception(f'Unknown benchmark: {name}. Available tasks: {list(BENCHMARK_MAPPINGS.keys())}')
-         benchmark = BENCHMARK_MAPPINGS[name]
-         return benchmark
-
-     @classmethod
-     def register(cls, name: str, dataset_id: str, **kwargs):
-
-         def register_wrapper(data_adapter):
-             if name in BENCHMARK_MAPPINGS:
-                 raise Exception(f'Benchmark {name} already registered')
-             BENCHMARK_MAPPINGS[name] = BenchmarkMeta(
-                 name=name, data_adapter=data_adapter, dataset_id=dataset_id, **kwargs)
-             return data_adapter
-
-         return register_wrapper
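The removed registry above was consumed through the decorator returned by `Benchmark.register`. A minimal sketch of that pre-1.0 pattern (the adapter class, benchmark name, and metric below are hypothetical, for illustration only):

    from evalscope.benchmarks.benchmark import Benchmark
    from evalscope.benchmarks.data_adapter import DataAdapter  # also removed in this release

    @Benchmark.register(name='my_bench', dataset_id='org/my_dataset', metric_list=['AverageAccuracy'])
    class MyBenchAdapter(DataAdapter):
        pass

    meta = Benchmark.get('my_bench')      # returns the BenchmarkMeta dataclass
    adapter = meta.get_data_adapter()     # instantiates MyBenchAdapter(**meta.to_dict())

Judging from the added files in this diff, evalscope/api/registry.py and evalscope/api/benchmark/meta.py appear to take over this role in 1.0.0.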
evalscope/benchmarks/ceval/ceval_exam.py
@@ -1,146 +0,0 @@
- # Copyright (c) Alibaba, Inc. and its affiliates.
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License
- import datasets
- import os
- import pandas as pd
-
- # flake8: noqa
- """DO NOT EDIT unless you are contributing a new dataset."""
-
- _CITATION = """\
- @article{huang2023ceval,
-     title={C-Eval: A Multi-Level Multi-Discipline Chinese Evaluation Suite for Foundation Models},
-     author={Huang, Yuzhen and Bai, Yuzhuo and Zhu, Zhihao and Zhang, Junlei and Zhang, Jinghan and Su, Tangjun and Liu, Junteng and Lv, Chuancheng and Zhang, Yikai and Lei, Jiayi and Fu, Yao and Sun, Maosong and He, Junxian},
-     journal={arXiv preprint arXiv:2305.08322},
-     year={2023}
- }
- """
-
- _DESCRIPTION = """\
- C-Eval is a comprehensive Chinese evaluation suite for foundation models. It consists of 13948 multi-choice questions spanning 52 diverse disciplines and four difficulty levels.
- """
-
- _HOMEPAGE = 'https://cevalbenchmark.com'
-
- _LICENSE = 'Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License'
-
- _URL = r'https://modelscope.oss-cn-beijing.aliyuncs.com/open_data/c-eval/ceval-exam.zip'
-
- task_list = [
-     'computer_network',
-     'operating_system',
-     'computer_architecture',
-     'college_programming',
-     'college_physics',
-     'college_chemistry',
-     'advanced_mathematics',
-     'probability_and_statistics',
-     'discrete_mathematics',
-     'electrical_engineer',
-     'metrology_engineer',
-     'high_school_mathematics',
-     'high_school_physics',
-     'high_school_chemistry',
-     'high_school_biology',
-     'middle_school_mathematics',
-     'middle_school_biology',
-     'middle_school_physics',
-     'middle_school_chemistry',
-     'veterinary_medicine',
-     'college_economics',
-     'business_administration',
-     'marxism',
-     'mao_zedong_thought',
-     'education_science',
-     'teacher_qualification',
-     'high_school_politics',
-     'high_school_geography',
-     'middle_school_politics',
-     'middle_school_geography',
-     'modern_chinese_history',
-     'ideological_and_moral_cultivation',
-     'logic',
-     'law',
-     'chinese_language_and_literature',
-     'art_studies',
-     'professional_tour_guide',
-     'legal_professional',
-     'high_school_chinese',
-     'high_school_history',
-     'middle_school_history',
-     'civil_servant',
-     'sports_science',
-     'plant_protection',
-     'basic_medicine',
-     'clinical_medicine',
-     'urban_and_rural_planner',
-     'accountant',
-     'fire_engineer',
-     'environmental_impact_assessment_engineer',
-     'tax_accountant',
-     'physician',
- ]
-
-
- class CevalExamConfig(datasets.BuilderConfig):
-
-     def __init__(self, **kwargs):
-         super().__init__(version=datasets.Version('1.0.0'), **kwargs)
-
-
- class CevalExam(datasets.GeneratorBasedBuilder):
-     BUILDER_CONFIGS = [CevalExamConfig(name=task_name, ) for task_name in task_list]
-
-     def _info(self):
-         features = datasets.Features({
-             'id': datasets.Value('int32'),
-             'question': datasets.Value('string'),
-             'A': datasets.Value('string'),
-             'B': datasets.Value('string'),
-             'C': datasets.Value('string'),
-             'D': datasets.Value('string'),
-             'answer': datasets.Value('string'),
-             'explanation': datasets.Value('string'),
-         })
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         data_dir = dl_manager.download_and_extract(_URL)
-         task_name = self.config.name
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     'filepath': os.path.join(data_dir, 'test', f'{task_name}_test.csv'),
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split('val'),
-                 gen_kwargs={
-                     'filepath': os.path.join(data_dir, 'val', f'{task_name}_val.csv'),
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split('dev'),
-                 gen_kwargs={
-                     'filepath': os.path.join(data_dir, 'dev', f'{task_name}_dev.csv'),
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, filepath):
-         df = pd.read_csv(filepath, encoding='utf-8')
-         for i, instance in enumerate(df.to_dict(orient='records')):
-             if 'answer' not in instance.keys():
-                 instance['answer'] = ''
-             if 'explanation' not in instance.keys():
-                 instance['explanation'] = ''
-             yield i, instance
evalscope/benchmarks/cmmlu/cmmlu.py
@@ -1,161 +0,0 @@
- # Copyright (c) Alibaba, Inc. and its affiliates.
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # flake8: noqa
-
- import datasets
- import os
- import pandas as pd
-
- _CITATION = """\
- @misc{li2023cmmlu,
-     title={CMMLU: Measuring massive multitask language understanding in Chinese},
-     author={Haonan Li and Yixuan Zhang and Fajri Koto and Yifei Yang and Hai Zhao and Yeyun Gong and Nan Duan and Timothy Baldwin},
-     year={2023},
-     eprint={2306.09212},
-     archivePrefix={arXiv},
-     primaryClass={cs.CL}
- }
- """
-
- _DESCRIPTION = """\
- CMMLU is a comprehensive Chinese assessment suite specifically designed to evaluate the advanced knowledge and reasoning abilities of LLMs within the Chinese language and cultural context.
- """
-
- _HOMEPAGE = 'https://modelscope.cn/datasets/modelscope/cmmlu/summary'
-
- # _URL = r"https://huggingface.co/datasets/haonan-li/cmmlu/resolve/main/cmmlu_v1_0_1.zip"
- _URL = r'https://modelscope.cn/api/v1/datasets/modelscope/cmmlu/repo?Revision=master&FilePath=cmmlu_v1_0_1.zip'
-
- # contains 67 sub-tasks
- task_list = [
-     'agronomy',
-     'anatomy',
-     'ancient_chinese',
-     'arts',
-     'astronomy',
-     'business_ethics',
-     'chinese_civil_service_exam',
-     'chinese_driving_rule',
-     'chinese_food_culture',
-     'chinese_foreign_policy',
-     'chinese_history',
-     'chinese_literature',
-     'chinese_teacher_qualification',
-     'clinical_knowledge',
-     'college_actuarial_science',
-     'college_education',
-     'college_engineering_hydrology',
-     'college_law',
-     'college_mathematics',
-     'college_medical_statistics',
-     'college_medicine',
-     'computer_science',
-     'computer_security',
-     'conceptual_physics',
-     'construction_project_management',
-     'economics',
-     'education',
-     'electrical_engineering',
-     'elementary_chinese',
-     'elementary_commonsense',
-     'elementary_information_and_technology',
-     'elementary_mathematics',
-     'ethnology',
-     'food_science',
-     'genetics',
-     'global_facts',
-     'high_school_biology',
-     'high_school_chemistry',
-     'high_school_geography',
-     'high_school_mathematics',
-     'high_school_physics',
-     'high_school_politics',
-     'human_sexuality',
-     'international_law',
-     'journalism',
-     'jurisprudence',
-     'legal_and_moral_basis',
-     'logical',
-     'machine_learning',
-     'management',
-     'marketing',
-     'marxist_theory',
-     'modern_chinese',
-     'nutrition',
-     'philosophy',
-     'professional_accounting',
-     'professional_law',
-     'professional_medicine',
-     'professional_psychology',
-     'public_relations',
-     'security_study',
-     'sociology',
-     'sports_science',
-     'traditional_chinese_medicine',
-     'virology',
-     'world_history',
-     'world_religions',
- ]
-
-
- class CMMLUConfig(datasets.BuilderConfig):
-
-     def __init__(self, **kwargs):
-         super().__init__(version=datasets.Version('1.0.1'), **kwargs)
-         # V1.0.1 Fix: One comma missing in word_religions.csv
-         # V1.0.0 Init version
-
-
- class CMMLU(datasets.GeneratorBasedBuilder):
-     BUILDER_CONFIGS = [CMMLUConfig(name=task_name) for task_name in task_list]
-
-     def _info(self):
-         features = datasets.Features({
-             'Question': datasets.Value('string'),
-             'A': datasets.Value('string'),
-             'B': datasets.Value('string'),
-             'C': datasets.Value('string'),
-             'D': datasets.Value('string'),
-             'Answer': datasets.Value('string'),
-         })
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             homepage=_HOMEPAGE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         data_dir = dl_manager.download_and_extract(_URL)
-         task_name = self.config.name
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     'filepath': os.path.join(data_dir, f'test/{task_name}.csv'),
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split('dev'),
-                 gen_kwargs={
-                     'filepath': os.path.join(data_dir, f'dev/{task_name}.csv'),
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, filepath):
-         df = pd.read_csv(filepath, header=0, index_col=0, encoding='utf-8')
-         for i, instance in enumerate(df.to_dict(orient='records')):
-             yield i, instance
evalscope/benchmarks/cmmlu/samples.jsonl
@@ -1,5 +0,0 @@
- {'input': '毛毛骑在牛背上过河,他共有甲、乙、丙、丁4头牛,甲过河要20分钟,乙过河要30分钟,丙过河要40分钟,丁过河要50分钟。毛毛每次只能赶2头牛过河,要把4头牛都赶到对岸去,最少要多少分钟?', 'A': '190', 'B': '180', 'C': '170', 'D': '160', 'target': 'D'}
- {'input': '下列关于重力的说法正确的是', 'A': '在地球周围的物体都要受到重力作用,与其运动状态无关', 'B': '对某一物体而言,重力的大小是一个恒量,不随物体的地理位置而改变', 'C': '重力就是地球对物体的吸引力,重力的方向总是竖直向下', 'D': '在地球表面各处的重力方向都是相同的', 'target': 'A'}
- {'input': '心脏的静脉血回心的主要途径是', 'A': '心小静脉', 'B': '冠状窦', 'C': '心中静脉', 'D': '心前静脉', 'target': 'B'}
- {'input': "以西蒙为代表的决策理论学派提出的决策准则是", 'A': '最优化', 'B': '公平', 'C': '民主化', 'D': '满意', 'target': 'D'}
- {'input': '20世纪初,英国首相阿斯奎斯说:“我们现在有一个牢固确立了两百年的传统,即归根到底,王位的占有者接受其大臣的建议并据此行事。”这一传统的确立,使一个以小农业和手工业生产为主的国家变成了一个典型的资本主义国家,成为欧洲各国效仿的对象。各国效仿的理由是', 'A': '英国“光荣革命”宣告了欧洲新社会政治制度的诞生', 'B': '殖民主义深刻影响了英国“世界工厂”的地位', 'C': '英国经济上的成就得益于其制度设计', 'D': '英国启蒙思想奠定了资产阶级民主主义政治的理论基础', 'target': 'C'}
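Note that despite the .jsonl extension, these sample lines are Python dict literals (single-quoted keys), not strict JSON, so `json.loads` would reject them. A small sketch of parsing such a file (the filename is illustrative):

    import ast

    # Each line is a Python literal, so ast.literal_eval is the safe parser here.
    with open('samples.jsonl', encoding='utf-8') as f:
        records = [ast.literal_eval(line) for line in f if line.strip()]
    print(records[0]['target'])  # -> 'D'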
evalscope/benchmarks/competition_math/competition_math.py
@@ -1,79 +0,0 @@
- # Copyright (c) Alibaba, Inc. and its affiliates.
- """Mathematics Aptitude Test of Heuristics (MATH) dataset."""
-
- import datasets
- import json
- import os
-
- _CITATION = """\
- @article{hendrycksmath2021,
-     title={Measuring Mathematical Problem Solving With the MATH Dataset},
-     author={Dan Hendrycks
-     and Collin Burns
-     and Saurav Kadavath
-     and Akul Arora
-     and Steven Basart
-     and Eric Tang
-     and Dawn Song
-     and Jacob Steinhardt},
-     journal={arXiv preprint arXiv:2103.03874},
-     year={2021}
- }
- """
-
- _DESCRIPTION = """\
- The Mathematics Aptitude Test of Heuristics (MATH) dataset consists of problems
- from mathematics competitions, including the AMC 10, AMC 12, AIME, and more.
- Each problem in MATH has a full step-by-step solution, which can be used to teach
- models to generate answer derivations and explanations.
- """
-
- _HOMEPAGE = 'https://github.com/hendrycks/math'
-
- _LICENSE = 'https://github.com/hendrycks/math/blob/main/LICENSE'
-
- # Original data URL: "https://people.eecs.berkeley.edu/~hendrycks/MATH.tar"
- _URL = 'https://sail-moe.oss-cn-hangzhou.aliyuncs.com/open_data/math/MATH.zip'
-
-
- class CompetitionMathDataset(datasets.GeneratorBasedBuilder):
-     """Mathematics Aptitude Test of Heuristics (MATH) dataset."""
-
-     VERSION = datasets.Version('1.0.0')
-
-     def _info(self):
-         features = datasets.Features({
-             'problem': datasets.Value('string'),
-             'level': datasets.Value('string'),
-             'type': datasets.Value('string'),
-             'solution': datasets.Value('string'),
-         })
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         download_dir = dl_manager.download_and_extract(_URL)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={'data_dir': dl_manager.iter_files(os.path.join(download_dir, 'MATH', 'train'))},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={'data_dir': dl_manager.iter_files(os.path.join(download_dir, 'MATH', 'test'))},
-             ),
-         ]
-
-     def _generate_examples(self, data_dir):
-         """Yields examples as (key, example) tuples."""
-         for id_, filepath in enumerate(data_dir):
-             with open(filepath, 'rb') as fin:
-                 example = json.load(fin)
-                 yield id_, example