evalscope 0.5.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of evalscope might be problematic. Click here for more details.

Files changed (170) hide show
  1. evalscope-0.5.0/PKG-INFO +475 -0
  2. evalscope-0.5.0/README.md +449 -0
  3. evalscope-0.5.0/evalscope/__init__.py +3 -0
  4. evalscope-0.5.0/evalscope/backend/__init__.py +3 -0
  5. evalscope-0.5.0/evalscope/backend/base.py +27 -0
  6. evalscope-0.5.0/evalscope/backend/opencompass/__init__.py +3 -0
  7. evalscope-0.5.0/evalscope/backend/opencompass/api_meta_template.py +64 -0
  8. evalscope-0.5.0/evalscope/backend/opencompass/backend_manager.py +247 -0
  9. evalscope-0.5.0/evalscope/backend/opencompass/tasks/__init__.py +1 -0
  10. evalscope-0.5.0/evalscope/backend/opencompass/tasks/eval_api.py +30 -0
  11. evalscope-0.5.0/evalscope/backend/opencompass/tasks/eval_datasets.py +71 -0
  12. evalscope-0.5.0/evalscope/backend/vlm_eval_kit/__init__.py +1 -0
  13. evalscope-0.5.0/evalscope/backend/vlm_eval_kit/backend_manager.py +153 -0
  14. evalscope-0.5.0/evalscope/benchmarks/__init__.py +4 -0
  15. evalscope-0.5.0/evalscope/benchmarks/arc/__init__.py +5 -0
  16. evalscope-0.5.0/evalscope/benchmarks/arc/ai2_arc.py +148 -0
  17. evalscope-0.5.0/evalscope/benchmarks/arc/arc_adapter.py +231 -0
  18. evalscope-0.5.0/evalscope/benchmarks/bbh/__init__.py +6 -0
  19. evalscope-0.5.0/evalscope/benchmarks/bbh/bbh_adapter.py +308 -0
  20. evalscope-0.5.0/evalscope/benchmarks/bbh/cot_prompts/boolean_expressions.txt +23 -0
  21. evalscope-0.5.0/evalscope/benchmarks/bbh/cot_prompts/causal_judgement.txt +25 -0
  22. evalscope-0.5.0/evalscope/benchmarks/bbh/cot_prompts/date_understanding.txt +33 -0
  23. evalscope-0.5.0/evalscope/benchmarks/bbh/cot_prompts/disambiguation_qa.txt +37 -0
  24. evalscope-0.5.0/evalscope/benchmarks/bbh/cot_prompts/dyck_languages.txt +72 -0
  25. evalscope-0.5.0/evalscope/benchmarks/bbh/cot_prompts/formal_fallacies.txt +44 -0
  26. evalscope-0.5.0/evalscope/benchmarks/bbh/cot_prompts/geometric_shapes.txt +78 -0
  27. evalscope-0.5.0/evalscope/benchmarks/bbh/cot_prompts/hyperbaton.txt +28 -0
  28. evalscope-0.5.0/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_five_objects.txt +37 -0
  29. evalscope-0.5.0/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_seven_objects.txt +37 -0
  30. evalscope-0.5.0/evalscope/benchmarks/bbh/cot_prompts/logical_deduction_three_objects.txt +37 -0
  31. evalscope-0.5.0/evalscope/benchmarks/bbh/cot_prompts/movie_recommendation.txt +42 -0
  32. evalscope-0.5.0/evalscope/benchmarks/bbh/cot_prompts/multistep_arithmetic_two.txt +25 -0
  33. evalscope-0.5.0/evalscope/benchmarks/bbh/cot_prompts/navigate.txt +43 -0
  34. evalscope-0.5.0/evalscope/benchmarks/bbh/cot_prompts/object_counting.txt +37 -0
  35. evalscope-0.5.0/evalscope/benchmarks/bbh/cot_prompts/penguins_in_a_table.txt +41 -0
  36. evalscope-0.5.0/evalscope/benchmarks/bbh/cot_prompts/reasoning_about_colored_objects.txt +63 -0
  37. evalscope-0.5.0/evalscope/benchmarks/bbh/cot_prompts/ruin_names.txt +44 -0
  38. evalscope-0.5.0/evalscope/benchmarks/bbh/cot_prompts/salient_translation_error_detection.txt +40 -0
  39. evalscope-0.5.0/evalscope/benchmarks/bbh/cot_prompts/snarks.txt +30 -0
  40. evalscope-0.5.0/evalscope/benchmarks/bbh/cot_prompts/sports_understanding.txt +10 -0
  41. evalscope-0.5.0/evalscope/benchmarks/bbh/cot_prompts/temporal_sequences.txt +77 -0
  42. evalscope-0.5.0/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_five_objects.txt +40 -0
  43. evalscope-0.5.0/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_seven_objects.txt +40 -0
  44. evalscope-0.5.0/evalscope/benchmarks/bbh/cot_prompts/tracking_shuffled_objects_three_objects.txt +40 -0
  45. evalscope-0.5.0/evalscope/benchmarks/bbh/cot_prompts/web_of_lies.txt +28 -0
  46. evalscope-0.5.0/evalscope/benchmarks/bbh/cot_prompts/word_sorting.txt +17 -0
  47. evalscope-0.5.0/evalscope/benchmarks/benchmark.py +65 -0
  48. evalscope-0.5.0/evalscope/benchmarks/ceval/__init__.py +5 -0
  49. evalscope-0.5.0/evalscope/benchmarks/ceval/ceval_adapter.py +340 -0
  50. evalscope-0.5.0/evalscope/benchmarks/ceval/ceval_exam.py +159 -0
  51. evalscope-0.5.0/evalscope/benchmarks/cmmlu/__init__.py +5 -0
  52. evalscope-0.5.0/evalscope/benchmarks/cmmlu/cmmlu.py +166 -0
  53. evalscope-0.5.0/evalscope/benchmarks/cmmlu/cmmlu_adapter.py +369 -0
  54. evalscope-0.5.0/evalscope/benchmarks/competition_math/__init__.py +5 -0
  55. evalscope-0.5.0/evalscope/benchmarks/competition_math/competition_math.py +88 -0
  56. evalscope-0.5.0/evalscope/benchmarks/competition_math/competition_math_adapter.py +470 -0
  57. evalscope-0.5.0/evalscope/benchmarks/data_adapter.py +263 -0
  58. evalscope-0.5.0/evalscope/benchmarks/general_qa/__init__.py +5 -0
  59. evalscope-0.5.0/evalscope/benchmarks/general_qa/general_qa_adapter.py +186 -0
  60. evalscope-0.5.0/evalscope/benchmarks/gsm8k/__init__.py +5 -0
  61. evalscope-0.5.0/evalscope/benchmarks/gsm8k/gsm8k.py +127 -0
  62. evalscope-0.5.0/evalscope/benchmarks/gsm8k/gsm8k_adapter.py +236 -0
  63. evalscope-0.5.0/evalscope/benchmarks/hellaswag/__init__.py +5 -0
  64. evalscope-0.5.0/evalscope/benchmarks/hellaswag/hellaswag.py +116 -0
  65. evalscope-0.5.0/evalscope/benchmarks/hellaswag/hellaswag_adapter.py +222 -0
  66. evalscope-0.5.0/evalscope/benchmarks/humaneval/__init__.py +5 -0
  67. evalscope-0.5.0/evalscope/benchmarks/humaneval/humaneval.py +82 -0
  68. evalscope-0.5.0/evalscope/benchmarks/humaneval/humaneval_adapter.py +21 -0
  69. evalscope-0.5.0/evalscope/benchmarks/mmlu/__init__.py +5 -0
  70. evalscope-0.5.0/evalscope/benchmarks/mmlu/mmlu.py +174 -0
  71. evalscope-0.5.0/evalscope/benchmarks/mmlu/mmlu_adapter.py +375 -0
  72. evalscope-0.5.0/evalscope/benchmarks/race/__init__.py +5 -0
  73. evalscope-0.5.0/evalscope/benchmarks/race/race.py +118 -0
  74. evalscope-0.5.0/evalscope/benchmarks/race/race_adapter.py +229 -0
  75. evalscope-0.5.0/evalscope/benchmarks/trivia_qa/__init__.py +5 -0
  76. evalscope-0.5.0/evalscope/benchmarks/trivia_qa/trivia_qa.py +104 -0
  77. evalscope-0.5.0/evalscope/benchmarks/trivia_qa/trivia_qa_adapter.py +207 -0
  78. evalscope-0.5.0/evalscope/benchmarks/truthful_qa/__init__.py +5 -0
  79. evalscope-0.5.0/evalscope/benchmarks/truthful_qa/truthful_qa.py +167 -0
  80. evalscope-0.5.0/evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +351 -0
  81. evalscope-0.5.0/evalscope/cache.py +98 -0
  82. evalscope-0.5.0/evalscope/cli/__init__.py +1 -0
  83. evalscope-0.5.0/evalscope/cli/base.py +20 -0
  84. evalscope-0.5.0/evalscope/cli/cli.py +26 -0
  85. evalscope-0.5.0/evalscope/cli/start_perf.py +37 -0
  86. evalscope-0.5.0/evalscope/cli/start_server.py +138 -0
  87. evalscope-0.5.0/evalscope/config.py +165 -0
  88. evalscope-0.5.0/evalscope/constants.py +150 -0
  89. evalscope-0.5.0/evalscope/evaluator/__init__.py +3 -0
  90. evalscope-0.5.0/evalscope/evaluator/evaluator.py +689 -0
  91. evalscope-0.5.0/evalscope/evaluator/rating_eval.py +178 -0
  92. evalscope-0.5.0/evalscope/evaluator/reviewer/__init__.py +1 -0
  93. evalscope-0.5.0/evalscope/evaluator/reviewer/auto_reviewer.py +411 -0
  94. evalscope-0.5.0/evalscope/metrics/__init__.py +1 -0
  95. evalscope-0.5.0/evalscope/metrics/bundled_rouge_score/__init__.py +14 -0
  96. evalscope-0.5.0/evalscope/metrics/bundled_rouge_score/rouge_scorer.py +342 -0
  97. evalscope-0.5.0/evalscope/metrics/code_metric.py +104 -0
  98. evalscope-0.5.0/evalscope/metrics/math_accuracy.py +60 -0
  99. evalscope-0.5.0/evalscope/metrics/metrics.py +405 -0
  100. evalscope-0.5.0/evalscope/metrics/rouge_metric.py +129 -0
  101. evalscope-0.5.0/evalscope/models/__init__.py +4 -0
  102. evalscope-0.5.0/evalscope/models/custom/__init__.py +4 -0
  103. evalscope-0.5.0/evalscope/models/custom/custom_model.py +53 -0
  104. evalscope-0.5.0/evalscope/models/dummy_chat_model.py +50 -0
  105. evalscope-0.5.0/evalscope/models/model.py +88 -0
  106. evalscope-0.5.0/evalscope/models/model_adapter.py +586 -0
  107. evalscope-0.5.0/evalscope/models/openai_model.py +103 -0
  108. evalscope-0.5.0/evalscope/models/template.py +1446 -0
  109. evalscope-0.5.0/evalscope/perf/__init__.py +0 -0
  110. evalscope-0.5.0/evalscope/perf/_logging.py +32 -0
  111. evalscope-0.5.0/evalscope/perf/api_plugin_base.py +60 -0
  112. evalscope-0.5.0/evalscope/perf/custom_api.py +87 -0
  113. evalscope-0.5.0/evalscope/perf/dashscope_api.py +84 -0
  114. evalscope-0.5.0/evalscope/perf/dataset_plugin_base.py +64 -0
  115. evalscope-0.5.0/evalscope/perf/datasets/__init__.py +0 -0
  116. evalscope-0.5.0/evalscope/perf/datasets/line_by_line.py +18 -0
  117. evalscope-0.5.0/evalscope/perf/datasets/longalpaca_12k.py +20 -0
  118. evalscope-0.5.0/evalscope/perf/datasets/openqa.py +22 -0
  119. evalscope-0.5.0/evalscope/perf/how_to_analysis_result.py +24 -0
  120. evalscope-0.5.0/evalscope/perf/http_client.py +756 -0
  121. evalscope-0.5.0/evalscope/perf/openai_api.py +130 -0
  122. evalscope-0.5.0/evalscope/perf/plugin_registry.py +35 -0
  123. evalscope-0.5.0/evalscope/perf/query_parameters.py +42 -0
  124. evalscope-0.5.0/evalscope/perf/server_sent_event.py +43 -0
  125. evalscope-0.5.0/evalscope/preprocess/__init__.py +1 -0
  126. evalscope-0.5.0/evalscope/preprocess/tokenizers/__init__.py +0 -0
  127. evalscope-0.5.0/evalscope/preprocess/tokenizers/gpt2_tokenizer.py +221 -0
  128. evalscope-0.5.0/evalscope/registry/__init__.py +1 -0
  129. evalscope-0.5.0/evalscope/registry/tasks/arc.yaml +29 -0
  130. evalscope-0.5.0/evalscope/registry/tasks/bbh.yaml +27 -0
  131. evalscope-0.5.0/evalscope/registry/tasks/bbh_mini.yaml +27 -0
  132. evalscope-0.5.0/evalscope/registry/tasks/ceval.yaml +27 -0
  133. evalscope-0.5.0/evalscope/registry/tasks/ceval_mini.yaml +27 -0
  134. evalscope-0.5.0/evalscope/registry/tasks/cmmlu.yaml +27 -0
  135. evalscope-0.5.0/evalscope/registry/tasks/eval_qwen-7b-chat_v100.yaml +28 -0
  136. evalscope-0.5.0/evalscope/registry/tasks/general_qa.yaml +27 -0
  137. evalscope-0.5.0/evalscope/registry/tasks/gsm8k.yaml +29 -0
  138. evalscope-0.5.0/evalscope/registry/tasks/mmlu.yaml +29 -0
  139. evalscope-0.5.0/evalscope/registry/tasks/mmlu_mini.yaml +27 -0
  140. evalscope-0.5.0/evalscope/run.py +404 -0
  141. evalscope-0.5.0/evalscope/run_arena.py +204 -0
  142. evalscope-0.5.0/evalscope/run_ms.py +140 -0
  143. evalscope-0.5.0/evalscope/summarizer.py +144 -0
  144. evalscope-0.5.0/evalscope/third_party/__init__.py +1 -0
  145. evalscope-0.5.0/evalscope/third_party/toolbench_static/__init__.py +3 -0
  146. evalscope-0.5.0/evalscope/third_party/toolbench_static/eval.py +219 -0
  147. evalscope-0.5.0/evalscope/third_party/toolbench_static/infer.py +278 -0
  148. evalscope-0.5.0/evalscope/third_party/toolbench_static/llm/__init__.py +1 -0
  149. evalscope-0.5.0/evalscope/third_party/toolbench_static/llm/swift_infer.py +45 -0
  150. evalscope-0.5.0/evalscope/third_party/toolbench_static/toolbench_static.py +50 -0
  151. evalscope-0.5.0/evalscope/tools/__init__.py +1 -0
  152. evalscope-0.5.0/evalscope/tools/combine_reports.py +140 -0
  153. evalscope-0.5.0/evalscope/tools/gen_mmlu_subject_mapping.py +90 -0
  154. evalscope-0.5.0/evalscope/tools/rewrite_eval_results.py +95 -0
  155. evalscope-0.5.0/evalscope/utils/__init__.py +4 -0
  156. evalscope-0.5.0/evalscope/utils/arena_utils.py +247 -0
  157. evalscope-0.5.0/evalscope/utils/completion_parsers.py +87 -0
  158. evalscope-0.5.0/evalscope/utils/logger.py +64 -0
  159. evalscope-0.5.0/evalscope/utils/task_cfg_parser.py +10 -0
  160. evalscope-0.5.0/evalscope/utils/task_utils.py +19 -0
  161. evalscope-0.5.0/evalscope/utils/utils.py +625 -0
  162. evalscope-0.5.0/evalscope/version.py +4 -0
  163. evalscope-0.5.0/evalscope.egg-info/PKG-INFO +475 -0
  164. evalscope-0.5.0/evalscope.egg-info/SOURCES.txt +168 -0
  165. evalscope-0.5.0/evalscope.egg-info/dependency_links.txt +1 -0
  166. evalscope-0.5.0/evalscope.egg-info/entry_points.txt +3 -0
  167. evalscope-0.5.0/evalscope.egg-info/not-zip-safe +1 -0
  168. evalscope-0.5.0/evalscope.egg-info/requires.txt +100 -0
  169. evalscope-0.5.0/evalscope.egg-info/top_level.txt +1 -0
  170. evalscope-0.5.0/setup.cfg +4 -0
@@ -0,0 +1,475 @@
1
+ Metadata-Version: 2.1
2
+ Name: evalscope
3
+ Version: 0.5.0
4
+ Summary: Eval-Scope: Lightweight LLMs Evaluation Framework
5
+ Home-page: https://github.com/modelscope/eval-scope
6
+ Author: ModelScope team
7
+ Author-email: contact@modelscope.cn
8
+ License: UNKNOWN
9
+ Keywords: python,llm,evaluation
10
+ Platform: UNKNOWN
11
+ Classifier: Development Status :: 4 - Beta
12
+ Classifier: License :: OSI Approved :: Apache Software License
13
+ Classifier: Operating System :: OS Independent
14
+ Classifier: Programming Language :: Python :: 3
15
+ Classifier: Programming Language :: Python :: 3.8
16
+ Classifier: Programming Language :: Python :: 3.9
17
+ Classifier: Programming Language :: Python :: 3.10
18
+ Requires-Python: >=3.8
19
+ Description-Content-Type: text/markdown
20
+ Provides-Extra: opencompass
21
+ Provides-Extra: vlmeval
22
+ Provides-Extra: inner
23
+ Provides-Extra: all
24
+
25
+ English | [简体中文](README_zh.md)
26
+
27
+ <p align="center">
28
+ <a href="https://pypi.org/project/evalscope"><img alt="PyPI - Downloads" src="https://img.shields.io/pypi/dm/evalscope">
29
+ </a>
30
+ <a href="https://github.com/modelscope/eval-scope/pulls"><img src="https://img.shields.io/badge/PR-welcome-55EB99.svg"></a>
31
+ </p>
32
+
33
+ ## 📖 Table of Contents
34
+ - [Introduction](#introduction)
35
+ - [News](#News)
36
+ - [Installation](#installation)
37
+ - [Quick Start](#quick-start)
38
+ - [Dataset List](#datasets-list)
39
+ - [Leaderboard](#leaderboard)
40
+ - [Experiments and Results](#Experiments-and-Results)
41
+ - [Model Serving Performance Evaluation](#Model-Serving-Performance-Evaluation)
42
+
43
+ ## 📝 Introduction
44
+
45
+ Large Language Model (LLM) evaluation has become a critical process for assessing and improving LLMs. To better support the evaluation of large models, we propose the Eval-Scope framework, which includes the following components and features:
46
+
47
+ - Pre-configured common benchmark datasets, including: MMLU, CMMLU, C-Eval, GSM8K, ARC, HellaSwag, TruthfulQA, MATH, HumanEval, etc.
48
+ - Implementation of common evaluation metrics
49
+ - Unified model integration, compatible with the generate and chat interfaces of multiple model series
50
+ - Automatic evaluation (evaluator):
51
+ - Automatic evaluation for objective questions
52
+ - Implementation of complex task evaluation using expert models
53
+ - Evaluation report generation
54
+ - Arena mode
55
+ - Visualization tools
56
+ - Model Inference Performance Evaluation [Tutorial](evalscope/perf/README.md)
57
+ - Support for OpenCompass as an Evaluation Backend, featuring advanced encapsulation and task simplification to easily submit tasks to OpenCompass for evaluation.
58
+ - Supports VLMEvalKit as the evaluation backend. It initiates VLMEvalKit's multimodal evaluation tasks through Eval-Scope, supporting various multimodal models and datasets.
59
+ - Full pipeline support: Seamlessly integrate with SWIFT to easily train and deploy model services, initiate evaluation tasks, view evaluation reports, and achieve an end-to-end large model development process.
60
+
61
+
62
+ **Features**
63
+ - Lightweight, minimizing unnecessary abstractions and configurations
64
+ - Easy to customize
65
+ - New datasets can be integrated by simply implementing a single class
66
+ - Models can be hosted on [ModelScope](https://modelscope.cn), and evaluations can be initiated with just a model id
67
+ - Supports deployment of locally hosted models
68
+ - Visualization of evaluation reports
69
+ - Rich evaluation metrics
70
+ - Model-based automatic evaluation process, supporting multiple evaluation modes
71
+ - Single mode: Expert models score individual models
72
+ - Pairwise-baseline mode: Comparison with baseline models
73
+ - Pairwise (all) mode: Pairwise comparison of all models
74
+
75
+ ## 🎉 News
76
+ - **[2024.07.31]** Breaking change: The sdk name has been changed from `llmuses` to `evalscope`, please update the sdk name in your code.
77
+ - **[2024.07.26]** Supports **VLMEvalKit** as a third-party evaluation framework, initiating multimodal model evaluation tasks. [User Guide](#vlmevalkit-evaluation-backend) 🔥🔥🔥
78
+ - **[2024.06.29]** Supports **OpenCompass** as a third-party evaluation framework. We have provided a high-level wrapper, supporting installation via pip and simplifying the evaluation task configuration. [User Guide](#opencompass-evaluation-backend) 🔥🔥🔥
79
+ - **[2024.06.13]** Eval-Scope has been updated to version 0.3.x, which supports the ModelScope SWIFT framework for LLMs evaluation. 🚀🚀🚀
80
+ - **[2024.06.13]** We have supported the ToolBench as a third-party evaluation backend for Agents evaluation. 🚀🚀🚀
81
+
82
+
83
+
84
+ ## 🛠️ Installation
85
+ ### Install with pip
86
+ 1. create conda environment
87
+ ```shell
88
+ conda create -n eval-scope python=3.10
89
+ conda activate eval-scope
90
+ ```
91
+
92
+ 2. Install Eval-Scope
93
+ ```shell
94
+ pip install evalscope
95
+ ```
96
+
97
+ ### Install from source code
98
+ 1. Download source code
99
+ ```shell
100
+ git clone https://github.com/modelscope/eval-scope.git
101
+ ```
102
+
103
+ 2. Install dependencies
104
+ ```shell
105
+ cd eval-scope/
106
+ pip install -e .
107
+ ```
108
+
109
+
110
+ ## 🚀 Quick Start
111
+
112
+ ### Simple Evaluation
113
+ command line with pip installation:
114
+ ```shell
115
+ python -m evalscope.run --model ZhipuAI/chatglm3-6b --template-type chatglm3 --datasets arc --limit 100
116
+ ```
117
+ command line with source code:
118
+ ```shell
119
+ python evalscope/run.py --model ZhipuAI/chatglm3-6b --template-type chatglm3 --datasets mmlu ceval --limit 10
120
+ ```
121
+ Parameters:
122
+ - --model: ModelScope model id, model link: [ZhipuAI/chatglm3-6b](https://modelscope.cn/models/ZhipuAI/chatglm3-6b/summary)
123
+
124
+ ### Evaluation with Model Arguments
125
+ ```shell
126
+ python evalscope/run.py --model ZhipuAI/chatglm3-6b --template-type chatglm3 --model-args revision=v1.0.2,precision=torch.float16,device_map=auto --datasets mmlu ceval --use-cache true --limit 10
127
+ ```
128
+ ```shell
129
+ python evalscope/run.py --model qwen/Qwen-1_8B --generation-config do_sample=false,temperature=0.0 --datasets ceval --dataset-args '{"ceval": {"few_shot_num": 0, "few_shot_random": false}}' --limit 10
130
+ ```
131
+ Parameters:
132
+ - --model-args: Parameters of model: revision, precision, device_map, in format of key=value,key=value
133
+ - --datasets: datasets list, separated by space
134
+ - --use-cache: `true` or `false`, whether to use cache, default is `false`
135
+ - --dataset-args: evaluation settings, JSON format; key is the dataset name, value should be args for the dataset
136
+ - --few_shot_num: few-shot data number
137
+ - --few_shot_random: whether to use random few-shot data, default is `true`
138
+ - --local_path: local dataset path
139
+ - --limit: maximum number of samples to evaluate for each sub-dataset
140
+ - --template-type: model template type, see [Template Type List](https://github.com/modelscope/swift/blob/main/docs/source_en/LLM/Supported-models-datasets.md)
141
+
142
+ Note: you can use the following code to check the template type list of the model:
143
+ ```python
144
+ from evalscope.models.template import TemplateType
145
+ print(TemplateType.get_template_name_list())
146
+ ```
147
+
148
+ ### Evaluation Backend
149
+ Eval-Scope supports using third-party evaluation frameworks to initiate evaluation tasks, which we call Evaluation Backend. Currently supported Evaluation Backend includes:
150
+ - **Native**: Eval-Scope's own **default evaluation framework**, supporting various evaluation modes including single model evaluation, arena mode, and baseline model comparison mode.
151
+ - [OpenCompass](https://github.com/open-compass/opencompass): Initiate OpenCompass evaluation tasks through Eval-Scope. Lightweight, easy to customize, supports seamless integration with the LLM fine-tuning framework [ModelScope Swift](https://github.com/modelscope/swift).
152
+ - [VLMEvalKit](https://github.com/open-compass/VLMEvalKit): Initiate VLMEvalKit multimodal evaluation tasks through Eval-Scope. Supports various multimodal models and datasets, and offers seamless integration with the LLM fine-tuning framework [ModelScope Swift](https://github.com/modelscope/swift).
153
+ - **ThirdParty**: The third-party task, e.g. [ToolBench](evalscope/third_party/toolbench_static/README.md), you can contribute your own evaluation task to Eval-Scope as third-party backend.
154
+
155
+ #### OpenCompass Eval-Backend
156
+
157
+ To facilitate the use of the OpenCompass evaluation backend, we have customized the OpenCompass source code and named it `ms-opencompass`. This version includes optimizations for evaluation task configuration and execution based on the original version, and it supports installation via PyPI. This allows users to initiate lightweight OpenCompass evaluation tasks through Eval-Scope. Additionally, we have initially opened up API-based evaluation tasks in the OpenAI API format. You can deploy model services using [ModelScope Swift](https://github.com/modelscope/swift), where [swift deploy](https://swift.readthedocs.io/en/latest/LLM/VLLM-inference-acceleration-and-deployment.html) supports using vLLM to launch model inference services.
158
+
159
+
160
+ ##### Installation
161
+ ```shell
162
+ # Install with extra option
163
+ pip install evalscope[opencompass]
164
+ ```
165
+
166
+ ##### Data Preparation
167
+ Available datasets from OpenCompass backend:
168
+ ```text
169
+ 'obqa', 'AX_b', 'siqa', 'nq', 'mbpp', 'winogrande', 'mmlu', 'BoolQ', 'cluewsc', 'ocnli', 'lambada', 'CMRC', 'ceval', 'csl', 'cmnli', 'bbh', 'ReCoRD', 'math', 'humaneval', 'eprstmt', 'WSC', 'storycloze', 'MultiRC', 'RTE', 'chid', 'gsm8k', 'AX_g', 'bustm', 'afqmc', 'piqa', 'lcsts', 'strategyqa', 'Xsum', 'agieval', 'ocnli_fc', 'C3', 'tnews', 'race', 'triviaqa', 'CB', 'WiC', 'hellaswag', 'summedits', 'GaokaoBench', 'ARC_e', 'COPA', 'ARC_c', 'DRCD'
170
+ ```
171
+ Refer to [OpenCompass datasets](https://hub.opencompass.org.cn/home)
172
+
173
+ You can use the following code to list all available datasets:
174
+ ```python
175
+ from evalscope.backend.opencompass import OpenCompassBackendManager
176
+ print(f'** All datasets from OpenCompass backend: {OpenCompassBackendManager.list_datasets()}')
177
+ ```
178
+
179
+ Dataset download:
180
+ - Option1: Download from ModelScope
181
+ ```shell
182
+ git clone https://www.modelscope.cn/datasets/swift/evalscope_resource.git
183
+ ```
184
+
185
+ - Option2: Download from OpenCompass GitHub
186
+ ```shell
187
+ wget https://github.com/open-compass/opencompass/releases/download/0.2.2.rc1/OpenCompassData-complete-20240207.zip
188
+ ```
189
+
190
+ Unzip the file and set the path to the `data` directory in current work directory.
191
+
192
+
193
+ ##### Model Serving
194
+ We use ModelScope swift to deploy model services, see: [ModelScope Swift](https://swift.readthedocs.io/en/latest/LLM/VLLM-inference-acceleration-and-deployment.html)
195
+ ```shell
196
+ # Install ms-swift
197
+ pip install ms-swift
198
+
199
+ # Deploy model
200
+ CUDA_VISIBLE_DEVICES=0 swift deploy --model_type llama3-8b-instruct --port 8000
201
+ ```
202
+
203
+
204
+ ##### Model Evaluation
205
+
206
+ Refer to example: [example_eval_swift_openai_api](examples/example_eval_swift_openai_api.py) to configure and execute the evaluation task:
207
+ ```shell
208
+ python examples/example_eval_swift_openai_api.py
209
+ ```
210
+
211
+ #### VLMEvalKit Evaluation Backend
212
+
213
+ To facilitate the use of the VLMEvalKit evaluation backend, we have customized the VLMEvalKit source code and named it `ms-vlmeval`. This version encapsulates the configuration and execution of evaluation tasks based on the original version and supports installation via PyPI, allowing users to initiate lightweight VLMEvalKit evaluation tasks through Eval-Scope. Additionally, we support API-based evaluation tasks in the OpenAI API format. You can deploy multimodal model services using ModelScope [swift](https://github.com/modelscope/swift).
214
+
215
+ ##### Installation
216
+ ```shell
217
+ # Install with additional options
218
+ pip install evalscope[vlmeval]
219
+ ```
220
+
221
+ ##### Data Preparation
222
+ Currently supported datasets include:
223
+ ```text
224
+ 'COCO_VAL', 'MME', 'HallusionBench', 'POPE', 'MMBench_DEV_EN', 'MMBench_TEST_EN', 'MMBench_DEV_CN', 'MMBench_TEST_CN', 'MMBench', 'MMBench_CN', 'MMBench_DEV_EN_V11', 'MMBench_TEST_EN_V11', 'MMBench_DEV_CN_V11', 'MMBench_TEST_CN_V11', 'MMBench_V11', 'MMBench_CN_V11', 'SEEDBench_IMG', 'SEEDBench2', 'SEEDBench2_Plus', 'ScienceQA_VAL', 'ScienceQA_TEST', 'MMT-Bench_ALL_MI', 'MMT-Bench_ALL', 'MMT-Bench_VAL_MI', 'MMT-Bench_VAL', 'AesBench_VAL', 'AesBench_TEST', 'CCBench', 'AI2D_TEST', 'MMStar', 'RealWorldQA', 'MLLMGuard_DS', 'BLINK', 'OCRVQA_TEST', 'OCRVQA_TESTCORE', 'TextVQA_VAL', 'DocVQA_VAL', 'DocVQA_TEST', 'InfoVQA_VAL', 'InfoVQA_TEST', 'ChartQA_VAL', 'ChartQA_TEST', 'MathVision', 'MathVision_MINI', 'MMMU_DEV_VAL', 'MMMU_TEST', 'OCRBench', 'MathVista_MINI', 'LLaVABench', 'MMVet', 'MTVQA_TEST', 'MMLongBench_DOC', 'VCR_EN_EASY_500', 'VCR_EN_EASY_100', 'VCR_EN_EASY_ALL', 'VCR_EN_HARD_500', 'VCR_EN_HARD_100', 'VCR_EN_HARD_ALL', 'VCR_ZH_EASY_500', 'VCR_ZH_EASY_100', 'VCR_ZH_EASY_ALL', 'VCR_ZH_HARD_500', 'VCR_ZH_HARD_100', 'VCR_ZH_HARD_ALL', 'MMBench-Video', 'Video-MME', 'MMBench_DEV_EN', 'MMBench_TEST_EN', 'MMBench_DEV_CN', 'MMBench_TEST_CN', 'MMBench', 'MMBench_CN', 'MMBench_DEV_EN_V11', 'MMBench_TEST_EN_V11', 'MMBench_DEV_CN_V11', 'MMBench_TEST_CN_V11', 'MMBench_V11', 'MMBench_CN_V11', 'SEEDBench_IMG', 'SEEDBench2', 'SEEDBench2_Plus', 'ScienceQA_VAL', 'ScienceQA_TEST', 'MMT-Bench_ALL_MI', 'MMT-Bench_ALL', 'MMT-Bench_VAL_MI', 'MMT-Bench_VAL', 'AesBench_VAL', 'AesBench_TEST', 'CCBench', 'AI2D_TEST', 'MMStar', 'RealWorldQA', 'MLLMGuard_DS', 'BLINK'
225
+ ```
226
+ For detailed information about the datasets, please refer to [VLMEvalKit Supported Multimodal Evaluation Sets](https://github.com/open-compass/VLMEvalKit/tree/main#-datasets-models-and-evaluation-results).
227
+
228
+ You can use the following to view the list of dataset names:
229
+ ```python
230
+ from evalscope.backend.vlm_eval_kit import VLMEvalKitBackendManager
231
+ print(f'** All models from VLMEvalKit backend: {VLMEvalKitBackendManager.list_supported_VLMs().keys()}')
232
+ ```
233
+ If the dataset file does not exist locally when loading the dataset, it will be automatically downloaded to the `~/LMUData/` directory.
234
+
235
+
236
+ ##### Model Evaluation
237
+ There are two ways to evaluate the model:
238
+
239
+ ###### 1. ModelScope Swift Deployment for Model Evaluation
240
+ **Model Deployment**
241
+ Deploy the model service using ModelScope Swift. For detailed instructions, refer to: [ModelScope Swift MLLM Deployment Guide](https://swift.readthedocs.io/en/latest/Multi-Modal/mutlimodal-deployment.html)
242
+ ```shell
243
+ # Install ms-swift
244
+ pip install ms-swift
245
+ # Deploy the qwen-vl-chat multi-modal model service
246
+ CUDA_VISIBLE_DEVICES=0 swift deploy --model_type qwen-vl-chat --model_id_or_path models/Qwen-VL-Chat
247
+ ```
248
+ **Model Evaluation**
249
+ Refer to the example file: [example_eval_vlm_swift](examples/example_eval_vlm_swift.py) to configure the evaluation task.
250
+ Execute the evaluation task:
251
+ ```shell
252
+ python examples/example_eval_vlm_swift.py
253
+ ```
254
+
255
+ ###### 2. Local Model Inference Evaluation
256
+ **Model Inference Evaluation**
257
+ Skip the model service deployment and perform inference directly on the local machine. Refer to the example file: [example_eval_vlm_local](examples/example_eval_vlm_local.py) to configure the evaluation task.
258
+ Execute the evaluation task:
259
+ ```shell
260
+ python examples/example_eval_vlm_local.py
261
+ ```
262
+
263
+
264
+ ##### (Optional) Deploy Judge Model
265
+ Deploy the local language model as a judge/extractor using ModelScope swift. For details, refer to: [ModelScope Swift LLM Deployment Guide](https://swift.readthedocs.io/en/latest/LLM/VLLM-inference-acceleration-and-deployment.html). If no judge model is deployed, exact matching will be used.
266
+
267
+ ```shell
268
+ # Deploy qwen2-7b as a judge
269
+ CUDA_VISIBLE_DEVICES=1 swift deploy --model_type qwen2-7b-instruct --model_id_or_path models/Qwen2-7B-Instruct --port 8866
270
+ ```
271
+
272
+ You **must configure the following environment variables for the judge model to be correctly invoked**:
273
+ ```
274
+ OPENAI_API_KEY=EMPTY
275
+ OPENAI_API_BASE=http://127.0.0.1:8866/v1/chat/completions # api_base for the judge model
276
+ LOCAL_LLM=qwen2-7b-instruct # model_id for the judge model
277
+ ```
278
+
279
+ ##### Model Evaluation
280
+ Refer to the example file: [example_eval_vlm_swift](examples/example_eval_vlm_swift.py) to configure the evaluation task.
281
+
282
+ Execute the evaluation task:
283
+
284
+ ```shell
285
+ python examples/example_eval_vlm_swift.py
286
+ ```
287
+
288
+
289
+ ### Local Dataset
290
+ You can use local dataset to evaluate the model without internet connection.
291
+ #### 1. Download and unzip the dataset
292
+ ```shell
293
+ # set path to /path/to/workdir
294
+ wget https://modelscope.oss-cn-beijing.aliyuncs.com/open_data/benchmark/data.zip
295
+ unzip data.zip
296
+ ```
297
+
298
+
299
+ #### 2. Use local dataset to evaluate the model
300
+ ```shell
301
+ python evalscope/run.py --model ZhipuAI/chatglm3-6b --template-type chatglm3 --datasets arc --dataset-hub Local --dataset-args '{"arc": {"local_path": "/path/to/workdir/data/arc"}}' --limit 10
302
+
303
+ # Parameters:
304
+ # --dataset-hub: dataset sources: `ModelScope`, `Local`, `HuggingFace` (TO-DO) default to `ModelScope`
305
+ # --dataset-args: json format, key is the dataset name, value should be args for the dataset
306
+ ```
307
+
308
+ #### 3. (Optional) Use local mode to submit evaluation task
309
+
310
+ ```shell
311
+ # 1. Prepare the model local folder, the folder structure refers to chatglm3-6b, link: https://modelscope.cn/models/ZhipuAI/chatglm3-6b/files
312
+ # For example, download the model folder to the local path /path/to/ZhipuAI/chatglm3-6b
313
+
314
+ # 2. Execute the offline evaluation task
315
+ python evalscope/run.py --model /path/to/ZhipuAI/chatglm3-6b --template-type chatglm3 --datasets arc --dataset-hub Local --dataset-args '{"arc": {"local_path": "/path/to/workdir/data/arc"}}' --limit 10
316
+ ```
317
+
318
+
319
+ ### Use run_task function
320
+
321
+ #### 1. Configuration
322
+ ```python
323
+ import torch
324
+ from evalscope.constants import DEFAULT_ROOT_CACHE_DIR
325
+
326
+ # Example configuration
327
+ your_task_cfg = {
328
+ 'model_args': {'revision': None, 'precision': torch.float16, 'device_map': 'auto'},
329
+ 'generation_config': {'do_sample': False, 'repetition_penalty': 1.0, 'max_new_tokens': 512},
330
+ 'dataset_args': {},
331
+ 'dry_run': False,
332
+ 'model': 'ZhipuAI/chatglm3-6b',
333
+ 'template_type': 'chatglm3',
334
+ 'datasets': ['arc', 'hellaswag'],
335
+ 'work_dir': DEFAULT_ROOT_CACHE_DIR,
336
+ 'outputs': DEFAULT_ROOT_CACHE_DIR,
337
+ 'mem_cache': False,
338
+ 'dataset_hub': 'ModelScope',
339
+ 'dataset_dir': DEFAULT_ROOT_CACHE_DIR,
340
+ 'stage': 'all',
341
+ 'limit': 10,
342
+ 'debug': False
343
+ }
344
+
345
+ ```
346
+
347
+ #### 2. Execute the task
348
+ ```python
349
+ from evalscope.run import run_task
350
+
351
+ run_task(task_cfg=your_task_cfg)
352
+ ```
353
+
354
+
355
+ ### Arena Mode
356
+ The Arena mode allows multiple candidate models to be evaluated through pairwise battles; the evaluation report can be obtained either via the AI Enhanced Auto-Reviewer (AAR) automatic evaluation process or via manual evaluation. The process is as follows:
357
+ #### 1. Env preparation
358
+ ```text
359
+ a. Data preparation, the question data format refers to: evalscope/registry/data/question.jsonl
360
+ b. If you need to use the automatic evaluation process (AAR), you need to configure the relevant environment variables. Taking the GPT-4 based auto-reviewer process as an example, you need to configure the following environment variables:
361
+ > export OPENAI_API_KEY=YOUR_OPENAI_API_KEY
362
+ ```
363
+
364
+ #### 2. Configuration files
365
+ ```text
366
+ Refer to : evalscope/registry/config/cfg_arena.yaml
367
+ Parameters:
368
+ questions_file: question data path
369
+ answers_gen: candidate model prediction result generation, supports multiple models, can control whether to enable the model through the enable parameter
370
+ reviews_gen: evaluation result generation, currently defaults to using GPT-4 as the Auto-reviewer, can control whether to enable this step through the enable parameter
371
+ elo_rating: ELO rating algorithm, can control whether to enable this step through the enable parameter; note that this step requires the review_file to exist
372
+ ```
373
+
374
+ #### 3. Execute the script
375
+ ```shell
376
+ #Usage:
377
+ cd evalscope
378
+
379
+ # dry-run mode
380
+ python evalscope/run_arena.py -c registry/config/cfg_arena.yaml --dry-run
381
+
382
+ # Execute the script
383
+ python evalscope/run_arena.py -c registry/config/cfg_arena.yaml
384
+ ```
385
+
386
+ #### 4. Visualization
387
+
388
+ ```shell
389
+ # Usage:
390
+ streamlit run viz.py -- --review-file evalscope/registry/data/qa_browser/battle.jsonl --category-file evalscope/registry/data/qa_browser/category_mapping.yaml
391
+ ```
392
+
393
+
394
+ ### Single Model Evaluation Mode
395
+
396
+ In this mode, we only score the output of a single model, without pairwise comparison.
397
+ #### 1. Configuration file
398
+ ```text
399
+ Refer to: evalscope/registry/config/cfg_single.yaml
400
+ Parameters:
401
+ questions_file: question data path
402
+ answers_gen: candidate model prediction result generation, supports multiple models, can control whether to enable the model through the enable parameter
403
+ reviews_gen: evaluation result generation, currently defaults to using GPT-4 as the Auto-reviewer, can control whether to enable this step through the enable parameter
404
+ rating_gen: rating algorithm, can control whether to enable this step through the enable parameter; note that this step requires the review_file to exist
405
+ ```
406
+ #### 2. Execute the script
407
+ ```shell
408
+ #Example:
409
+ python evalscope/run_arena.py -c registry/config/cfg_single.yaml
410
+ ```
411
+
412
+ ### Baseline Model Comparison Mode
413
+
414
+ In this mode, we select the baseline model, and compare other models with the baseline model for scoring. This mode can easily add new models to the Leaderboard (just need to run the scoring with the new model and the baseline model).
415
+
416
+ #### 1. Configuration file
417
+ ```text
418
+ Refer to: evalscope/registry/config/cfg_pairwise_baseline.yaml
419
+ Parameters:
420
+ questions_file: question data path
421
+ answers_gen: candidate model prediction result generation, supports multiple models, can control whether to enable the model through the enable parameter
422
+ reviews_gen: evaluation result generation, currently defaults to using GPT-4 as the Auto-reviewer, can control whether to enable this step through the enable parameter
423
+ rating_gen: rating algorithm, can control whether to enable this step through the enable parameter; note that this step requires the review_file to exist
424
+ ```
425
+ #### 2. Execute the script
426
+ ```shell
427
+ # Example:
428
+ python evalscope/run_arena.py -c registry/config/cfg_pairwise_baseline.yaml
429
+ ```
430
+
431
+
432
+ ## Datasets list
433
+
434
+ | DatasetName | Link | Status | Note |
435
+ |--------------------|----------------------------------------------------------------------------------------|--------|------|
436
+ | `mmlu` | [mmlu](https://modelscope.cn/datasets/modelscope/mmlu/summary) | Active | |
437
+ | `ceval` | [ceval](https://modelscope.cn/datasets/modelscope/ceval-exam/summary) | Active | |
438
+ | `gsm8k` | [gsm8k](https://modelscope.cn/datasets/modelscope/gsm8k/summary) | Active | |
439
+ | `arc` | [arc](https://modelscope.cn/datasets/modelscope/ai2_arc/summary) | Active | |
440
+ | `hellaswag` | [hellaswag](https://modelscope.cn/datasets/modelscope/hellaswag/summary) | Active | |
441
+ | `truthful_qa` | [truthful_qa](https://modelscope.cn/datasets/modelscope/truthful_qa/summary) | Active | |
442
+ | `competition_math` | [competition_math](https://modelscope.cn/datasets/modelscope/competition_math/summary) | Active | |
443
+ | `humaneval` | [humaneval](https://modelscope.cn/datasets/modelscope/humaneval/summary) | Active | |
444
+ | `bbh` | [bbh](https://modelscope.cn/datasets/modelscope/bbh/summary) | Active | |
445
+ | `race` | [race](https://modelscope.cn/datasets/modelscope/race/summary) | Active | |
446
+ | `trivia_qa` | [trivia_qa](https://modelscope.cn/datasets/modelscope/trivia_qa/summary) | To be integrated | |
447
+
448
+
449
+ ## Leaderboard
450
+ The LLM Leaderboard aims to provide an objective and comprehensive evaluation standard and platform to help researchers and developers understand and compare the performance of models on various tasks on ModelScope.
451
+
452
+ [Leaderboard](https://modelscope.cn/leaderboard/58/ranking?type=free)
453
+
454
+
455
+
456
+ ## Experiments and Results
457
+ [Experiments](./resources/experiments.md)
458
+
459
+ ## Model Serving Performance Evaluation
460
+ [Perf](evalscope/perf/README.md)
461
+
462
+ ## TO-DO List
463
+ - ✅ Agents evaluation
464
+ - [ ] vLLM
465
+ - [ ] Distributed evaluating
466
+ - ✅ Multi-modal evaluation
467
+ - [ ] Benchmarks
468
+ - [ ] GAIA
469
+ - [ ] GPQA
470
+ - ✅ MBPP
471
+ - [ ] Auto-reviewer
472
+ - [ ] Qwen-max
473
+
474
+
475
+