evalscope 1.1.0__py3-none-any.whl → 1.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of evalscope might be problematic.

Files changed (100)
  1. evalscope/api/benchmark/__init__.py +8 -1
  2. evalscope/api/benchmark/adapters/__init__.py +1 -0
  3. evalscope/api/benchmark/adapters/ner_adapter.py +212 -0
  4. evalscope/api/benchmark/benchmark.py +14 -0
  5. evalscope/api/dataset/dataset.py +21 -0
  6. evalscope/api/dataset/loader.py +6 -2
  7. evalscope/api/mixin/sandbox_mixin.py +32 -54
  8. evalscope/api/model/generate_config.py +6 -0
  9. evalscope/benchmarks/aa_lcr/__init__.py +0 -0
  10. evalscope/benchmarks/aa_lcr/aa_lcr_adapter.py +205 -0
  11. evalscope/benchmarks/bfcl/bfcl_adapter.py +1 -1
  12. evalscope/benchmarks/data_collection/data_collection_adapter.py +2 -1
  13. evalscope/benchmarks/general_arena/general_arena_adapter.py +1 -1
  14. evalscope/benchmarks/general_mcq/general_mcq_adapter.py +1 -1
  15. evalscope/benchmarks/general_qa/general_qa_adapter.py +1 -1
  16. evalscope/benchmarks/gsm8k/gsm8k_adapter.py +23 -4
  17. evalscope/benchmarks/hallusion_bench/__init__.py +0 -0
  18. evalscope/benchmarks/hallusion_bench/hallusion_bench_adapter.py +158 -0
  19. evalscope/benchmarks/humaneval/humaneval_adapter.py +2 -1
  20. evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +3 -1
  21. evalscope/benchmarks/math_verse/__init__.py +0 -0
  22. evalscope/benchmarks/math_verse/math_verse_adapter.py +100 -0
  23. evalscope/benchmarks/math_vision/__init__.py +0 -0
  24. evalscope/benchmarks/math_vision/math_vision_adapter.py +111 -0
  25. evalscope/benchmarks/math_vista/math_vista_adapter.py +6 -26
  26. evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +1 -1
  27. evalscope/benchmarks/ner/__init__.py +0 -0
  28. evalscope/benchmarks/ner/broad_twitter_corpus_adapter.py +52 -0
  29. evalscope/benchmarks/ner/conll2003_adapter.py +48 -0
  30. evalscope/benchmarks/ner/copious_adapter.py +85 -0
  31. evalscope/benchmarks/ner/cross_ner_adapter.py +120 -0
  32. evalscope/benchmarks/ner/cross_ner_entities/__init__.py +0 -0
  33. evalscope/benchmarks/ner/cross_ner_entities/ai.py +54 -0
  34. evalscope/benchmarks/ner/cross_ner_entities/literature.py +36 -0
  35. evalscope/benchmarks/ner/cross_ner_entities/music.py +39 -0
  36. evalscope/benchmarks/ner/cross_ner_entities/politics.py +37 -0
  37. evalscope/benchmarks/ner/cross_ner_entities/science.py +58 -0
  38. evalscope/benchmarks/ner/genia_ner_adapter.py +66 -0
  39. evalscope/benchmarks/ner/harvey_ner_adapter.py +58 -0
  40. evalscope/benchmarks/ner/mit_movie_trivia_adapter.py +74 -0
  41. evalscope/benchmarks/ner/mit_restaurant_adapter.py +66 -0
  42. evalscope/benchmarks/ner/ontonotes5_adapter.py +87 -0
  43. evalscope/benchmarks/ner/wnut2017_adapter.py +61 -0
  44. evalscope/benchmarks/ocr_bench_v2/utils.py +1 -0
  45. evalscope/benchmarks/omnidoc_bench/__init__.py +0 -0
  46. evalscope/benchmarks/omnidoc_bench/end2end_eval.py +349 -0
  47. evalscope/benchmarks/omnidoc_bench/metrics.py +547 -0
  48. evalscope/benchmarks/omnidoc_bench/omnidoc_bench_adapter.py +135 -0
  49. evalscope/benchmarks/omnidoc_bench/utils.py +1937 -0
  50. evalscope/benchmarks/poly_math/__init__.py +0 -0
  51. evalscope/benchmarks/poly_math/poly_math_adapter.py +127 -0
  52. evalscope/benchmarks/poly_math/utils/instruction.py +105 -0
  53. evalscope/benchmarks/pope/__init__.py +0 -0
  54. evalscope/benchmarks/pope/pope_adapter.py +111 -0
  55. evalscope/benchmarks/seed_bench_2_plus/__init__.py +0 -0
  56. evalscope/benchmarks/seed_bench_2_plus/seed_bench_2_plus_adapter.py +72 -0
  57. evalscope/benchmarks/simple_vqa/__init__.py +0 -0
  58. evalscope/benchmarks/simple_vqa/simple_vqa_adapter.py +169 -0
  59. evalscope/benchmarks/tau_bench/tau_bench_adapter.py +1 -1
  60. evalscope/benchmarks/tool_bench/tool_bench_adapter.py +1 -1
  61. evalscope/benchmarks/visu_logic/__init__.py +0 -0
  62. evalscope/benchmarks/visu_logic/visu_logic_adapter.py +75 -0
  63. evalscope/benchmarks/zerobench/__init__.py +0 -0
  64. evalscope/benchmarks/zerobench/zerobench_adapter.py +64 -0
  65. evalscope/constants.py +4 -0
  66. evalscope/evaluator/evaluator.py +72 -79
  67. evalscope/metrics/math_parser.py +14 -0
  68. evalscope/metrics/metric.py +1 -1
  69. evalscope/models/utils/openai.py +4 -0
  70. evalscope/perf/arguments.py +24 -4
  71. evalscope/perf/benchmark.py +74 -89
  72. evalscope/perf/http_client.py +31 -16
  73. evalscope/perf/main.py +15 -2
  74. evalscope/perf/plugin/api/base.py +9 -7
  75. evalscope/perf/plugin/api/custom_api.py +13 -58
  76. evalscope/perf/plugin/api/default_api.py +179 -79
  77. evalscope/perf/plugin/api/openai_api.py +4 -3
  78. evalscope/perf/plugin/datasets/base.py +21 -0
  79. evalscope/perf/plugin/datasets/custom.py +2 -3
  80. evalscope/perf/plugin/datasets/line_by_line.py +2 -3
  81. evalscope/perf/plugin/datasets/longalpaca.py +2 -3
  82. evalscope/perf/plugin/datasets/openqa.py +2 -4
  83. evalscope/perf/plugin/datasets/random_dataset.py +1 -3
  84. evalscope/perf/utils/benchmark_util.py +36 -22
  85. evalscope/perf/utils/db_util.py +14 -19
  86. evalscope/perf/utils/local_server.py +0 -44
  87. evalscope/perf/utils/log_utils.py +21 -6
  88. evalscope/report/__init__.py +2 -1
  89. evalscope/run.py +4 -0
  90. evalscope/utils/function_utils.py +195 -12
  91. evalscope/utils/io_utils.py +74 -0
  92. evalscope/utils/logger.py +49 -17
  93. evalscope/utils/ner.py +377 -0
  94. evalscope/version.py +2 -2
  95. {evalscope-1.1.0.dist-info → evalscope-1.1.1.dist-info}/METADATA +235 -363
  96. {evalscope-1.1.0.dist-info → evalscope-1.1.1.dist-info}/RECORD +100 -55
  97. {evalscope-1.1.0.dist-info → evalscope-1.1.1.dist-info}/WHEEL +1 -1
  98. {evalscope-1.1.0.dist-info → evalscope-1.1.1.dist-info}/entry_points.txt +0 -0
  99. {evalscope-1.1.0.dist-info → evalscope-1.1.1.dist-info/licenses}/LICENSE +0 -0
  100. {evalscope-1.1.0.dist-info → evalscope-1.1.1.dist-info}/top_level.txt +0 -0
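The headline changes in this release are the new benchmark adapters (the NER suites, OmniDocBench, POPE, ZeroBench, AA-LCR, and others), a Requires-Python bump from >=3.9 to >=3.10, and reorganized optional extras in the wheel metadata. Below is a minimal sketch of trying the new release, using only the `pip` and `evalscope eval` invocations documented in the README quoted later in this diff; the registry names of the newly added benchmark adapters are not shown here and would need to be checked in `evalscope/api/benchmark`.

```shell
# Upgrade to the new release (1.1.1 requires Python >= 3.10)
pip install -U evalscope==1.1.1

# Smoke-test the upgrade with a small run, as shown in the README below;
# gsm8k is a pre-existing benchmark, not one of the adapters added in this release.
evalscope eval \
  --model Qwen/Qwen2.5-0.5B-Instruct \
  --datasets gsm8k \
  --limit 5
```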
{evalscope-1.1.0.dist-info → evalscope-1.1.1.dist-info}/METADATA
@@ -1,6 +1,6 @@
- Metadata-Version: 2.1
+ Metadata-Version: 2.4
  Name: evalscope
- Version: 1.1.0
+ Version: 1.1.1
  Summary: EvalScope: Lightweight LLMs Evaluation Framework
  Author: ModelScope team
  Author-email: contact@modelscope.cn
@@ -10,22 +10,21 @@ Keywords: python,llm,evaluation
  Classifier: Development Status :: 4 - Beta
  Classifier: Operating System :: OS Independent
  Classifier: Programming Language :: Python :: 3
- Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
  Classifier: License :: OSI Approved :: Apache Software License
- Requires-Python: >=3.9
+ Requires-Python: >=3.10
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: colorlog
  Requires-Dist: datasets==3.6.0
- Requires-Dist: docstring-parser
+ Requires-Dist: docstring_parser
  Requires-Dist: dotenv
  Requires-Dist: jieba
  Requires-Dist: jsonlines
  Requires-Dist: langdetect
- Requires-Dist: latex2sympy2-extended[antlr4_9_3]
+ Requires-Dist: latex2sympy2_extended[antlr4_9_3]
  Requires-Dist: matplotlib
  Requires-Dist: modelscope[framework]>=1.27
  Requires-Dist: nltk>=3.9
@@ -47,15 +46,53 @@ Requires-Dist: tabulate
  Requires-Dist: tqdm
  Requires-Dist: transformers>=4.33
  Requires-Dist: word2number
+ Provides-Extra: opencompass
+ Requires-Dist: ms-opencompass>=0.1.6; extra == "opencompass"
+ Provides-Extra: vlmeval
+ Requires-Dist: ms-vlmeval>=0.0.17; extra == "vlmeval"
+ Provides-Extra: rag
+ Requires-Dist: langchain<0.4.0,>=0.3.0; extra == "rag"
+ Requires-Dist: langchain-community<0.4.0,>=0.3.0; extra == "rag"
+ Requires-Dist: langchain-core<0.4.0,>=0.3.0; extra == "rag"
+ Requires-Dist: langchain-openai<0.4.0,>=0.3.0; extra == "rag"
+ Requires-Dist: mteb==1.38.20; extra == "rag"
+ Requires-Dist: ragas==0.2.14; extra == "rag"
+ Requires-Dist: torch; extra == "rag"
+ Requires-Dist: webdataset>0.2.0; extra == "rag"
+ Provides-Extra: perf
+ Requires-Dist: aiohttp; extra == "perf"
+ Requires-Dist: fastapi; extra == "perf"
+ Requires-Dist: jinja2; extra == "perf"
+ Requires-Dist: numpy; extra == "perf"
+ Requires-Dist: rich; extra == "perf"
+ Requires-Dist: sse_starlette; extra == "perf"
+ Requires-Dist: transformers; extra == "perf"
+ Requires-Dist: uvicorn; extra == "perf"
+ Provides-Extra: app
+ Requires-Dist: gradio==5.4.0; extra == "app"
+ Requires-Dist: plotly<6.0.0,>=5.23.0; extra == "app"
  Provides-Extra: aigc
  Requires-Dist: diffusers; extra == "aigc"
  Requires-Dist: iopath; extra == "aigc"
  Requires-Dist: omegaconf; extra == "aigc"
- Requires-Dist: open-clip-torch; extra == "aigc"
+ Requires-Dist: open_clip_torch; extra == "aigc"
  Requires-Dist: opencv-python; extra == "aigc"
  Requires-Dist: peft>=0.17; extra == "aigc"
  Requires-Dist: torch; extra == "aigc"
  Requires-Dist: torchvision; extra == "aigc"
+ Provides-Extra: sandbox
+ Requires-Dist: ms-enclave[docker]; extra == "sandbox"
+ Provides-Extra: dev
+ Requires-Dist: pytest; extra == "dev"
+ Requires-Dist: pytest-cov; extra == "dev"
+ Requires-Dist: python-dotenv; extra == "dev"
+ Provides-Extra: docs
+ Requires-Dist: docutils>=0.16.0; extra == "docs"
+ Requires-Dist: myst_parser; extra == "docs"
+ Requires-Dist: recommonmark; extra == "docs"
+ Requires-Dist: sphinx>=5.3.0; extra == "docs"
+ Requires-Dist: sphinx-design; extra == "docs"
+ Requires-Dist: sphinxawesome-theme; extra == "docs"
  Provides-Extra: all
  Requires-Dist: ms-opencompass>=0.1.6; extra == "all"
  Requires-Dist: ms-vlmeval>=0.0.17; extra == "all"
@@ -72,7 +109,7 @@ Requires-Dist: fastapi; extra == "all"
  Requires-Dist: jinja2; extra == "all"
  Requires-Dist: numpy; extra == "all"
  Requires-Dist: rich; extra == "all"
- Requires-Dist: sse-starlette; extra == "all"
+ Requires-Dist: sse_starlette; extra == "all"
  Requires-Dist: transformers; extra == "all"
  Requires-Dist: uvicorn; extra == "all"
  Requires-Dist: gradio==5.4.0; extra == "all"
@@ -80,46 +117,12 @@ Requires-Dist: plotly<6.0.0,>=5.23.0; extra == "all"
  Requires-Dist: diffusers; extra == "all"
  Requires-Dist: iopath; extra == "all"
  Requires-Dist: omegaconf; extra == "all"
- Requires-Dist: open-clip-torch; extra == "all"
+ Requires-Dist: open_clip_torch; extra == "all"
  Requires-Dist: opencv-python; extra == "all"
  Requires-Dist: peft>=0.17; extra == "all"
+ Requires-Dist: torch; extra == "all"
  Requires-Dist: torchvision; extra == "all"
- Provides-Extra: app
- Requires-Dist: gradio==5.4.0; extra == "app"
- Requires-Dist: plotly<6.0.0,>=5.23.0; extra == "app"
- Provides-Extra: dev
- Requires-Dist: pytest; extra == "dev"
- Requires-Dist: pytest-cov; extra == "dev"
- Requires-Dist: python-dotenv; extra == "dev"
- Provides-Extra: docs
- Requires-Dist: docutils>=0.16.0; extra == "docs"
- Requires-Dist: myst-parser; extra == "docs"
- Requires-Dist: recommonmark; extra == "docs"
- Requires-Dist: sphinx>=5.3.0; extra == "docs"
- Requires-Dist: sphinx-design; extra == "docs"
- Requires-Dist: sphinxawesome-theme; extra == "docs"
- Provides-Extra: opencompass
- Requires-Dist: ms-opencompass>=0.1.6; extra == "opencompass"
- Provides-Extra: perf
- Requires-Dist: aiohttp; extra == "perf"
- Requires-Dist: fastapi; extra == "perf"
- Requires-Dist: jinja2; extra == "perf"
- Requires-Dist: numpy; extra == "perf"
- Requires-Dist: rich; extra == "perf"
- Requires-Dist: sse-starlette; extra == "perf"
- Requires-Dist: transformers; extra == "perf"
- Requires-Dist: uvicorn; extra == "perf"
- Provides-Extra: rag
- Requires-Dist: langchain<0.4.0,>=0.3.0; extra == "rag"
- Requires-Dist: langchain-community<0.4.0,>=0.3.0; extra == "rag"
- Requires-Dist: langchain-core<0.4.0,>=0.3.0; extra == "rag"
- Requires-Dist: langchain-openai<0.4.0,>=0.3.0; extra == "rag"
- Requires-Dist: mteb==1.38.20; extra == "rag"
- Requires-Dist: ragas==0.2.14; extra == "rag"
- Requires-Dist: torch; extra == "rag"
- Requires-Dist: webdataset>0.2.0; extra == "rag"
- Provides-Extra: vlmeval
- Requires-Dist: ms-vlmeval>=0.0.17; extra == "vlmeval"
+ Dynamic: license-file

  <p align="center">
  <br>
@@ -127,13 +130,12 @@ Requires-Dist: ms-vlmeval>=0.0.17; extra == "vlmeval"
  <br>
  <p>

-
  <p align="center">
  <a href="README_zh.md">中文</a> &nbsp | &nbsp English &nbsp
  </p>

  <p align="center">
- <img src="https://img.shields.io/badge/python-%E2%89%A53.9-5be.svg">
+ <img src="https://img.shields.io/badge/python-%E2%89%A53.10-5be.svg">
  <a href="https://badge.fury.io/py/evalscope"><img src="https://badge.fury.io/py/evalscope.svg" alt="PyPI version" height="18"></a>
  <a href="https://pypi.org/project/evalscope"><img alt="PyPI - Downloads" src="https://static.pepy.tech/badge/evalscope"></a>
  <a href="https://github.com/modelscope/evalscope/pulls"><img src="https://img.shields.io/badge/PR-welcome-55EB99.svg"></a>
@@ -141,122 +143,77 @@ Requires-Dist: ms-vlmeval>=0.0.17; extra == "vlmeval"
  <p>

  <p align="center">
- <a href="https://evalscope.readthedocs.io/zh-cn/latest/"> 📖 中文文档</a> &nbsp | &nbsp <a href="https://evalscope.readthedocs.io/en/latest/"> 📖 English Documents</a>
+ <a href="https://evalscope.readthedocs.io/zh-cn/latest/"> 📖 Chinese Documentation</a> &nbsp | &nbsp <a href="https://evalscope.readthedocs.io/en/latest/"> 📖 English Documentation</a>
  <p>

- > ⭐ If you like this project, please click the "Star" button at the top right to support us. Your support is our motivation to keep going!
-
- ## 📋 Contents
- - [📋 Contents](#-contents)
- - [📝 Introduction](#-introduction)
- - [☎ User Groups](#-user-groups)
- - [🎉 News](#-news)
- - [🛠️ Environment Setup](#️-environment-setup)
- - [Method 1. Install via pip](#method-1-install-via-pip)
- - [Method 2. Install from source](#method-2-install-from-source)
- - [🚀 Quick Start](#-quick-start)
- - [Method 1. Using Command Line](#method-1-using-command-line)
- - [Method 2. Using Python Code](#method-2-using-python-code)
- - [Basic Parameter](#basic-parameter)
- - [Output Results](#output-results)
- - [📈 Visualization of Evaluation Results](#-visualization-of-evaluation-results)
- - [🌐 Evaluation of Model API](#-evaluation-of-model-api)
- - [⚙️ Custom Parameter Evaluation](#️-custom-parameter-evaluation)
- - [Parameter Description](#parameter-description)
- - [🧪 Other Evaluation Backends](#-other-evaluation-backends)
- - [📈 Model Serving Performance Evaluation](#-model-serving-performance-evaluation)
- - [🖊️ Custom Dataset Evaluation](#️-custom-dataset-evaluation)
- - [⚔️ Arena Mode](#️-arena-mode)
- - [👷‍♂️ Contribution](#️-contribution)
- - [📚 Citation](#-citation)
- - [🔜 Roadmap](#-roadmap)
- - [⭐ Star History](#-star-history)

+ > ⭐ If you like this project, please click the "Star" button in the upper right corner to support us. Your support is our motivation to move forward!

  ## 📝 Introduction

- EvalScope is a comprehensive model evaluation and performance benchmarking framework meticulously crafted by the [ModelScope Community](https://modelscope.cn/), offering a one-stop solution for your model assessment needs. Regardless of the type of model you are developing, EvalScope is equipped to cater to your requirements:
+ EvalScope is a powerful and easily extensible model evaluation framework created by the [ModelScope Community](https://modelscope.cn/), aiming to provide a one-stop evaluation solution for large model developers.

- - 🧠 Large Language Models
- - 🎨 Multimodal Models
- - 🔍 Embedding Models
- - 🏆 Reranker Models
- - 🖼️ CLIP Models
- - 🎭 AIGC Models (Image-to-Text/Video)
- - ...and more!
+ Whether you want to evaluate the general capabilities of models, conduct multi-model performance comparisons, or need to stress test models, EvalScope can meet your needs.

- EvalScope is not merely an evaluation tool; it is a valuable ally in your model optimization journey:
+ ## ✨ Key Features

- - 🏅 Equipped with multiple industry-recognized benchmarks and evaluation metrics: MMLU, CMMLU, C-Eval, GSM8K, etc.
- - 📊 Model inference performance stress testing: Ensuring your model excels in real-world applications.
- - 🚀 Seamless integration with the [ms-swift](https://github.com/modelscope/ms-swift) training framework, enabling one-click evaluations and providing full-chain support from training to assessment for your model development.
+ - **📚 Comprehensive Evaluation Benchmarks**: Built-in multiple industry-recognized evaluation benchmarks including MMLU, C-Eval, GSM8K, and more.
+ - **🧩 Multi-modal and Multi-domain Support**: Supports evaluation of various model types including Large Language Models (LLM), Vision Language Models (VLM), Embedding, Reranker, AIGC, and more.
+ - **🚀 Multi-backend Integration**: Seamlessly integrates multiple evaluation backends including OpenCompass, VLMEvalKit, RAGEval to meet different evaluation needs.
+ - **⚡ Inference Performance Testing**: Provides powerful model service stress testing tools, supporting multiple performance metrics such as TTFT, TPOT.
+ - **📊 Interactive Reports**: Provides WebUI visualization interface, supporting multi-dimensional model comparison, report overview and detailed inspection.
+ - **⚔️ Arena Mode**: Supports multi-model battles (Pairwise Battle), intuitively ranking and evaluating models.
+ - **🔧 Highly Extensible**: Developers can easily add custom datasets, models and evaluation metrics.

- Below is the overall architecture diagram of EvalScope:
+ <details><summary>🏛️ Overall Architecture</summary>

  <p align="center">
- <img src="https://sail-moe.oss-cn-hangzhou.aliyuncs.com/yunlin/images/evalscope/doc/EvalScope%E6%9E%B6%E6%9E%84%E5%9B%BE.png" width="70%">
- <br>EvalScope Framework.
+ <img src="https://sail-moe.oss-cn-hangzhou.aliyuncs.com/yunlin/images/evalscope/doc/EvalScope%E6%9E%B6%E6%9E%84%E5%9B%BE.png" style="width: 70%;">
+ <br>EvalScope Overall Architecture.
  </p>

- <details><summary>Framework Description</summary>
-
- The architecture includes the following modules:
- 1. Input Layer
- - **Model Sources**: API models (OpenAI API), local models (ModelScope)
- - **Datasets**: Standard evaluation benchmarks (MMLU/GSM8k, etc.), custom data (MCQ/QA)
-
- 2. Core Functions
- - **Multi-backend Evaluation**
- - Native backends: Unified evaluation for LLM/VLM/Embedding/T2I models
- - Integrated frameworks: OpenCompass/MTEB/VLMEvalKit/RAGAS
-
- - **Performance Monitoring**
- - Model plugins: Supports various model service APIs
- - Data plugins: Supports multiple data formats
- - Metric tracking: TTFT/TPOP/Stability and other metrics
+ 1. **Input Layer**
+ - **Model Sources**: API models (OpenAI API), Local models (ModelScope)
+ - **Datasets**: Standard evaluation benchmarks (MMLU/GSM8k etc.), Custom data (MCQ/QA)

- - **Tool Extensions**
- - Integration: Tool-Bench/Needle-in-a-Haystack/BFCL-v3
+ 2. **Core Functions**
+ - **Multi-backend Evaluation**: Native backend, OpenCompass, MTEB, VLMEvalKit, RAGAS
+ - **Performance Monitoring**: Supports multiple model service APIs and data formats, tracking TTFT/TPOP and other metrics
+ - **Tool Extensions**: Integrates Tool-Bench, Needle-in-a-Haystack, etc.

- 3. Output Layer
- - **Structured Reports**: Supports JSON/Tables/Logs
- - **Visualization Platforms**: Supports Gradio/Wandb/SwanLab
+ 3. **Output Layer**
+ - **Structured Reports**: Supports JSON, Table, Logs
+ - **Visualization Platform**: Supports Gradio, Wandb, SwanLab

  </details>

- ## ☎ User Groups
-
- Please scan the QR code below to join our community groups:
-
- [Discord Group](https://discord.com/invite/D27yfEFVz5) | WeChat Group | DingTalk Group
- :-------------------------:|:-------------------------:|:-------------------------:
- <img src="docs/asset/discord_qr.jpg" width="160" height="160"> | <img src="docs/asset/wechat.png" width="160" height="160"> | <img src="docs/asset/dingding.png" width="160" height="160">
-
-
- ## 🎉 News
+ ## 🎉 What's New

  > [!IMPORTANT]
  > **Version 1.0 Refactoring**
  >
  > Version 1.0 introduces a major overhaul of the evaluation framework, establishing a new, more modular and extensible API layer under `evalscope/api`. Key improvements include standardized data models for benchmarks, samples, and results; a registry-based design for components such as benchmarks and metrics; and a rewritten core evaluator that orchestrates the new architecture. Existing benchmark adapters have been migrated to this API, resulting in cleaner, more consistent, and easier-to-maintain implementations.
+
+ - 🔥 **[2025.10.21]** Optimized sandbox environment usage in code evaluation, supporting both local and remote operation modes. For details, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/sandbox.html).
+ - 🔥 **[2025.10.20]** Added support for evaluation benchmarks including PolyMath, SimpleVQA, MathVerse, MathVision, AA-LCR; optimized evalscope perf performance to align with vLLM Bench. For details, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/vs_vllm_bench.html).
  - 🔥 **[2025.10.14]** Added support for OCRBench, OCRBench-v2, DocVQA, InfoVQA, ChartQA, and BLINK multimodal image-text evaluation benchmarks.
  - 🔥 **[2025.09.22]** Code evaluation benchmarks (HumanEval, LiveCodeBench) now support running in a sandbox environment. To use this feature, please install [ms-enclave](https://github.com/modelscope/ms-enclave) first.
  - 🔥 **[2025.09.19]** Added support for multimodal image-text evaluation benchmarks including RealWorldQA, AI2D, MMStar, MMBench, and OmniBench, as well as pure text evaluation benchmarks such as Multi-IF, HealthBench, and AMC.
- - 🔥 **[2025.09.05]** Added support for vision-language multimodal model evaluation tasks, such as MathVista and MMMU. For more supported datasets, please [refer to the documentation](https://evalscope.readthedocs.io/zh-cn/latest/get_started/supported_dataset/vlm.html).
+ - 🔥 **[2025.09.05]** Added support for vision-language multimodal model evaluation tasks, such as MathVista and MMMU. For more supported datasets, please [refer to the documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/vlm.html).
  - 🔥 **[2025.09.04]** Added support for image editing task evaluation, including the [GEdit-Bench](https://modelscope.cn/datasets/stepfun-ai/GEdit-Bench) benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/image_edit.html).
  - 🔥 **[2025.08.22]** Version 1.0 Refactoring. Break changes, please [refer to](https://evalscope.readthedocs.io/en/latest/get_started/basic_usage.html#switching-to-version-v1-0).
  - 🔥 **[2025.07.18]** The model stress testing now supports randomly generating image-text data for multimodal model evaluation. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#id4).
- - 🔥 **[2025.07.16]** Support for [τ-bench](https://github.com/sierra-research/tau-bench) has been added, enabling the evaluation of AI Agent performance and reliability in real-world scenarios involving dynamic user and tool interactions. For usage instructions, please refer to the [documentation](https://evalscope.readthedocs.io/zh-cn/latest/get_started/supported_dataset/llm.html#bench).
+ - 🔥 **[2025.07.16]** Support for [τ-bench](https://github.com/sierra-research/tau-bench) has been added, enabling the evaluation of AI Agent performance and reliability in real-world scenarios involving dynamic user and tool interactions. For usage instructions, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/llm.html#bench).
  - 🔥 **[2025.07.14]** Support for "Humanity's Last Exam" ([Humanity's-Last-Exam](https://modelscope.cn/datasets/cais/hle)), a highly challenging evaluation benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/llm.html#humanity-s-last-exam).
  - 🔥 **[2025.07.03]** Refactored Arena Mode: now supports custom model battles, outputs a model leaderboard, and provides battle result visualization. See [reference](https://evalscope.readthedocs.io/en/latest/user_guides/arena.html) for details.
+ <details><summary>More</summary>
+
  - 🔥 **[2025.06.28]** Optimized custom dataset evaluation: now supports evaluation without reference answers. Enhanced LLM judge usage, with built-in modes for "scoring directly without reference answers" and "checking answer consistency with reference answers". See [reference](https://evalscope.readthedocs.io/en/latest/advanced_guides/custom_dataset/llm.html#qa) for details.
- - 🔥 **[2025.06.19]** Added support for the [BFCL-v3](https://modelscope.cn/datasets/AI-ModelScope/bfcl_v3) benchmark, designed to evaluate model function-calling capabilities across various scenarios. For more information, refer to the [documentation](https://evalscope.readthedocs.io/zh-cn/latest/third_party/bfcl_v3.html).
+ - 🔥 **[2025.06.19]** Added support for the [BFCL-v3](https://modelscope.cn/datasets/AI-ModelScope/bfcl_v3) benchmark, designed to evaluate model function-calling capabilities across various scenarios. For more information, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/bfcl_v3.html).
  - 🔥 **[2025.06.02]** Added support for the Needle-in-a-Haystack test. Simply specify `needle_haystack` to conduct the test, and a corresponding heatmap will be generated in the `outputs/reports` folder, providing a visual representation of the model's performance. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/needle_haystack.html) for more details.
  - 🔥 **[2025.05.29]** Added support for two long document evaluation benchmarks: [DocMath](https://modelscope.cn/datasets/yale-nlp/DocMath-Eval/summary) and [FRAMES](https://modelscope.cn/datasets/iic/frames/summary). For usage guidelines, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html).
  - 🔥 **[2025.05.16]** Model service performance stress testing now supports setting various levels of concurrency and outputs a performance test report. [Reference example](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/quick_start.html#id3).
  - 🔥 **[2025.05.13]** Added support for the [ToolBench-Static](https://modelscope.cn/datasets/AI-ModelScope/ToolBench-Static) dataset to evaluate model's tool-calling capabilities. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html) for usage instructions. Also added support for the [DROP](https://modelscope.cn/datasets/AI-ModelScope/DROP/dataPeview) and [Winogrande](https://modelscope.cn/datasets/AI-ModelScope/winogrande_val) benchmarks to assess the reasoning capabilities of models.
- <details><summary>More</summary>
-
  - 🔥 **[2025.04.29]** Added Qwen3 Evaluation Best Practices, [welcome to read 📖](https://evalscope.readthedocs.io/en/latest/best_practice/qwen3.html)
  - 🔥 **[2025.04.27]** Support for text-to-image evaluation: Supports 8 metrics including MPS, HPSv2.1Score, etc., and evaluation benchmarks such as EvalMuse, GenAI-Bench. Refer to the [user documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/t2i.html) for more details.
  - 🔥 **[2025.04.10]** Model service stress testing tool now supports the `/v1/completions` endpoint (the default endpoint for vLLM benchmarking)
@@ -293,96 +250,71 @@ Please scan the QR code below to join our community groups:

  </details>

- ## 🛠️ Environment Setup
+ ## ❤️ Community & Support

- ### Method 1. Install via pip
+ Welcome to join our community to communicate with other developers and get help.

- We recommend using conda to manage your environment and pip to install dependencies. This allows you to use the latest evalscope PyPI package.
+ [Discord Group](https://discord.com/invite/D27yfEFVz5) | WeChat Group | DingTalk Group
+ :-------------------------:|:-------------------------:|:-------------------------:
+ <img src="docs/asset/discord_qr.jpg" width="160" height="160"> | <img src="docs/asset/wechat.png" width="160" height="160"> | <img src="docs/asset/dingding.png" width="160" height="160">

- 1. Create a conda environment (optional)
- ```shell
- # Python 3.10 is recommended
- conda create -n evalscope python=3.10

- # Activate the conda environment
- conda activate evalscope
- ```
- 2. Install dependencies via pip
- ```shell
- pip install evalscope
- ```
- 3. Install additional dependencies (optional)
- - To use model service inference benchmarking features, install the perf dependency:
+
+ ## 🛠️ Environment Setup
+
+ We recommend using `conda` to create a virtual environment and install with `pip`.
+
+ 1. **Create and Activate Conda Environment** (Python 3.10 recommended)
  ```shell
- pip install 'evalscope[perf]'
+ conda create -n evalscope python=3.10
+ conda activate evalscope
  ```
- - To use visualization features, install the app dependency:
+
+ 2. **Install EvalScope**
+
+ - **Method 1: Install via PyPI (Recommended)**
+ ```shell
+ pip install evalscope
+ ```
+
+ - **Method 2: Install from Source (For Development)**
+ ```shell
+ git clone https://github.com/modelscope/evalscope.git
+ cd evalscope
+ pip install -e .
+ ```
+
+ 3. **Install Additional Dependencies** (Optional)
+ Install corresponding feature extensions according to your needs:
  ```shell
+ # Performance testing
+ pip install 'evalscope[perf]'
+
+ # Visualization App
  pip install 'evalscope[app]'
- ```
- - If you need to use other evaluation backends, you can install OpenCompass, VLMEvalKit, or RAGEval as needed:
- ```shell
+
+ # Other evaluation backends
  pip install 'evalscope[opencompass]'
  pip install 'evalscope[vlmeval]'
  pip install 'evalscope[rag]'
- ```
- - To install all dependencies:
- ```shell
+
+ # Install all dependencies
  pip install 'evalscope[all]'
  ```
+ > If you installed from source, please replace `evalscope` with `.`, for example `pip install '.[perf]'`.

  > [!NOTE]
- > The project has been renamed to `evalscope`. For version `v0.4.3` or earlier, you can install it with:
- > ```shell
- > pip install llmuses<=0.4.3
- > ```
- > Then, import related dependencies using `llmuses`:
- > ```python
- > from llmuses import ...
- > ```
-
- ### Method 2. Install from source
-
- Installing from source allows you to use the latest code and makes it easier for further development and debugging.
-
- 1. Clone the source code
- ```shell
- git clone https://github.com/modelscope/evalscope.git
- ```
- 2. Install dependencies
- ```shell
- cd evalscope/
-
- pip install -e .
- ```
- 3. Install additional dependencies
- - To use model service inference benchmarking features, install the perf dependency:
- ```shell
- pip install '.[perf]'
- ```
- - To use visualization features, install the app dependency:
- ```shell
- pip install '.[app]'
- ```
- - If you need to use other evaluation backends, you can install OpenCompass, VLMEvalKit, or RAGEval as needed:
- ```shell
- pip install '.[opencompass]'
- pip install '.[vlmeval]'
- pip install '.[rag]'
- ```
- - To install all dependencies:
- ```shell
- pip install '.[all]'
- ```
+ > This project was formerly known as `llmuses`. If you need to use `v0.4.3` or earlier versions, please run `pip install llmuses<=0.4.3` and use `from llmuses import ...` for imports.


  ## 🚀 Quick Start

- To evaluate a model on specified datasets using default configurations, this framework supports two ways to initiate evaluation tasks: using the command line or using Python code.
+ You can start evaluation tasks in two ways: **command line** or **Python code**.

  ### Method 1. Using Command Line

- Execute the `eval` command in any directory:
+ Execute the `evalscope eval` command in any path to start evaluation. The following command will evaluate the `Qwen/Qwen2.5-0.5B-Instruct` model on `gsm8k` and `arc` datasets, taking only 5 samples from each dataset.
+
  ```bash
  evalscope eval \
  --model Qwen/Qwen2.5-0.5B-Instruct \
@@ -392,22 +324,23 @@ evalscope eval \

  ### Method 2. Using Python Code

- When using Python code for evaluation, you need to submit the evaluation task using the `run_task` function, passing a `TaskConfig` as a parameter. It can also be a Python dictionary, yaml file path, or json file path, for example:
-
- **Using `TaskConfig`**
+ Use the `run_task` function and `TaskConfig` object to configure and start evaluation tasks.

  ```python
  from evalscope import run_task, TaskConfig

+ # Configure evaluation task
  task_cfg = TaskConfig(
  model='Qwen/Qwen2.5-0.5B-Instruct',
  datasets=['gsm8k', 'arc'],
  limit=5
  )

- run_task(task_cfg=task_cfg)
+ # Start evaluation
+ run_task(task_cfg)
  ```
- <details><summary>More Startup Methods</summary>
+
+ <details><summary><b>💡 Tip:</b> `run_task` also supports dictionaries, YAML or JSON files as configuration.</summary>

  **Using Python Dictionary**

@@ -419,13 +352,10 @@ task_cfg = {
  'datasets': ['gsm8k', 'arc'],
  'limit': 5
  }
-
  run_task(task_cfg=task_cfg)
  ```

- **Using `yaml` file**
-
- `config.yaml`:
+ **Using YAML File** (`config.yaml`)
  ```yaml
  model: Qwen/Qwen2.5-0.5B-Instruct
  datasets:
@@ -433,37 +363,15 @@ datasets:
  - arc
  limit: 5
  ```
-
  ```python
  from evalscope.run import run_task

  run_task(task_cfg="config.yaml")
  ```
-
- **Using `json` file**
-
- `config.json`:
- ```json
- {
- "model": "Qwen/Qwen2.5-0.5B-Instruct",
- "datasets": ["gsm8k", "arc"],
- "limit": 5
- }
- ```
-
- ```python
- from evalscope.run import run_task
-
- run_task(task_cfg="config.json")
- ```
  </details>

- ### Basic Parameter
- - `--model`: Specifies the `model_id` of the model in [ModelScope](https://modelscope.cn/), which can be automatically downloaded, e.g., [Qwen/Qwen2.5-0.5B-Instruct](https://modelscope.cn/models/Qwen/Qwen2.5-0.5B-Instruct/summary); or use the local path of the model, e.g., `/path/to/model`
- - `--datasets`: Dataset names, supports inputting multiple datasets separated by spaces. Datasets will be automatically downloaded from modelscope. For supported datasets, refer to the [Dataset List](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html)
- - `--limit`: Maximum amount of evaluation data for each dataset. If not specified, it defaults to evaluating all data. Can be used for quick validation
-
  ### Output Results
+ After evaluation completion, you will see a report in the terminal in the following format:
  ```text
  +-----------------------+----------------+-----------------+-----------------+---------------+-------+---------+
  | Model Name | Dataset Name | Metric Name | Category Name | Subset Name | Num | Score |
@@ -476,164 +384,140 @@ run_task(task_cfg="config.json")
  +-----------------------+----------------+-----------------+-----------------+---------------+-------+---------+
  ```

- ## 📈 Visualization of Evaluation Results
-
- 1. Install the dependencies required for visualization, including gradio, plotly, etc.
- ```bash
- pip install 'evalscope[app]'
- ```
-
- 2. Start the Visualization Service
-
- Run the following command to start the visualization service.
- ```bash
- evalscope app
- ```
- You can access the visualization service in the browser if the following output appears.
- ```text
- * Running on local URL: http://127.0.0.1:7861
-
- To create a public link, set `share=True` in `launch()`.
- ```
-
- <table>
- <tr>
- <td style="text-align: center;">
- <img src="docs/en/get_started/images/setting.png" alt="Setting" style="width: 75%;" />
- <p>Setting Interface</p>
- </td>
- <td style="text-align: center;">
- <img src="docs/en/get_started/images/model_compare.png" alt="Model Compare" style="width: 100%;" />
- <p>Model Comparison</p>
- </td>
- </tr>
- <tr>
- <td style="text-align: center;">
- <img src="docs/en/get_started/images/report_overview.png" alt="Report Overview" style="width: 100%;" />
- <p>Report Overview</p>
- </td>
- <td style="text-align: center;">
- <img src="docs/en/get_started/images/report_details.png" alt="Report Details" style="width: 80%;" />
- <p>Report Details</p>
- </td>
- </tr>
- </table>
-
- For more details, refer to: [📖 Visualization of Evaluation Results](https://evalscope.readthedocs.io/en/latest/get_started/visualization.html)
-
- ## 🌐 Evaluation of Model API
-
- Specify the model API service address (api_url) and API Key (api_key) to evaluate the deployed model API service. In this case, the `eval-type` parameter must be specified as `service`, for example:
-
- For example, to launch a model service using [vLLM](https://github.com/vllm-project/vllm):
-
- ```shell
- export VLLM_USE_MODELSCOPE=True && python -m vllm.entrypoints.openai.api_server --model Qwen/Qwen2.5-0.5B-Instruct --served-model-name qwen2.5 --trust_remote_code --port 8801
- ```
- Then, you can use the following command to evaluate the model API service:
- ```shell
- evalscope eval \
- --model qwen2.5 \
- --api-url http://127.0.0.1:8801/v1 \
- --api-key EMPTY \
- --eval-type service \
- --datasets gsm8k \
- --limit 10
- ```
+ ## 📈 Advanced Usage

- ## ⚙️ Custom Parameter Evaluation
+ ### Custom Evaluation Parameters

- For more customized evaluations, such as customizing model parameters or dataset parameters, you can use the following command. The evaluation startup method is the same as simple evaluation. Below shows how to start the evaluation using the `eval` command:
+ You can fine-tune model loading, inference, and dataset configuration through command line parameters.

  ```shell
  evalscope eval \
  --model Qwen/Qwen3-0.6B \
  --model-args '{"revision": "master", "precision": "torch.float16", "device_map": "auto"}' \
- --generation-config '{"do_sample":true,"temperature":0.6,"max_tokens":512,"chat_template_kwargs":{"enable_thinking": false}}' \
+ --generation-config '{"do_sample":true,"temperature":0.6,"max_tokens":512}' \
  --dataset-args '{"gsm8k": {"few_shot_num": 0, "few_shot_random": false}}' \
  --datasets gsm8k \
  --limit 10
  ```

- ### Parameter Description
- - `--model-args`: Model loading parameters, passed as a JSON string:
- - `revision`: Model version
- - `precision`: Model precision
- - `device_map`: Device allocation for the model
- - `--generation-config`: Generation parameters, passed as a JSON string and parsed as a dictionary:
- - `do_sample`: Whether to use sampling
- - `temperature`: Generation temperature
- - `max_tokens`: Maximum length of generated tokens
- - `chat_template_kwargs`: Model inference template parameters
- - `--dataset-args`: Settings for the evaluation dataset, passed as a JSON string where the key is the dataset name and the value is the parameters. Note that these need to correspond one-to-one with the values in the `--datasets` parameter:
- - `few_shot_num`: Number of few-shot examples
- - `few_shot_random`: Whether to randomly sample few-shot data; if not set, defaults to `true`
+ - `--model-args`: Model loading parameters such as `revision`, `precision`, etc.
+ - `--generation-config`: Model generation parameters such as `temperature`, `max_tokens`, etc.
+ - `--dataset-args`: Dataset configuration parameters such as `few_shot_num`, etc.

- Reference: [Full Parameter Description](https://evalscope.readthedocs.io/en/latest/get_started/parameters.html)
+ For details, please refer to [📖 Complete Parameter Guide](https://evalscope.readthedocs.io/en/latest/get_started/parameters.html).

+ ### Evaluating Online Model APIs

- ## 🧪 Other Evaluation Backends
- EvalScope supports using third-party evaluation frameworks to initiate evaluation tasks, which we call Evaluation Backend. Currently supported Evaluation Backend includes:
- - **Native**: EvalScope's own **default evaluation framework**, supporting various evaluation modes including single model evaluation, arena mode, and baseline model comparison mode.
- - [OpenCompass](https://github.com/open-compass/opencompass): Initiate OpenCompass evaluation tasks through EvalScope. Lightweight, easy to customize, supports seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/opencompass_backend.html)
- - [VLMEvalKit](https://github.com/open-compass/VLMEvalKit): Initiate VLMEvalKit multimodal evaluation tasks through EvalScope. Supports various multimodal models and datasets, and offers seamless integration with the LLM fine-tuning framework ms-swift. [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/vlmevalkit_backend.html)
- - **RAGEval**: Initiate RAG evaluation tasks through EvalScope, supporting independent evaluation of embedding models and rerankers using [MTEB/CMTEB](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html), as well as end-to-end evaluation using [RAGAS](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/ragas.html): [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/index.html)
- - **ThirdParty**: Third-party evaluation tasks, such as [ToolBench](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html) and [LongBench-Write](https://evalscope.readthedocs.io/en/latest/third_party/longwriter.html).
+ EvalScope supports evaluating model services deployed via APIs (such as services deployed with vLLM). Simply specify the service address and API Key.

+ 1. **Start Model Service** (using vLLM as example)
+ ```shell
+ export VLLM_USE_MODELSCOPE=True
+ python -m vllm.entrypoints.openai.api_server \
+ --model Qwen/Qwen2.5-0.5B-Instruct \
+ --served-model-name qwen2.5 \
+ --port 8801
+ ```

- ## 📈 Model Serving Performance Evaluation
- A stress testing tool focused on large language models, which can be customized to support various dataset formats and different API protocol formats.
+ 2. **Run Evaluation**
+ ```shell
+ evalscope eval \
+ --model qwen2.5 \
+ --eval-type service \
+ --api-url http://127.0.0.1:8801/v1 \
+ --api-key EMPTY \
+ --datasets gsm8k \
+ --limit 10
+ ```

- Reference: Performance Testing [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/index.html)
+ ### ⚔️ Arena Mode

- **Output example**
+ Arena mode evaluates model performance through pairwise battles between models, providing win rates and rankings, perfect for horizontal comparison of multiple models.

- ![multi_perf](docs/en/user_guides/stress_test/images/multi_perf.png)
+ ```text
+ # Example evaluation results
+ Model WinRate (%) CI (%)
+ ------------ ------------- ---------------
+ qwen2.5-72b 69.3 (-13.3 / +12.2)
+ qwen2.5-7b 50 (+0.0 / +0.0)
+ qwen2.5-0.5b 4.7 (-2.5 / +4.4)
+ ```
+ For details, please refer to [📖 Arena Mode Usage Guide](https://evalscope.readthedocs.io/en/latest/user_guides/arena.html).

+ ### 🖊️ Custom Dataset Evaluation

- **Supports wandb for recording results**
+ EvalScope allows you to easily add and evaluate your own datasets. For details, please refer to [📖 Custom Dataset Evaluation Guide](https://evalscope.readthedocs.io/en/latest/advanced_guides/custom_dataset/index.html).

- ![wandb sample](https://modelscope.oss-cn-beijing.aliyuncs.com/resource/wandb_sample.png)

- **Supports swanlab for recording results**
+ ## 🧪 Other Evaluation Backends
+ EvalScope supports launching evaluation tasks through third-party evaluation frameworks (we call them "backends") to meet diverse evaluation needs.

- ![swanlab sample](https://sail-moe.oss-cn-hangzhou.aliyuncs.com/yunlin/images/evalscope/swanlab.png)
+ - **Native**: EvalScope's default evaluation framework with comprehensive functionality.
+ - **OpenCompass**: Focuses on text-only evaluation. [📖 Usage Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/opencompass_backend.html)
+ - **VLMEvalKit**: Focuses on multi-modal evaluation. [📖 Usage Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/vlmevalkit_backend.html)
+ - **RAGEval**: Focuses on RAG evaluation, supporting Embedding and Reranker models. [📖 Usage Guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/index.html)
+ - **Third-party Evaluation Tools**: Supports evaluation tasks like [ToolBench](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html).

- **Supports Speed Benchmark**
+ ## ⚡ Inference Performance Evaluation Tool
+ EvalScope provides a powerful stress testing tool for evaluating the performance of large language model services.

- It supports speed testing and provides speed benchmarks similar to those found in the [official Qwen](https://qwen.readthedocs.io/en/latest/benchmark/speed_benchmark.html) reports:
+ - **Key Metrics**: Supports throughput (Tokens/s), first token latency (TTFT), token generation latency (TPOT), etc.
+ - **Result Recording**: Supports recording results to `wandb` and `swanlab`.
+ - **Speed Benchmarks**: Can generate speed benchmark results similar to official reports.

- ```text
- Speed Benchmark Results:
- +---------------+-----------------+----------------+
- | Prompt Tokens | Speed(tokens/s) | GPU Memory(GB) |
- +---------------+-----------------+----------------+
- | 1 | 50.69 | 0.97 |
- | 6144 | 51.36 | 1.23 |
- | 14336 | 49.93 | 1.59 |
- | 30720 | 49.56 | 2.34 |
- +---------------+-----------------+----------------+
- ```
+ For details, please refer to [📖 Performance Testing Usage Guide](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/index.html).

- ## 🖊️ Custom Dataset Evaluation
- EvalScope supports custom dataset evaluation. For detailed information, please refer to the Custom Dataset Evaluation [📖User Guide](https://evalscope.readthedocs.io/en/latest/advanced_guides/custom_dataset/index.html)
+ Example output is shown below:
+ <p align="center">
+ <img src="docs/en/user_guides/stress_test/images/multi_perf.png" style="width: 80%;">
+ </p>


- ## ⚔️ Arena Mode
+ ## 📊 Visualizing Evaluation Results

- Arena mode allows you to configure multiple candidate models and specify a baseline model. Evaluation is performed by pairwise battles between each candidate model and the baseline model, with the final output including each model's win rate and ranking. This method is suitable for comparative evaluation among multiple models, providing an intuitive reflection of each model's strengths and weaknesses. Refer to: Arena Mode [📖 User Guide](https://evalscope.readthedocs.io/en/latest/user_guides/arena.html)
+ EvalScope provides a Gradio-based WebUI for interactive analysis and comparison of evaluation results.

- ```text
- Model WinRate (%) CI (%)
- ------------ ------------- ---------------
- qwen2.5-72b 69.3 (-13.3 / +12.2)
- qwen2.5-7b 50 (+0.0 / +0.0)
- qwen2.5-0.5b 4.7 (-2.5 / +4.4)
- ```
+ 1. **Install Dependencies**
+ ```bash
+ pip install 'evalscope[app]'
+ ```
+
+ 2. **Start Service**
+ ```bash
+ evalscope app
+ ```
+ Visit `http://127.0.0.1:7861` to open the visualization interface.
+
+ <table>
+ <tr>
+ <td style="text-align: center;">
+ <img src="docs/en/get_started/images/setting.png" alt="Setting" style="width: 85%;" />
+ <p>Settings Interface</p>
+ </td>
+ <td style="text-align: center;">
+ <img src="docs/en/get_started/images/model_compare.png" alt="Model Compare" style="width: 100%;" />
+ <p>Model Comparison</p>
+ </td>
+ </tr>
+ <tr>
+ <td style="text-align: center;">
+ <img src="docs/en/get_started/images/report_overview.png" alt="Report Overview" style="width: 100%;" />
+ <p>Report Overview</p>
+ </td>
+ <td style="text-align: center;">
+ <img src="docs/en/get_started/images/report_details.png" alt="Report Details" style="width: 85%;" />
+ <p>Report Details</p>
+ </td>
+ </tr>
+ </table>
+
+ For details, please refer to [📖 Visualizing Evaluation Results](https://evalscope.readthedocs.io/en/latest/get_started/visualization.html).

- ## 👷‍♂️ Contribution
+ ## 👷‍♂️ Contributing

- EvalScope, as the official evaluation tool of [ModelScope](https://modelscope.cn), is continuously optimizing its benchmark evaluation features! We invite you to refer to the [Contribution Guide](https://evalscope.readthedocs.io/en/latest/advanced_guides/add_benchmark.html) to easily add your own evaluation benchmarks and share your contributions with the community. Let's work together to support the growth of EvalScope and make our tools even better! Join us now!
+ We welcome any contributions from the community! If you want to add new evaluation benchmarks, models, or features, please refer to our [Contributing Guide](https://evalscope.readthedocs.io/en/latest/advanced_guides/add_benchmark.html).
+
+ Thanks to all developers who have contributed to EvalScope!

  <a href="https://github.com/modelscope/evalscope/graphs/contributors" target="_blank">
  <table>
@@ -645,8 +529,10 @@ EvalScope, as the official evaluation tool of [ModelScope](https://modelscope.cn
  </table>
  </a>

+
  ## 📚 Citation

+ If you use EvalScope in your research, please cite our work:
  ```bibtex
  @misc{evalscope_2024,
  title={{EvalScope}: Evaluation Framework for Large Models},
@@ -656,20 +542,6 @@ EvalScope, as the official evaluation tool of [ModelScope](https://modelscope.cn
  }
  ```

- ## 🔜 Roadmap
- - [x] Support for better evaluation report visualization
- - [x] Support for mixed evaluations across multiple datasets
- - [x] RAG evaluation
- - [x] VLM evaluation
- - [x] Agents evaluation
- - [x] vLLM
- - [ ] Distributed evaluating
- - [x] Multi-modal evaluation
- - [ ] Benchmarks
- - [x] BFCL-v3
- - [x] GPQA
- - [x] MBPP
-

  ## ⭐ Star History
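One metadata change worth noting alongside the README rewrite: 1.1.1 declares a new `sandbox` extra that pulls in `ms-enclave[docker]`, matching the sandbox-related changes in `evalscope/api/mixin/sandbox_mixin.py` listed above. A minimal sketch of installing it; the extra name comes straight from the METADATA diff, and whether you need it depends on whether your benchmarks run code in a sandbox.

```shell
# New optional extra declared in evalscope 1.1.1; installs ms-enclave[docker]
pip install 'evalscope[sandbox]'
```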