evalscope 1.0.0__py3-none-any.whl → 1.0.2__py3-none-any.whl
This diff shows the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- evalscope/api/benchmark/__init__.py +1 -1
- evalscope/api/benchmark/adapters/__init__.py +2 -0
- evalscope/api/benchmark/adapters/default_data_adapter.py +7 -4
- evalscope/api/benchmark/adapters/image_edit_adapter.py +82 -0
- evalscope/api/benchmark/adapters/multi_choice_adapter.py +5 -2
- evalscope/api/benchmark/adapters/text2image_adapter.py +12 -10
- evalscope/api/benchmark/adapters/vision_language_adapter.py +8 -0
- evalscope/api/benchmark/benchmark.py +62 -2
- evalscope/api/benchmark/meta.py +9 -0
- evalscope/api/dataset/dataset.py +6 -6
- evalscope/api/dataset/loader.py +2 -1
- evalscope/api/evaluator/cache.py +24 -1
- evalscope/api/evaluator/evaluator.py +5 -0
- evalscope/api/evaluator/state.py +17 -1
- evalscope/api/messages/__init__.py +1 -0
- evalscope/api/messages/chat_message.py +52 -2
- evalscope/api/metric/scorer.py +15 -7
- evalscope/api/mixin/__init__.py +1 -1
- evalscope/api/mixin/llm_judge_mixin.py +2 -0
- evalscope/api/mixin/sandbox_mixin.py +204 -0
- evalscope/api/model/generate_config.py +1 -6
- evalscope/api/model/model.py +5 -2
- evalscope/api/tool/tool_info.py +1 -1
- evalscope/app/app.py +3 -0
- evalscope/app/ui/single_model.py +3 -3
- evalscope/app/utils/data_utils.py +7 -7
- evalscope/app/utils/env_utils.py +12 -0
- evalscope/app/utils/text_utils.py +14 -12
- evalscope/arguments.py +8 -4
- evalscope/backend/opencompass/backend_manager.py +0 -2
- evalscope/backend/rag_eval/utils/embedding.py +9 -1
- evalscope/benchmarks/ai2d/ai2d_adapter.py +53 -0
- evalscope/benchmarks/amc/amc_adapter.py +46 -0
- evalscope/benchmarks/bbh/bbh_adapter.py +43 -17
- evalscope/benchmarks/bfcl/bfcl_adapter.py +142 -7
- evalscope/benchmarks/bfcl/generation.py +9 -9
- evalscope/benchmarks/ceval/ceval_adapter.py +1 -2
- evalscope/benchmarks/data_collection/data_collection_adapter.py +23 -19
- evalscope/benchmarks/drop/drop_adapter.py +1 -1
- evalscope/benchmarks/frames/frames_adapter.py +2 -1
- evalscope/benchmarks/general_arena/general_arena_adapter.py +5 -1
- evalscope/benchmarks/healthbench/healthbench_adapter.py +282 -0
- evalscope/benchmarks/healthbench/utils.py +102 -0
- evalscope/benchmarks/humaneval/humaneval_adapter.py +19 -35
- evalscope/benchmarks/humaneval/utils.py +235 -0
- evalscope/benchmarks/ifeval/instructions_util.py +2 -3
- evalscope/benchmarks/image_edit/__init__.py +0 -0
- evalscope/benchmarks/image_edit/gedit/__init__.py +0 -0
- evalscope/benchmarks/image_edit/gedit/gedit_adapter.py +138 -0
- evalscope/benchmarks/image_edit/gedit/utils.py +372 -0
- evalscope/benchmarks/image_edit/gedit/vie_prompts.py +406 -0
- evalscope/benchmarks/live_code_bench/evaluate_utils.py +13 -6
- evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +60 -37
- evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0
- evalscope/benchmarks/math_500/math_500_adapter.py +0 -1
- evalscope/benchmarks/math_vista/__init__.py +0 -0
- evalscope/benchmarks/math_vista/math_vista_adapter.py +129 -0
- evalscope/benchmarks/minerva_math/__init__.py +0 -0
- evalscope/benchmarks/minerva_math/minerva_math_adapter.py +48 -0
- evalscope/benchmarks/mm_bench/__init__.py +0 -0
- evalscope/benchmarks/mm_bench/mm_bench_adapter.py +99 -0
- evalscope/benchmarks/mm_star/__init__.py +0 -0
- evalscope/benchmarks/mm_star/mm_star_adapter.py +73 -0
- evalscope/benchmarks/mmmu/__init__.py +0 -0
- evalscope/benchmarks/mmmu/mmmu_adapter.py +159 -0
- evalscope/benchmarks/mmmu_pro/__init__.py +0 -0
- evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +124 -0
- evalscope/benchmarks/multi_if/__init__.py +0 -0
- evalscope/benchmarks/multi_if/ifeval.py +3354 -0
- evalscope/benchmarks/multi_if/metrics.py +120 -0
- evalscope/benchmarks/multi_if/multi_if_adapter.py +161 -0
- evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +6 -5
- evalscope/benchmarks/olympiad_bench/__init__.py +0 -0
- evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +163 -0
- evalscope/benchmarks/olympiad_bench/utils.py +565 -0
- evalscope/benchmarks/omni_bench/__init__.py +0 -0
- evalscope/benchmarks/omni_bench/omni_bench_adapter.py +86 -0
- evalscope/benchmarks/real_world_qa/__init__.py +0 -0
- evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +64 -0
- evalscope/benchmarks/tau_bench/generation.py +1 -1
- evalscope/benchmarks/tau_bench/tau_bench_adapter.py +20 -19
- evalscope/benchmarks/text2image/__init__.py +0 -0
- evalscope/benchmarks/{aigc/t2i → text2image}/evalmuse_adapter.py +3 -1
- evalscope/benchmarks/{aigc/t2i → text2image}/genai_bench_adapter.py +2 -2
- evalscope/benchmarks/{aigc/t2i → text2image}/general_t2i_adapter.py +1 -1
- evalscope/benchmarks/{aigc/t2i → text2image}/hpdv2_adapter.py +7 -2
- evalscope/benchmarks/{aigc/t2i → text2image}/tifa_adapter.py +1 -0
- evalscope/benchmarks/truthful_qa/truthful_qa_adapter.py +1 -2
- evalscope/cli/start_app.py +7 -1
- evalscope/cli/start_perf.py +7 -1
- evalscope/config.py +96 -14
- evalscope/constants.py +11 -0
- evalscope/evaluator/evaluator.py +30 -10
- evalscope/metrics/llm_judge.py +19 -7
- evalscope/metrics/metric.py +27 -2
- evalscope/models/image_edit_model.py +125 -0
- evalscope/models/model_apis.py +22 -0
- evalscope/models/openai_compatible.py +3 -0
- evalscope/models/text2image_model.py +2 -2
- evalscope/models/utils/openai.py +8 -6
- evalscope/perf/arguments.py +2 -0
- evalscope/perf/benchmark.py +2 -0
- evalscope/perf/plugin/api/base.py +2 -2
- evalscope/perf/plugin/api/default_api.py +7 -7
- evalscope/perf/plugin/api/openai_api.py +83 -19
- evalscope/perf/plugin/datasets/flickr8k.py +2 -2
- evalscope/perf/plugin/datasets/kontext_bench.py +2 -2
- evalscope/perf/plugin/datasets/random_vl_dataset.py +2 -2
- evalscope/perf/utils/benchmark_util.py +7 -5
- evalscope/perf/utils/local_server.py +3 -0
- evalscope/report/__init__.py +0 -1
- evalscope/report/combinator.py +0 -25
- evalscope/report/generator.py +8 -87
- evalscope/report/report.py +8 -4
- evalscope/run.py +9 -5
- evalscope/third_party/toolbench_static/llm/swift_infer.py +0 -4
- evalscope/utils/chat_service.py +1 -1
- evalscope/utils/function_utils.py +41 -0
- evalscope/utils/import_utils.py +73 -1
- evalscope/utils/io_utils.py +56 -7
- evalscope/utils/json_schema.py +23 -2
- evalscope/utils/logger.py +19 -0
- evalscope/utils/model_utils.py +4 -3
- evalscope/utils/multi_choices.py +23 -6
- evalscope/version.py +2 -2
- {evalscope-1.0.0.dist-info → evalscope-1.0.2.dist-info}/METADATA +17 -24
- {evalscope-1.0.0.dist-info → evalscope-1.0.2.dist-info}/RECORD +145 -103
- tests/benchmark/test_eval.py +80 -37
- tests/benchmark/test_image_edit.py +65 -0
- tests/benchmark/test_sandbox.py +81 -0
- tests/benchmark/test_vlm.py +137 -0
- tests/cli/test_all.py +83 -43
- tests/cli/test_collection.py +8 -5
- tests/cli/test_reasoning.py +81 -0
- tests/common.py +73 -0
- tests/perf/test_perf.py +44 -14
- tests/rag/test_clip_benchmark.py +0 -3
- evalscope/api/mixin/dataset_mixin.py +0 -105
- evalscope/benchmarks/aigc/i2i/general_i2i_adapter.py +0 -44
- tests/aigc/__init__.py +0 -1
- /evalscope/benchmarks/{aigc → ai2d}/__init__.py +0 -0
- /evalscope/benchmarks/{aigc/i2i → amc}/__init__.py +0 -0
- /evalscope/benchmarks/{aigc/t2i → healthbench}/__init__.py +0 -0
- {evalscope-1.0.0.dist-info → evalscope-1.0.2.dist-info}/LICENSE +0 -0
- {evalscope-1.0.0.dist-info → evalscope-1.0.2.dist-info}/WHEEL +0 -0
- {evalscope-1.0.0.dist-info → evalscope-1.0.2.dist-info}/entry_points.txt +0 -0
- {evalscope-1.0.0.dist-info → evalscope-1.0.2.dist-info}/top_level.txt +0 -0
- /tests/{aigc → benchmark}/test_t2i.py +0 -0
evalscope/utils/multi_choices.py
CHANGED
```diff
@@ -1,11 +1,8 @@
 # flake8: noqa: E501
-from __future__ import annotations
-
 import re
-from typing import
+from typing import List, Optional, Union
 
-
-from evalscope.api.evaluator import Choices, Target, TaskState
+from evalscope.api.evaluator import Choices, Target, TaskState
 
 FEW_SHOT_TEMPLATE = r"""Here are some examples of how to answer similar questions:
 
@@ -84,7 +81,9 @@ def answer_options(choices: Choices) -> str:
     return '\n'.join([f'{answer_character(i)}) {choices[j].value}' for i, j in enumerate(indexes)])
 
 
-def prompt(question: str, choices: Choices, template: str, fewshot: Optional[str] = None) -> str:
+def prompt(question: str, choices: Union[Choices, List[str]], template: str, fewshot: Optional[str] = None) -> str:
+    if isinstance(choices, list):
+        choices = Choices(choices)
 
     choices_text = answer_options(choices)
     letters = ','.join(answer_character(i) for i in range(len(choices)))
@@ -122,6 +121,14 @@ def format_example(
     return f'{question}\n{choices_text}\nANSWER: {answer.text}'
 
 
+def _fallback_parse_answer(completion: str) -> Optional[set[str]]:
+    # Fallback to find the last upper case letter
+    for letter in reversed(completion):
+        if letter.isupper():
+            return {letter}
+    return None
+
+
 def parse_answers(state: TaskState, multiple_correct: bool = False) -> set[str]:
     """
     Convenience function for extracting answers from the state output.
@@ -150,6 +157,11 @@ def parse_answers(state: TaskState, multiple_correct: bool = False) -> set[str]:
         state.output.completion,
     )
 
+    if match is None:
+        fallback_answer = _fallback_parse_answer(state.output.completion)
+        if fallback_answer:
+            return fallback_answer
+
     if match is None:
         return set()
 
@@ -200,6 +212,11 @@ def parse_answers_zh(state: TaskState, multiple_correct: bool = False) -> set[str]:
     pattern = r'答案\s*[::]\s*([A-Za-z0-9,,]+)'
     match = re.search(pattern, state.output.completion, flags=re.MULTILINE)
 
+    if match is None:
+        fallback_answer = _fallback_parse_answer(state.output.completion)
+        if fallback_answer:
+            return fallback_answer
+
     if match is None:
         return set()
 
```
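In effect, `prompt()` now accepts a plain list of strings and coerces it into `Choices`, while `parse_answers` and `parse_answers_zh` share a new fallback: when the answer regex matches nothing, the last uppercase letter in the completion is treated as the selected option. A standalone sketch of that fallback behavior (the `TaskState` plumbing of the real module is omitted here):

```python
from typing import Optional


def _fallback_parse_answer(completion: str) -> Optional[set]:
    # Walk the completion backwards; the last uppercase letter is
    # treated as the chosen option letter.
    for letter in reversed(completion):
        if letter.isupper():
            return {letter}
    return None


# Completions the regex-only parsers previously returned empty sets for:
print(_fallback_parse_answer('I think the correct choice is B.'))  # {'B'}
print(_fallback_parse_answer('no explicit option given'))          # None
```

Note that the fallback fires on any uppercase character, so a completion ending in an ordinary capitalized word can still yield a (possibly spurious) answer set.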
{evalscope-1.0.0.dist-info → evalscope-1.0.2.dist-info}/METADATA
CHANGED
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: evalscope
-Version: 1.0.0
+Version: 1.0.2
 Summary: EvalScope: Lightweight LLMs Evaluation Framework
 Home-page: https://github.com/modelscope/evalscope
 Author: ModelScope team
@@ -17,16 +17,14 @@ Classifier: Programming Language :: Python :: 3.12
 Requires-Python: >=3.9
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: accelerate
 Requires-Dist: colorlog
 Requires-Dist: datasets==3.6.0
 Requires-Dist: docstring-parser
 Requires-Dist: dotenv
-Requires-Dist: immutabledict
 Requires-Dist: jieba
 Requires-Dist: jsonlines
 Requires-Dist: langdetect
-Requires-Dist: latex2sympy2-extended
+Requires-Dist: latex2sympy2-extended[antlr4_9_3]
 Requires-Dist: matplotlib
 Requires-Dist: modelscope[framework]>=1.27
 Requires-Dist: nltk>=3.9
@@ -34,7 +32,6 @@ Requires-Dist: openai
 Requires-Dist: overrides
 Requires-Dist: pandas
 Requires-Dist: pillow
-Requires-Dist: pyarrow
 Requires-Dist: pydantic
 Requires-Dist: pyyaml>=5.1
 Requires-Dist: requests
@@ -46,7 +43,6 @@ Requires-Dist: scikit-learn
 Requires-Dist: seaborn
 Requires-Dist: sympy
 Requires-Dist: tabulate
-Requires-Dist: torch
 Requires-Dist: tqdm
 Requires-Dist: transformers>=4.33
 Requires-Dist: word2number
@@ -57,18 +53,17 @@ Requires-Dist: omegaconf; extra == "aigc"
 Requires-Dist: open-clip-torch; extra == "aigc"
 Requires-Dist: opencv-python; extra == "aigc"
 Requires-Dist: peft>=0.17; extra == "aigc"
+Requires-Dist: torch; extra == "aigc"
 Requires-Dist: torchvision; extra == "aigc"
 Provides-Extra: all
-Requires-Dist: accelerate; extra == "all"
 Requires-Dist: colorlog; extra == "all"
 Requires-Dist: datasets==3.6.0; extra == "all"
 Requires-Dist: docstring-parser; extra == "all"
 Requires-Dist: dotenv; extra == "all"
-Requires-Dist: immutabledict; extra == "all"
 Requires-Dist: jieba; extra == "all"
 Requires-Dist: jsonlines; extra == "all"
 Requires-Dist: langdetect; extra == "all"
-Requires-Dist: latex2sympy2-extended; extra == "all"
+Requires-Dist: latex2sympy2-extended[antlr4_9_3]; extra == "all"
 Requires-Dist: matplotlib; extra == "all"
 Requires-Dist: modelscope[framework]>=1.27; extra == "all"
 Requires-Dist: nltk>=3.9; extra == "all"
@@ -76,7 +71,6 @@ Requires-Dist: openai; extra == "all"
 Requires-Dist: overrides; extra == "all"
 Requires-Dist: pandas; extra == "all"
 Requires-Dist: pillow; extra == "all"
-Requires-Dist: pyarrow; extra == "all"
 Requires-Dist: pydantic; extra == "all"
 Requires-Dist: pyyaml>=5.1; extra == "all"
 Requires-Dist: requests; extra == "all"
@@ -88,7 +82,6 @@ Requires-Dist: scikit-learn; extra == "all"
 Requires-Dist: seaborn; extra == "all"
 Requires-Dist: sympy; extra == "all"
 Requires-Dist: tabulate; extra == "all"
-Requires-Dist: torch; extra == "all"
 Requires-Dist: tqdm; extra == "all"
 Requires-Dist: transformers>=4.33; extra == "all"
 Requires-Dist: word2number; extra == "all"
@@ -100,9 +93,11 @@ Requires-Dist: langchain-core<0.4.0,>=0.3.0; extra == "all"
 Requires-Dist: langchain-openai<0.4.0,>=0.3.0; extra == "all"
 Requires-Dist: mteb==1.38.20; extra == "all"
 Requires-Dist: ragas==0.2.14; extra == "all"
+Requires-Dist: torch; extra == "all"
 Requires-Dist: webdataset>0.2.0; extra == "all"
 Requires-Dist: aiohttp; extra == "all"
 Requires-Dist: fastapi; extra == "all"
+Requires-Dist: jinja2; extra == "all"
 Requires-Dist: numpy; extra == "all"
 Requires-Dist: sse-starlette; extra == "all"
 Requires-Dist: transformers; extra == "all"
@@ -116,17 +111,10 @@ Requires-Dist: open-clip-torch; extra == "all"
 Requires-Dist: opencv-python; extra == "all"
 Requires-Dist: peft>=0.17; extra == "all"
 Requires-Dist: torchvision; extra == "all"
-Requires-Dist: bfcl-eval==2025.6.16; extra == "all"
-Requires-Dist: human-eval; extra == "all"
-Requires-Dist: pytest; extra == "all"
-Requires-Dist: pytest-cov; extra == "all"
-Requires-Dist: python-dotenv; extra == "all"
 Provides-Extra: app
 Requires-Dist: gradio==5.4.0; extra == "app"
 Requires-Dist: plotly<6.0.0,>=5.23.0; extra == "app"
 Provides-Extra: dev
-Requires-Dist: bfcl-eval==2025.6.16; extra == "dev"
-Requires-Dist: human-eval; extra == "dev"
 Requires-Dist: pytest; extra == "dev"
 Requires-Dist: pytest-cov; extra == "dev"
 Requires-Dist: python-dotenv; extra == "dev"
@@ -142,6 +130,7 @@ Requires-Dist: ms-opencompass>=0.1.6; extra == "opencompass"
 Provides-Extra: perf
 Requires-Dist: aiohttp; extra == "perf"
 Requires-Dist: fastapi; extra == "perf"
+Requires-Dist: jinja2; extra == "perf"
 Requires-Dist: numpy; extra == "perf"
 Requires-Dist: rich; extra == "perf"
 Requires-Dist: sse-starlette; extra == "perf"
@@ -154,6 +143,7 @@ Requires-Dist: langchain-core<0.4.0,>=0.3.0; extra == "rag"
 Requires-Dist: langchain-openai<0.4.0,>=0.3.0; extra == "rag"
 Requires-Dist: mteb==1.38.20; extra == "rag"
 Requires-Dist: ragas==0.2.14; extra == "rag"
+Requires-Dist: torch; extra == "rag"
 Requires-Dist: webdataset>0.2.0; extra == "rag"
 Provides-Extra: vlmeval
 Requires-Dist: ms-vlmeval>=0.0.17; extra == "vlmeval"
@@ -277,7 +267,10 @@ Please scan the QR code below to join our community groups:
 >
 > Version 1.0 introduces a major overhaul of the evaluation framework, establishing a new, more modular and extensible API layer under `evalscope/api`. Key improvements include standardized data models for benchmarks, samples, and results; a registry-based design for components such as benchmarks and metrics; and a rewritten core evaluator that orchestrates the new architecture. Existing benchmark adapters have been migrated to this API, resulting in cleaner, more consistent, and easier-to-maintain implementations.
 
-- 🔥 **[2025.
+- 🔥 **[2025.09.19]** Added support for multimodal image-text evaluation benchmarks including RealWorldQA, AI2D, MMStar, MMBench, and OmniBench, as well as pure text evaluation benchmarks such as Multi-IF, HealthBench, and AMC.
+- 🔥 **[2025.09.05]** Added support for vision-language multimodal model evaluation tasks, such as MathVista and MMMU. For more supported datasets, please [refer to the documentation](https://evalscope.readthedocs.io/zh-cn/latest/get_started/supported_dataset/vlm.html).
+- 🔥 **[2025.09.04]** Added support for image editing task evaluation, including the [GEdit-Bench](https://modelscope.cn/datasets/stepfun-ai/GEdit-Bench) benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/image_edit.html).
+- 🔥 **[2025.08.22]** Version 1.0 Refactoring. Break changes, please [refer to](https://evalscope.readthedocs.io/en/latest/get_started/basic_usage.html#switching-to-version-v1-0).
 - 🔥 **[2025.07.18]** The model stress testing now supports randomly generating image-text data for multimodal model evaluation. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#id4).
 - 🔥 **[2025.07.16]** Support for [τ-bench](https://github.com/sierra-research/tau-bench) has been added, enabling the evaluation of AI Agent performance and reliability in real-world scenarios involving dynamic user and tool interactions. For usage instructions, please refer to the [documentation](https://evalscope.readthedocs.io/zh-cn/latest/get_started/supported_dataset/llm.html#bench).
 - 🔥 **[2025.07.14]** Support for "Humanity's Last Exam" ([Humanity's-Last-Exam](https://modelscope.cn/datasets/cais/hle)), a highly challenging evaluation benchmark. For usage instructions, refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/llm.html#humanity-s-last-exam).
@@ -285,7 +278,7 @@ Please scan the QR code below to join our community groups:
 - 🔥 **[2025.06.28]** Optimized custom dataset evaluation: now supports evaluation without reference answers. Enhanced LLM judge usage, with built-in modes for "scoring directly without reference answers" and "checking answer consistency with reference answers". See [reference](https://evalscope.readthedocs.io/en/latest/advanced_guides/custom_dataset/llm.html#qa) for details.
 - 🔥 **[2025.06.19]** Added support for the [BFCL-v3](https://modelscope.cn/datasets/AI-ModelScope/bfcl_v3) benchmark, designed to evaluate model function-calling capabilities across various scenarios. For more information, refer to the [documentation](https://evalscope.readthedocs.io/zh-cn/latest/third_party/bfcl_v3.html).
 - 🔥 **[2025.06.02]** Added support for the Needle-in-a-Haystack test. Simply specify `needle_haystack` to conduct the test, and a corresponding heatmap will be generated in the `outputs/reports` folder, providing a visual representation of the model's performance. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/needle_haystack.html) for more details.
-- 🔥 **[2025.05.29]** Added support for two long document evaluation benchmarks: [DocMath](https://modelscope.cn/datasets/yale-nlp/DocMath-Eval/summary) and [FRAMES](https://modelscope.cn/datasets/iic/frames/summary). For usage guidelines, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html).
+- 🔥 **[2025.05.29]** Added support for two long document evaluation benchmarks: [DocMath](https://modelscope.cn/datasets/yale-nlp/DocMath-Eval/summary) and [FRAMES](https://modelscope.cn/datasets/iic/frames/summary). For usage guidelines, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html).
 - 🔥 **[2025.05.16]** Model service performance stress testing now supports setting various levels of concurrency and outputs a performance test report. [Reference example](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/quick_start.html#id3).
 - 🔥 **[2025.05.13]** Added support for the [ToolBench-Static](https://modelscope.cn/datasets/AI-ModelScope/ToolBench-Static) dataset to evaluate model's tool-calling capabilities. Refer to the [documentation](https://evalscope.readthedocs.io/en/latest/third_party/toolbench.html) for usage instructions. Also added support for the [DROP](https://modelscope.cn/datasets/AI-ModelScope/DROP/dataPeview) and [Winogrande](https://modelscope.cn/datasets/AI-ModelScope/winogrande_val) benchmarks to assess the reasoning capabilities of models.
 <details><summary>More</summary>
@@ -294,7 +287,7 @@ Please scan the QR code below to join our community groups:
 - 🔥 **[2025.04.27]** Support for text-to-image evaluation: Supports 8 metrics including MPS, HPSv2.1Score, etc., and evaluation benchmarks such as EvalMuse, GenAI-Bench. Refer to the [user documentation](https://evalscope.readthedocs.io/en/latest/user_guides/aigc/t2i.html) for more details.
 - 🔥 **[2025.04.10]** Model service stress testing tool now supports the `/v1/completions` endpoint (the default endpoint for vLLM benchmarking)
 - 🔥 **[2025.04.08]** Support for evaluating embedding model services compatible with the OpenAI API has been added. For more details, check the [user guide](https://evalscope.readthedocs.io/en/latest/user_guides/backend/rageval_backend/mteb.html#configure-evaluation-parameters).
-- 🔥 **[2025.03.27]** Added support for [AlpacaEval](https://www.modelscope.cn/datasets/AI-ModelScope/alpaca_eval/dataPeview) and [ArenaHard](https://modelscope.cn/datasets/AI-ModelScope/arena-hard-auto-v0.1/summary) evaluation benchmarks. For usage notes, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html)
+- 🔥 **[2025.03.27]** Added support for [AlpacaEval](https://www.modelscope.cn/datasets/AI-ModelScope/alpaca_eval/dataPeview) and [ArenaHard](https://modelscope.cn/datasets/AI-ModelScope/arena-hard-auto-v0.1/summary) evaluation benchmarks. For usage notes, please refer to the [documentation](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html)
 - 🔥 **[2025.03.20]** The model inference service stress testing now supports generating prompts of specified length using random values. Refer to the [user guide](https://evalscope.readthedocs.io/en/latest/user_guides/stress_test/examples.html#using-the-random-dataset) for more details.
 - 🔥 **[2025.03.13]** Added support for the [LiveCodeBench](https://www.modelscope.cn/datasets/AI-ModelScope/code_generation_lite/summary) code evaluation benchmark, which can be used by specifying `live_code_bench`. Supports evaluating QwQ-32B on LiveCodeBench, refer to the [best practices](https://evalscope.readthedocs.io/en/latest/best_practice/eval_qwq.html).
 - 🔥 **[2025.03.11]** Added support for the [SimpleQA](https://modelscope.cn/datasets/AI-ModelScope/SimpleQA/summary) and [Chinese SimpleQA](https://modelscope.cn/datasets/AI-ModelScope/Chinese-SimpleQA/summary) evaluation benchmarks. These are used to assess the factual accuracy of models, and you can specify `simple_qa` and `chinese_simpleqa` for use. Support for specifying a judge model is also available. For more details, refer to the [relevant parameter documentation](https://evalscope.readthedocs.io/en/latest/get_started/parameters.html).
@@ -493,7 +486,7 @@ run_task(task_cfg="config.json")
 
 ### Basic Parameter
 - `--model`: Specifies the `model_id` of the model in [ModelScope](https://modelscope.cn/), which can be automatically downloaded, e.g., [Qwen/Qwen2.5-0.5B-Instruct](https://modelscope.cn/models/Qwen/Qwen2.5-0.5B-Instruct/summary); or use the local path of the model, e.g., `/path/to/model`
-- `--datasets`: Dataset names, supports inputting multiple datasets separated by spaces. Datasets will be automatically downloaded from modelscope. For supported datasets, refer to the [Dataset List](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset.html)
+- `--datasets`: Dataset names, supports inputting multiple datasets separated by spaces. Datasets will be automatically downloaded from modelscope. For supported datasets, refer to the [Dataset List](https://evalscope.readthedocs.io/en/latest/get_started/supported_dataset/index.html)
 - `--limit`: Maximum amount of evaluation data for each dataset. If not specified, it defaults to evaluating all data. Can be used for quick validation
 
 ### Output Results
@@ -582,7 +575,7 @@ For more customized evaluations, such as customizing model parameters or dataset
 evalscope eval \
   --model Qwen/Qwen3-0.6B \
   --model-args '{"revision": "master", "precision": "torch.float16", "device_map": "auto"}' \
-  --generation-config '{"do_sample":true,"temperature":0.6,"
+  --generation-config '{"do_sample":true,"temperature":0.6,"max_tokens":512,"chat_template_kwargs":{"enable_thinking": false}}' \
   --dataset-args '{"gsm8k": {"few_shot_num": 0, "few_shot_random": false}}' \
   --datasets gsm8k \
   --limit 10
@@ -596,7 +589,7 @@ evalscope eval \
 - `--generation-config`: Generation parameters, passed as a JSON string and parsed as a dictionary:
   - `do_sample`: Whether to use sampling
   - `temperature`: Generation temperature
-  - `
+  - `max_tokens`: Maximum length of generated tokens
   - `chat_template_kwargs`: Model inference template parameters
 - `--dataset-args`: Settings for the evaluation dataset, passed as a JSON string where the key is the dataset name and the value is the parameters. Note that these need to correspond one-to-one with the values in the `--datasets` parameter:
   - `few_shot_num`: Number of few-shot examples
```