evalscope 1.0.1__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (155)
  1. evalscope/api/benchmark/adapters/default_data_adapter.py +18 -4
  2. evalscope/api/benchmark/adapters/multi_choice_adapter.py +5 -2
  3. evalscope/api/benchmark/adapters/text2image_adapter.py +5 -4
  4. evalscope/api/benchmark/adapters/vision_language_adapter.py +3 -1
  5. evalscope/api/benchmark/benchmark.py +27 -2
  6. evalscope/api/benchmark/meta.py +3 -0
  7. evalscope/api/evaluator/evaluator.py +5 -0
  8. evalscope/api/evaluator/state.py +5 -0
  9. evalscope/api/messages/chat_message.py +6 -1
  10. evalscope/api/mixin/__init__.py +1 -0
  11. evalscope/api/mixin/llm_judge_mixin.py +2 -0
  12. evalscope/api/mixin/sandbox_mixin.py +204 -0
  13. evalscope/api/model/generate_config.py +0 -3
  14. evalscope/api/model/model.py +1 -1
  15. evalscope/api/tool/tool_info.py +1 -1
  16. evalscope/app/ui/multi_model.py +6 -1
  17. evalscope/app/ui/single_model.py +8 -2
  18. evalscope/app/utils/data_utils.py +3 -2
  19. evalscope/app/utils/visualization.py +2 -2
  20. evalscope/arguments.py +6 -0
  21. evalscope/benchmarks/ai2d/ai2d_adapter.py +54 -0
  22. evalscope/benchmarks/amc/__init__.py +0 -0
  23. evalscope/benchmarks/amc/amc_adapter.py +46 -0
  24. evalscope/benchmarks/bbh/bbh_adapter.py +43 -17
  25. evalscope/benchmarks/bfcl/bfcl_adapter.py +106 -2
  26. evalscope/benchmarks/bfcl/generation.py +7 -7
  27. evalscope/benchmarks/blink/__init__.py +0 -0
  28. evalscope/benchmarks/blink/blink_adapter.py +61 -0
  29. evalscope/benchmarks/chartqa/__init__.py +0 -0
  30. evalscope/benchmarks/chartqa/chartqa_adapter.py +80 -0
  31. evalscope/benchmarks/chartqa/utils.py +38 -0
  32. evalscope/benchmarks/docvqa/__init__.py +0 -0
  33. evalscope/benchmarks/docvqa/docvqa_adapter.py +67 -0
  34. evalscope/benchmarks/drop/drop_adapter.py +1 -1
  35. evalscope/benchmarks/general_arena/utils.py +2 -1
  36. evalscope/benchmarks/healthbench/__init__.py +0 -0
  37. evalscope/benchmarks/healthbench/healthbench_adapter.py +282 -0
  38. evalscope/benchmarks/healthbench/utils.py +102 -0
  39. evalscope/benchmarks/hle/hle_adapter.py +3 -2
  40. evalscope/benchmarks/humaneval/humaneval_adapter.py +19 -35
  41. evalscope/benchmarks/humaneval/utils.py +235 -0
  42. evalscope/benchmarks/infovqa/__init__.py +0 -0
  43. evalscope/benchmarks/infovqa/infovqa_adapter.py +66 -0
  44. evalscope/benchmarks/live_code_bench/evaluate_utils.py +13 -6
  45. evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +60 -37
  46. evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0
  47. evalscope/benchmarks/math_500/math_500_adapter.py +0 -1
  48. evalscope/benchmarks/minerva_math/__init__.py +0 -0
  49. evalscope/benchmarks/minerva_math/minerva_math_adapter.py +48 -0
  50. evalscope/benchmarks/mm_bench/__init__.py +0 -0
  51. evalscope/benchmarks/mm_bench/mm_bench_adapter.py +99 -0
  52. evalscope/benchmarks/mm_star/__init__.py +0 -0
  53. evalscope/benchmarks/mm_star/mm_star_adapter.py +73 -0
  54. evalscope/benchmarks/mmmu/mmmu_adapter.py +1 -1
  55. evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +4 -9
  56. evalscope/benchmarks/multi_if/__init__.py +0 -0
  57. evalscope/benchmarks/multi_if/ifeval.py +3354 -0
  58. evalscope/benchmarks/multi_if/metrics.py +120 -0
  59. evalscope/benchmarks/multi_if/multi_if_adapter.py +161 -0
  60. evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +1 -4
  61. evalscope/benchmarks/ocr_bench/__init__.py +0 -0
  62. evalscope/benchmarks/ocr_bench/ocr_bench_adapter.py +101 -0
  63. evalscope/benchmarks/ocr_bench_v2/IoUscore_metric.py +87 -0
  64. evalscope/benchmarks/ocr_bench_v2/TEDS_metric.py +963 -0
  65. evalscope/benchmarks/ocr_bench_v2/__init__.py +0 -0
  66. evalscope/benchmarks/ocr_bench_v2/ocr_bench_v2_adapter.py +161 -0
  67. evalscope/benchmarks/ocr_bench_v2/page_ocr_metric.py +50 -0
  68. evalscope/benchmarks/ocr_bench_v2/parallel.py +46 -0
  69. evalscope/benchmarks/ocr_bench_v2/spotting_eval/__init__.py +0 -0
  70. evalscope/benchmarks/ocr_bench_v2/spotting_eval/readme.txt +26 -0
  71. evalscope/benchmarks/ocr_bench_v2/spotting_eval/rrc_evaluation_funcs_1_1.py +537 -0
  72. evalscope/benchmarks/ocr_bench_v2/spotting_eval/script.py +481 -0
  73. evalscope/benchmarks/ocr_bench_v2/spotting_metric.py +179 -0
  74. evalscope/benchmarks/ocr_bench_v2/utils.py +432 -0
  75. evalscope/benchmarks/ocr_bench_v2/vqa_metric.py +254 -0
  76. evalscope/benchmarks/olympiad_bench/__init__.py +0 -0
  77. evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +163 -0
  78. evalscope/benchmarks/olympiad_bench/utils.py +565 -0
  79. evalscope/benchmarks/omni_bench/__init__.py +0 -0
  80. evalscope/benchmarks/omni_bench/omni_bench_adapter.py +86 -0
  81. evalscope/benchmarks/real_world_qa/__init__.py +0 -0
  82. evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +64 -0
  83. evalscope/benchmarks/tau_bench/tau_bench_adapter.py +6 -1
  84. evalscope/config.py +24 -1
  85. evalscope/constants.py +3 -0
  86. evalscope/evaluator/evaluator.py +25 -7
  87. evalscope/metrics/metric.py +78 -2
  88. evalscope/metrics/metrics.py +16 -0
  89. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +0 -0
  90. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +0 -0
  91. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +0 -0
  92. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +0 -0
  93. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +0 -0
  94. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +0 -0
  95. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +0 -0
  96. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +0 -0
  97. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +0 -0
  98. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +0 -0
  99. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +2 -6
  100. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +2 -6
  101. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +2 -6
  102. evalscope/models/model_apis.py +10 -8
  103. evalscope/models/utils/openai.py +1 -2
  104. evalscope/perf/arguments.py +2 -0
  105. evalscope/perf/plugin/api/base.py +2 -2
  106. evalscope/perf/plugin/api/default_api.py +7 -7
  107. evalscope/perf/plugin/api/openai_api.py +83 -19
  108. evalscope/perf/plugin/datasets/flickr8k.py +2 -2
  109. evalscope/perf/plugin/datasets/kontext_bench.py +2 -2
  110. evalscope/perf/plugin/datasets/random_vl_dataset.py +2 -2
  111. evalscope/perf/utils/benchmark_util.py +1 -2
  112. evalscope/report/__init__.py +9 -1
  113. evalscope/report/combinator.py +45 -20
  114. evalscope/report/report.py +8 -4
  115. evalscope/run.py +1 -1
  116. evalscope/utils/function_utils.py +41 -0
  117. evalscope/utils/import_utils.py +63 -13
  118. evalscope/utils/io_utils.py +19 -11
  119. evalscope/utils/json_schema.py +25 -2
  120. evalscope/utils/logger.py +19 -0
  121. evalscope/utils/model_utils.py +1 -1
  122. evalscope/utils/multi_choices.py +16 -1
  123. evalscope/version.py +2 -2
  124. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/METADATA +10 -40
  125. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/RECORD +120 -95
  126. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/top_level.txt +0 -1
  127. tests/__init__.py +0 -1
  128. tests/benchmark/__init__.py +0 -1
  129. tests/benchmark/test_eval.py +0 -385
  130. tests/benchmark/test_image_edit.py +0 -65
  131. tests/benchmark/test_t2i.py +0 -142
  132. tests/benchmark/test_vlm.py +0 -80
  133. tests/cli/__init__.py +0 -1
  134. tests/cli/test_all.py +0 -269
  135. tests/cli/test_collection.py +0 -99
  136. tests/cli/test_custom.py +0 -268
  137. tests/cli/test_reasoning.py +0 -81
  138. tests/common.py +0 -73
  139. tests/perf/__init__.py +0 -1
  140. tests/perf/test_perf.py +0 -178
  141. tests/rag/test_clip_benchmark.py +0 -87
  142. tests/rag/test_mteb.py +0 -213
  143. tests/rag/test_ragas.py +0 -128
  144. tests/swift/__init__.py +0 -1
  145. tests/swift/test_run_swift_eval.py +0 -146
  146. tests/swift/test_run_swift_vlm_eval.py +0 -128
  147. tests/swift/test_run_swift_vlm_jugde_eval.py +0 -157
  148. tests/test_run_all.py +0 -12
  149. tests/utils.py +0 -13
  150. tests/vlm/__init__.py +0 -1
  151. tests/vlm/test_vlmeval.py +0 -102
  152. {tests/rag → evalscope/benchmarks/ai2d}/__init__.py +0 -0
  153. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/LICENSE +0 -0
  154. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/WHEEL +0 -0
  155. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/entry_points.txt +0 -0
tests/benchmark/test_eval.py DELETED
@@ -1,385 +0,0 @@
- # Copyright (c) Alibaba, Inc. and its affiliates.
- from dotenv import dotenv_values
-
- env = dotenv_values('.env')
-
- import unittest
-
- from evalscope.constants import EvalType, JudgeStrategy, OutputType
- from evalscope.utils.logger import get_logger
- from tests.common import TestBenchmark
-
- logger = get_logger()
-
-
- class TestNativeBenchmark(TestBenchmark):
-     """Benchmark evaluation test cases."""
-
-     def setUp(self):
-         """Setup common test configuration."""
-         self.base_config = {
-             'model': 'qwen-plus',
-             'api_url': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
-             'api_key': env.get('DASHSCOPE_API_KEY'),
-             'eval_type': EvalType.SERVICE,
-             'eval_batch_size': 5,
-             'limit': 5,
-             'generation_config': {
-                 'max_tokens': 4096,
-                 'temperature': 0.0,
-                 'seed': 42,
-                 'parallel_tool_calls': True
-             },
-             'judge_strategy': JudgeStrategy.AUTO,
-             'judge_worker_num': 5,
-             'judge_model_args': {
-                 'model_id': 'qwen2.5-72b-instruct',
-                 'api_url': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
-                 'api_key': env.get('DASHSCOPE_API_KEY'),
-                 'generation_config': {
-                     'temperature': 0.0,
-                     'max_tokens': 4096,
-                 }
-             },
-             'debug': True,
-         }
-
-
-     # Math & Reasoning datasets
-     def test_gsm8k(self):
-         """Test GSM8K math reasoning dataset."""
-         self._run_dataset_test('gsm8k')
-
-     def test_gsm8k_local(self):
-         """Test GSM8K math reasoning dataset with local path."""
-         dataset_args = {
-             'local_path': 'data/gsm8k',
-         }
-         self._run_dataset_test('gsm8k', dataset_args=dataset_args, use_mock=True)
-
-     def test_mmlu(self):
-         """Test MMLU reasoning dataset."""
-         dataset_args = {
-             'few_shot_num': 0,
-             'subset_list': ['abstract_algebra', 'computer_security']
-         }
-         self._run_dataset_test('mmlu', use_mock=True, dataset_args=dataset_args)
-
-     def test_mmlu_pro(self):
-         """Test MMLU-Pro reasoning dataset."""
-         dataset_args = {
-             'few_shot_num': 2,
-             'subset_list': ['computer science', 'math']
-         }
-         self._run_dataset_test('mmlu_pro', use_mock=False, dataset_args=dataset_args, repeats=2)
-
-     def test_mmlu_redux(self):
-         """Test MMLU-Redux reasoning dataset."""
-         dataset_args = {
-             'subset_list': ['abstract_algebra', 'computer_security'],
-         }
-         # self._run_dataset_load_test('mmlu_redux', dataset_args)
-         self._run_dataset_test('mmlu_redux', dataset_args=dataset_args)
-
-     def test_cmmlu(self):
-         """Test C-MMLU reasoning dataset."""
-         dataset_args = {
-             'subset_list': ['agronomy', 'computer_security'],
-             'few_shot_num': 0,
-         }
-         # self._run_dataset_load_test('cmmlu')
-         self._run_dataset_test('cmmlu', dataset_args=dataset_args)
-
-     def test_math_500(self):
-         """Test MATH 500 dataset."""
-         # self._run_dataset_load_test('math_500')
-         dataset_args = {
-             'subset_list': ['Level 1', 'Level 2'],
-             'few_shot_num': 0,
-         }
-         self._run_dataset_test('math_500', dataset_args=dataset_args)
-
-     def test_aime24(self):
-         """Test AIME 2024 dataset."""
-         self._run_dataset_test('aime24')
-
-     def test_aime25(self):
-         """Test AIME 2025 dataset."""
-         self._run_dataset_test('aime25')
-
-     def test_competition_math(self):
-         """Test Competition Math dataset."""
-         dataset_args = {
-             'subset_list': ['Level 4']
-         }
-         self._run_dataset_test('competition_math', dataset_args)
-
-     # Knowledge & QA datasets
-     def test_arc(self):
-         """Test ARC dataset."""
-         # self._run_dataset_load_test('arc')
-         dataset_args = {
-             'subset_list': ['ARC-Easy', 'ARC-Challenge'],
-             'few_shot_num': 2,
-         }
-         self._run_dataset_test('arc', dataset_args=dataset_args)
-
-     def test_ceval(self):
-         """Test CEval dataset."""
-         dataset_args = {
-             'subset_list': ['logic', 'law'],
-             # 'few_shot_num': 0,
-         }
-         # self._run_dataset_load_test('ceval')
-         self._run_dataset_test('ceval', dataset_args=dataset_args)
-
-     def test_super_gpqa(self):
-         """Test Super GPQA dataset."""
-         # self._run_dataset_load_test('super_gpqa')
-
-         dataset_args = {
-             'subset_list': ['History', 'Psychology'],
-             'few_shot_num': 0,
-         }
-         self._run_dataset_test('super_gpqa', dataset_args=dataset_args, ignore_errors=True)
-
-     def test_gpqa(self):
-         """Test GPQA dataset."""
-         # self._run_dataset_load_test('gpqa_diamond')
-         dataset_args = {
-             'few_shot_num': 0,
-         }
-         self._run_dataset_test('gpqa_diamond', dataset_args=dataset_args, ignore_errors=True)
-
-     def test_iquiz(self):
-         """Test IQuiz dataset."""
-         dataset_args = {
-             'subset_list': ['IQ', 'EQ'],
-             'few_shot_num': 0,
-         }
-         self._run_dataset_test('iquiz', dataset_args=dataset_args)
-
-     def test_maritime_bench(self):
-         """Test MaritimeBench dataset."""
-         dataset_args = {
-             'subset_list': ['default'],
-             'few_shot_num': 0,
-         }
-         self._run_dataset_test('maritime_bench', dataset_args=dataset_args)
-
-     def test_musr(self):
-         """Test MuSR dataset."""
-         dataset_args = {
-             'subset_list': ['murder_mysteries', 'object_placements', 'team_allocation'],
-             'few_shot_num': 0,
-         }
-         self._run_dataset_test('musr', dataset_args=dataset_args)
-
-     def test_hellaswag(self):
-         """Test HellaSwag dataset."""
-         self._run_dataset_test('hellaswag')
-
-     def test_truthful_qa(self):
-         """Test TruthfulQA dataset."""
-         dataset_args = {
-             'extra_params': {
-                 'multiple_correct': True
-             }
-         }
-         self._run_dataset_test('truthful_qa', dataset_args=dataset_args)
-
-     def test_trivia_qa(self):
-         """Test TriviaQA dataset."""
-         self._run_dataset_test('trivia_qa')
-
-     def test_race(self):
-         """Test RACE dataset."""
-         self._run_dataset_test('race')
-
-     def test_winogrande(self):
-         """Test winogrande"""
-         self._run_dataset_test('winogrande')
-
-     def test_bbh(self):
-         dataset_args = {
-             'subset_list': ['temporal_sequences', 'navigate'],
-         }
-         self._run_dataset_test('bbh', dataset_args=dataset_args)
-
-     def test_simple_qa(self):
-         """Test SimpleQA dataset."""
-         self._run_dataset_test('simple_qa')
-
-     def test_chinese_simpleqa(self):
-         """Test Chinese SimpleQA dataset."""
-         dataset_args = {
-             'subset_list': ['中华文化']
-         }
-         self._run_dataset_test('chinese_simpleqa', dataset_args)
-
-     # Code datasets
-     def test_live_code_bench(self):
-         """Test LiveCodeBench dataset."""
-         dataset_args = {
-             'extra_params': {
-                 'start_date': '2024-08-01',
-                 'end_date': '2025-02-28'
-             },
-             'local_path': '/root/.cache/modelscope/hub/datasets/AI-ModelScope/code_generation_lite'
-         }
-         self._run_dataset_test('live_code_bench', dataset_args)
-
-     def test_humaneval(self):
-         """Test HumanEval dataset."""
-         self._run_dataset_test('humaneval')
-
-     # Custom & specialized datasets
-     def test_general_qa(self):
-         """Test custom general QA dataset."""
-         dataset_args = {
-             'local_path': 'custom_eval/text/qa',
-             'subset_list': ['example']
-         }
-         self._run_dataset_test('general_qa', dataset_args)
-
-     def test_general_mcq(self):
-         """Test custom general MCQ dataset."""
-         dataset_args = {
-             'local_path': 'custom_eval/text/mcq',
-             'subset_list': ['example']
-         }
-         self._run_dataset_test('general_mcq', dataset_args)
-
-     def test_alpaca_eval(self):
-         """Test AlpacaEval dataset."""
-         self._run_dataset_test('alpaca_eval')
-
-     def test_arena_hard(self):
-         """Test Arena Hard dataset."""
-         self._run_dataset_test('arena_hard', use_cache='outputs/20250818_211353')
-
-     def test_frames(self):
-         """Test Frames dataset."""
-         dataset_args = {
-             # 'local_path': '/root/.cache/modelscope/hub/datasets/iic/frames'
-         }
-         self._run_dataset_test('frames', dataset_args)
-
-     def test_docmath(self):
-         """Test DocMath dataset."""
-         self._run_dataset_test('docmath')
-
-     def test_drop(self):
-         """Test DROP dataset."""
-         dataset_args = {
-             'few_shot_num': 3,
-         }
-         self._run_dataset_test('drop', dataset_args=dataset_args)
-
-     def test_ifeval(self):
-         """Test IFEval dataset."""
-         self._run_dataset_test('ifeval')
-
-     def test_needle_haystack(self):
-         """Test Needle in Haystack dataset."""
-         dataset_args = {
-             'subset_list': ['english'],
-             'extra_params': {
-                 'context_lengths_max': 10000,
-                 'context_lengths_num_intervals': 5,
-                 'document_depth_percent_intervals': 5,
-                 'show_score': True,
-             }
-         }
-         self._run_dataset_test('needle_haystack', dataset_args)
-
-     def test_ifeval(self):
-         """Test IFEval dataset."""
-         self._run_dataset_test('ifeval')
-
-     def test_hle(self):
-         """Test HLE dataset."""
-         dataset_args = {
-             'subset_list': ['Math', 'Other'],
-             'extra_params': {
-                 'include_multi_modal': False
-             }
-         }
-         self._run_dataset_test('hle', dataset_args)
-
-     def test_process_bench(self):
-         """Test ProcessBench dataset."""
-         dataset_args = {
-             'subset_list': ['gsm8k', 'math'],
-         }
-         self._run_dataset_test('process_bench', dataset_args, use_cache='outputs/20250819_161844')
-
-     def test_humaneval(self):
-         """Test HumanEval dataset."""
-         dataset_args = {
-             'metric_list': ['Pass@1', 'Pass@2', 'Pass@5']
-         }
-         self._run_dataset_test('humaneval', dataset_args, repeats=5)
-
-     def test_live_code_bench(self):
-         """Test LiveCodeBench dataset."""
-         dataset_args = {
-             'subset_list': ['v6'],
-             'extra_params': {
-                 'start_date': '2024-08-01',
-                 'end_date': '2025-02-28'
-             },
-         }
-         self._run_dataset_test('live_code_bench', dataset_args, judge_worker_num=1)
-
-     def test_tool_bench(self):
-         """Test ToolBench dataset."""
-         self._run_dataset_test('tool_bench')
-
-     def test_bfcl(self):
-         """Test BFCL dataset."""
-         dataset_args = {
-             'subset_list': ['simple', 'live_multiple', 'multi_turn_base'],
-             'extra_params': {
-                 'is_fc_model': True,
-                 'underscore_to_dot': True
-             }
-         }
-         self._run_dataset_test('bfcl_v3', dataset_args, model='qwq-plus', stream=True)
-
-     def test_tau_bench(self):
-         dataset_args = {
-             'subset_list': [
-                 'airline',
-                 'retail'
-             ],
-             'extra_params': {
-                 'user_model': 'qwen-plus',
-                 'api_key': env.get('DASHSCOPE_API_KEY'),
-                 'api_base': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
-                 'generation_config': {
-                     'temperature': 0.0,
-                     'max_tokens': 12000,
-                     'stream': True
-                 }
-             }
-         }
-         self._run_dataset_test('tau_bench', dataset_args, limit=5, model='qwq-plus', stream=True)
-
-     def test_r1_collection(self):
-         dataset_args = {
-             'dataset_id': 'evalscope/R1-Distill-Math-Test-v2'
-         }
-         self._run_dataset_test('data_collection', dataset_args)
-
-     def test_qwen3_collection(self):
-         dataset_args = {
-             'dataset_id': 'evalscope/Qwen3-Test-Collection'
-         }
-         self._run_dataset_test('data_collection', dataset_args)
-
-
- if __name__ == '__main__':
-     # Run specific test: python -m unittest test_eval.TestBenchmark.test_gsm8k
-     # Run all tests: python -m unittest test_eval.TestBenchmark
-     unittest.main()
tests/benchmark/test_image_edit.py DELETED
@@ -1,65 +0,0 @@
- # Copyright (c) Alibaba, Inc. and its affiliates.
- from dotenv import dotenv_values
-
- env = dotenv_values('.env')
-
- import unittest
-
- from evalscope.constants import EvalType, JudgeStrategy, ModelTask
- from evalscope.utils.logger import get_logger
- from tests.common import TestBenchmark
-
- logger = get_logger()
-
-
- class TestImageEditBenchmark(TestBenchmark):
-     def setUp(self):
-         """Setup common test configuration."""
-         self.base_config = {
-             'model': 'Qwen/Qwen-Image-Edit',
-             'model_args':{
-                 'precision': 'bfloat16',
-                 'device_map': 'cuda:2'
-             },
-             'api_url': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
-             'api_key': env.get('DASHSCOPE_API_KEY'),
-             'model_task': ModelTask.IMAGE_GENERATION,
-             'eval_type': EvalType.IMAGE_EDITING,
-             'eval_batch_size': 1,
-             'limit': 5,
-             'generation_config': {
-                 'true_cfg_scale': 4.0,
-                 'num_inference_steps': 50,
-                 'negative_prompt': ' ',
-             },
-             'judge_strategy': JudgeStrategy.AUTO,
-             'judge_worker_num': 5,
-             'judge_model_args': {
-                 'model_id': 'qwen2.5-vl-72b-instruct',
-                 'api_url': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
-                 'api_key': env.get('DASHSCOPE_API_KEY'),
-                 'generation_config': {
-                     'temperature': 0.0,
-                     'max_tokens': 4096,
-                 }
-             },
-             'debug': True,
-         }
-
-     def test_gedit(self):
-         """Test GEdit dataset."""
-         dataset_args = {
-             'extra_params':{
-                 'language': 'cn',
-             }
-         }
-         self._run_dataset_test('gedit', dataset_args=dataset_args, use_cache='outputs/20250829_150058')
-
-     def test_gedit_local(self):
-         dataset_args = {
-             'extra_params':{
-                 'language': 'cn',
-                 'local_file': 'outputs/example_edit.jsonl',
-             }
-         }
-         self._run_dataset_test('gedit', dataset_args=dataset_args, model=None, model_id='offline_model')
tests/benchmark/test_t2i.py DELETED
@@ -1,142 +0,0 @@
- from dotenv import dotenv_values
-
- env = dotenv_values('.env')
-
- import os
- import unittest
-
- from evalscope.config import TaskConfig
- from evalscope.constants import EvalType, JudgeStrategy, ModelTask, OutputType
- from evalscope.run import run_task
- from evalscope.utils.logger import get_logger
- from tests.utils import test_level_list
-
- os.environ['EVALSCOPE_LOG_LEVEL'] = 'DEBUG'
-
- logger = get_logger()
-
-
- class TestRun(unittest.TestCase):
-     @unittest.skipUnless(0 in test_level_list(), 'skip test in current test level')
-     def test_run_general(self):
-         from evalscope.config import TaskConfig
-
-         task_cfg = TaskConfig(
-             datasets=[
-                 'general_t2i'
-             ],
-             model_task=ModelTask.IMAGE_GENERATION, # must be IMAGE_GENERATION
-             dataset_args={
-                 'general_t2i': {
-                     'metric_list': [
-                         'PickScore',
-                         # 'CLIPScore',
-                         # 'HPSv2Score',
-                         # 'HPSv2.1Score',
-                         # 'BLIPv2Score',
-                         # 'ImageRewardScore',
-                         # 'VQAScore',
-                         # 'FGA_BLIP2Score',
-                         # 'MPS'
-                     ],
-                     'dataset_id': 'custom_eval/multimodal/t2i/example.jsonl',
-                 }
-             }
-         )
-
-         run_task(task_cfg=task_cfg)
-
-     def test_run_local_evalmuse(self):
-         from evalscope import TaskConfig, run_task
-
-         task_cfg = TaskConfig(
-             model_id='T2I-Model', # for display only; no model ID needs to be specified for the actual run
-             model_task=ModelTask.IMAGE_GENERATION,
-             datasets=[
-                 'evalmuse', # use the EvalMuse benchmark
-             ],
-             dataset_args={
-                 'evalmuse': {
-                     'dataset_id': 'data/example.jsonl', # path to the constructed jsonl file
-                 }
-             },
-         )
-
-         run_task(task_cfg=task_cfg)
-
-     @unittest.skipUnless(0 in test_level_list(), 'skip test in current test level')
-     def test_run_benchmark(self):
-
-         task_cfg = TaskConfig(
-             model='stabilityai/stable-diffusion-xl-base-1.0', # model on modelscope
-             model_task=ModelTask.IMAGE_GENERATION, # must be IMAGE_GENERATION
-             model_args={
-                 'use_safetensors': True,
-                 'variant': 'fp16',
-                 'torch_dtype': 'torch.float16',
-             },
-             datasets=[
-                 # 'tifa160',
-                 # 'genai_bench',
-                 'evalmuse',
-                 # 'hpdv2',
-             ],
-             dataset_args={
-                 'tifa160': {
-                     'metric_list': [
-                         # 'PickScore',
-                         # 'CLIPScore',
-                         # 'HPSv2Score',
-                         # 'BLIPv2Score',
-                         # 'ImageRewardScore',
-                         # 'VQAScore',
-                         'FGA_BLIP2Score',
-                     ]
-                 }
-             },
-             limit=5,
-             generation_config={
-                 'num_inference_steps': 50,
-                 'guidance_scale': 7.5
-             },
-             # use_cache='outputs/20250427_134122',
-         )
-
-         run_task(task_cfg=task_cfg)
-
-     @unittest.skipUnless(0 in test_level_list(), 'skip test in current test level')
-     def test_run_benchmark_flux(self):
-
-         task_cfg = TaskConfig(
-             model='black-forest-labs/FLUX.1-dev', # model on modelscope
-             model_task=ModelTask.IMAGE_GENERATION, # must be IMAGE_GENERATION
-             model_args={
-                 'torch_dtype': 'torch.float16',
-             },
-             datasets=[
-                 # 'tifa160',
-                 # 'genai_bench',
-                 'evalmuse',
-                 # 'hpdv2',
-             ],
-             dataset_args={
-                 'tifa160': {
-                     'metric_list': [
-                         'PickScore',
-                         # 'CLIPScore',
-                         # 'HPSv2Score',
-                         # 'BLIPv2Score',
-                         # 'ImageRewardScore',
-                         # 'VQAScore',
-                         # 'FGA_BLIP2Score',
-                     ]
-                 }
-             },
-             generation_config={
-                 'num_inference_steps': 50,
-                 'guidance_scale': 3.5
-             },
-             use_cache='outputs/20250520_112314'
-         )
-
-         run_task(task_cfg=task_cfg)
tests/benchmark/test_vlm.py DELETED
@@ -1,80 +0,0 @@
- # Copyright (c) Alibaba, Inc. and its affiliates.
- from dotenv import dotenv_values
-
- env = dotenv_values('.env')
-
- import unittest
-
- from evalscope.constants import EvalType, JudgeStrategy, OutputType
- from evalscope.utils.logger import get_logger
- from tests.common import TestBenchmark
-
- logger = get_logger()
-
-
- class TestVLMBenchmark(TestBenchmark):
-     """Benchmark evaluation test cases."""
-
-     def setUp(self):
-         """Setup common test configuration."""
-         self.base_config = {
-             'model': 'qwen-vl-plus',
-             'api_url': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
-             'api_key': env.get('DASHSCOPE_API_KEY'),
-             'eval_type': EvalType.SERVICE,
-             'eval_batch_size': 5,
-             'limit': 5,
-             'generation_config': {
-                 'max_tokens': 4096,
-                 'temperature': 0.0,
-                 'seed': 42,
-                 'parallel_tool_calls': True
-             },
-             'judge_strategy': JudgeStrategy.AUTO,
-             'judge_worker_num': 5,
-             'judge_model_args': {
-                 'model_id': 'qwen2.5-72b-instruct',
-                 'api_url': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
-                 'api_key': env.get('DASHSCOPE_API_KEY'),
-                 'generation_config': {
-                     'temperature': 0.0,
-                     'max_tokens': 4096,
-                 }
-             },
-             'debug': True,
-         }
-
-     def test_mmmu(self):
-         dataset_args = {
-             'subset_list':[
-                 'Accounting',
-                 'Agriculture',
-                 # 'Architecture_and_Engineering'
-             ]
-         }
-         self._run_dataset_test('mmmu', dataset_args=dataset_args)
-
-     def test_math_vista(self):
-         dataset_args = {
-             'subset_list': ['default']
-         }
-         self._run_dataset_test('math_vista', dataset_args=dataset_args)
-
-     def test_mmmu_pro(self):
-         dataset_args = {
-             'subset_list':[
-                 'Accounting',
-                 # 'Agriculture',
-             ],
-             'extra_params': {
-                 'dataset_format': 'standard (4 options)', # 'standard (4 options)', 'standard (10 options)', 'vision'
-             },
-         }
-         self._run_dataset_test('mmmu_pro', dataset_args=dataset_args, limit=10)
-
-     def test_qwen3_collection(self):
-         dataset_args = {
-             'dataset_id': 'outputs/qwen3_vl_test.jsonl',
-             'shuffle': True,
-         }
-         self._run_dataset_test('data_collection', dataset_args)
tests/cli/__init__.py DELETED
@@ -1 +0,0 @@
- # Copyright (c) Alibaba, Inc. and its affiliates.