evalscope 1.0.2__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff shows the content changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.

Files changed (87)
  1. evalscope/api/benchmark/adapters/default_data_adapter.py +12 -0
  2. evalscope/app/ui/multi_model.py +6 -1
  3. evalscope/app/ui/single_model.py +8 -2
  4. evalscope/app/utils/data_utils.py +3 -2
  5. evalscope/app/utils/visualization.py +2 -2
  6. evalscope/benchmarks/ai2d/ai2d_adapter.py +3 -2
  7. evalscope/benchmarks/bfcl/bfcl_adapter.py +10 -45
  8. evalscope/benchmarks/blink/blink_adapter.py +61 -0
  9. evalscope/benchmarks/chartqa/__init__.py +0 -0
  10. evalscope/benchmarks/chartqa/chartqa_adapter.py +80 -0
  11. evalscope/benchmarks/chartqa/utils.py +38 -0
  12. evalscope/benchmarks/docvqa/__init__.py +0 -0
  13. evalscope/benchmarks/docvqa/docvqa_adapter.py +67 -0
  14. evalscope/benchmarks/general_arena/utils.py +2 -1
  15. evalscope/benchmarks/hle/hle_adapter.py +3 -2
  16. evalscope/benchmarks/infovqa/__init__.py +0 -0
  17. evalscope/benchmarks/infovqa/infovqa_adapter.py +66 -0
  18. evalscope/benchmarks/mm_bench/mm_bench_adapter.py +2 -2
  19. evalscope/benchmarks/mmmu/mmmu_adapter.py +1 -1
  20. evalscope/benchmarks/ocr_bench/__init__.py +0 -0
  21. evalscope/benchmarks/ocr_bench/ocr_bench_adapter.py +101 -0
  22. evalscope/benchmarks/ocr_bench_v2/IoUscore_metric.py +87 -0
  23. evalscope/benchmarks/ocr_bench_v2/TEDS_metric.py +963 -0
  24. evalscope/benchmarks/ocr_bench_v2/__init__.py +0 -0
  25. evalscope/benchmarks/ocr_bench_v2/ocr_bench_v2_adapter.py +161 -0
  26. evalscope/benchmarks/ocr_bench_v2/page_ocr_metric.py +50 -0
  27. evalscope/benchmarks/ocr_bench_v2/parallel.py +46 -0
  28. evalscope/benchmarks/ocr_bench_v2/spotting_eval/__init__.py +0 -0
  29. evalscope/benchmarks/ocr_bench_v2/spotting_eval/readme.txt +26 -0
  30. evalscope/benchmarks/ocr_bench_v2/spotting_eval/rrc_evaluation_funcs_1_1.py +537 -0
  31. evalscope/benchmarks/ocr_bench_v2/spotting_eval/script.py +481 -0
  32. evalscope/benchmarks/ocr_bench_v2/spotting_metric.py +179 -0
  33. evalscope/benchmarks/ocr_bench_v2/utils.py +432 -0
  34. evalscope/benchmarks/ocr_bench_v2/vqa_metric.py +254 -0
  35. evalscope/metrics/metric.py +51 -0
  36. evalscope/metrics/metrics.py +16 -0
  37. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +0 -0
  38. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +0 -0
  39. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +0 -0
  40. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +0 -0
  41. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +0 -0
  42. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +0 -0
  43. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +0 -0
  44. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +0 -0
  45. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +0 -0
  46. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +0 -0
  47. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +2 -6
  48. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +2 -6
  49. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +2 -6
  50. evalscope/report/__init__.py +9 -1
  51. evalscope/report/combinator.py +52 -2
  52. evalscope/utils/json_schema.py +8 -6
  53. evalscope/utils/multi_choices.py +16 -1
  54. evalscope/version.py +2 -2
  55. {evalscope-1.0.2.dist-info → evalscope-1.1.0.dist-info}/METADATA +6 -32
  56. {evalscope-1.0.2.dist-info → evalscope-1.1.0.dist-info}/RECORD +51 -54
  57. {evalscope-1.0.2.dist-info → evalscope-1.1.0.dist-info}/top_level.txt +0 -1
  58. tests/__init__.py +0 -1
  59. tests/benchmark/__init__.py +0 -1
  60. tests/benchmark/test_eval.py +0 -429
  61. tests/benchmark/test_image_edit.py +0 -65
  62. tests/benchmark/test_sandbox.py +0 -81
  63. tests/benchmark/test_t2i.py +0 -142
  64. tests/benchmark/test_vlm.py +0 -137
  65. tests/cli/__init__.py +0 -1
  66. tests/cli/test_all.py +0 -269
  67. tests/cli/test_collection.py +0 -99
  68. tests/cli/test_custom.py +0 -268
  69. tests/cli/test_reasoning.py +0 -81
  70. tests/common.py +0 -73
  71. tests/perf/__init__.py +0 -1
  72. tests/perf/test_perf.py +0 -206
  73. tests/rag/test_clip_benchmark.py +0 -87
  74. tests/rag/test_mteb.py +0 -213
  75. tests/rag/test_ragas.py +0 -128
  76. tests/swift/__init__.py +0 -1
  77. tests/swift/test_run_swift_eval.py +0 -146
  78. tests/swift/test_run_swift_vlm_eval.py +0 -128
  79. tests/swift/test_run_swift_vlm_jugde_eval.py +0 -157
  80. tests/test_run_all.py +0 -12
  81. tests/utils.py +0 -13
  82. tests/vlm/__init__.py +0 -1
  83. tests/vlm/test_vlmeval.py +0 -102
  84. {tests/rag → evalscope/benchmarks/blink}/__init__.py +0 -0
  85. {evalscope-1.0.2.dist-info → evalscope-1.1.0.dist-info}/LICENSE +0 -0
  86. {evalscope-1.0.2.dist-info → evalscope-1.1.0.dist-info}/WHEEL +0 -0
  87. {evalscope-1.0.2.dist-info → evalscope-1.1.0.dist-info}/entry_points.txt +0 -0
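
Alongside the removal of the bundled tests/ tree (deletions shown below), this release adds several multimodal benchmark adapters (blink, chartqa, docvqa, infovqa, ocr_bench, ocr_bench_v2) and extends the metrics and report modules. As a rough, non-authoritative sketch of how one of the new benchmarks might be smoke-tested, the snippet below uses the TaskConfig/run_task API with the same service-style settings that appear in the removed test files; the dataset name 'chartqa' and the model id are assumptions inferred from the adapter paths above and should be checked against the 1.1.0 documentation.

import os

from evalscope import TaskConfig, run_task
from evalscope.constants import EvalType

# Minimal smoke-test configuration; the keys mirror those used in the removed test files below.
task_cfg = TaskConfig(
    model='qwen-vl-plus',  # assumed OpenAI-compatible multimodal service model
    api_url='https://dashscope.aliyuncs.com/compatible-mode/v1',
    api_key=os.environ.get('DASHSCOPE_API_KEY'),
    eval_type=EvalType.SERVICE,
    datasets=['chartqa'],  # assumed registered name of the new chartqa adapter
    limit=5,  # evaluate only a handful of samples
)
run_task(task_cfg=task_cfg)

The same run can likely be expressed through the evalscope CLI (roughly: evalscope eval --model <model> --datasets chartqa --limit 5), but the exact benchmark ids and flags for the new adapters should be taken from the release notes.
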
tests/__init__.py DELETED
@@ -1 +0,0 @@
- # Copyright (c) Alibaba, Inc. and its affiliates.
tests/benchmark/__init__.py DELETED
@@ -1 +0,0 @@
- # Copyright (c) Alibaba, Inc. and its affiliates.
tests/benchmark/test_eval.py DELETED
@@ -1,429 +0,0 @@
- # Copyright (c) Alibaba, Inc. and its affiliates.
- from dotenv import dotenv_values
-
- env = dotenv_values('.env')
-
- import unittest
-
- from evalscope.constants import EvalType, JudgeStrategy, OutputType
- from evalscope.utils.logger import get_logger
- from tests.common import TestBenchmark
-
- logger = get_logger()
-
-
- class TestNativeBenchmark(TestBenchmark):
-     """Benchmark evaluation test cases."""
-
-     def setUp(self):
-         """Setup common test configuration."""
-         self.base_config = {
-             'model': 'qwen-plus',
-             'api_url': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
-             'api_key': env.get('DASHSCOPE_API_KEY'),
-             'eval_type': EvalType.SERVICE,
-             'eval_batch_size': 5,
-             'limit': 5,
-             'generation_config': {
-                 'max_tokens': 4096,
-                 'temperature': 0.0,
-                 'seed': 42,
-                 'parallel_tool_calls': True
-             },
-             'judge_strategy': JudgeStrategy.AUTO,
-             'judge_worker_num': 5,
-             'judge_model_args': {
-                 'model_id': 'qwen3-235b-a22b',
-                 'api_url': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
-                 'api_key': env.get('DASHSCOPE_API_KEY'),
-                 'generation_config': {
-                     'temperature': 0.0,
-                     'max_tokens': 4096,
-                     'extra_body': {'enable_thinking': False}
-                 }
-             },
-             'debug': True,
-         }
-
-
-     # Math & Reasoning datasets
-     def test_gsm8k(self):
-         """Test GSM8K math reasoning dataset."""
-         self._run_dataset_test('gsm8k')
-
-     def test_gsm8k_local(self):
-         """Test GSM8K math reasoning dataset with local path."""
-         dataset_args = {
-             'local_path': 'data/gsm8k',
-         }
-         self._run_dataset_test('gsm8k', dataset_args=dataset_args, use_mock=True)
-
-     def test_mmlu(self):
-         """Test MMLU reasoning dataset."""
-         dataset_args = {
-             'few_shot_num': 0,
-             'subset_list': ['abstract_algebra', 'computer_security']
-         }
-         self._run_dataset_test('mmlu', use_mock=True, dataset_args=dataset_args)
-
-     def test_mmlu_reasoning(self):
-         """Test MMLU reasoning dataset."""
-         dataset_args = {
-             'few_shot_num': 0,
-             'subset_list': ['abstract_algebra', 'computer_security']
-         }
-         self._run_dataset_test('mmlu', dataset_args=dataset_args, model='qwen3-0.6b', stream=True)
-
-     def test_mmlu_pro(self):
-         """Test MMLU-Pro reasoning dataset."""
-         dataset_args = {
-             'few_shot_num': 2,
-             'subset_list': ['computer science', 'math']
-         }
-         self._run_dataset_test('mmlu_pro', use_mock=False, dataset_args=dataset_args, repeats=2)
-
-     def test_mmlu_redux(self):
-         """Test MMLU-Redux reasoning dataset."""
-         dataset_args = {
-             'subset_list': ['abstract_algebra', 'computer_security'],
-         }
-         # self._run_dataset_load_test('mmlu_redux', dataset_args)
-         self._run_dataset_test('mmlu_redux', dataset_args=dataset_args)
-
-     def test_cmmlu(self):
-         """Test C-MMLU reasoning dataset."""
-         dataset_args = {
-             'subset_list': ['agronomy', 'computer_security'],
-             'few_shot_num': 0,
-         }
-         # self._run_dataset_load_test('cmmlu')
-         self._run_dataset_test('cmmlu', dataset_args=dataset_args)
-
-     def test_math_500(self):
-         """Test MATH 500 dataset."""
-         # self._run_dataset_load_test('math_500')
-         dataset_args = {
-             'subset_list': ['Level 1', 'Level 2'],
-             'few_shot_num': 0,
-         }
-         self._run_dataset_test('math_500', dataset_args=dataset_args)
-
-     def test_aime24(self):
-         """Test AIME 2024 dataset."""
-         self._run_dataset_test('aime24')
-
-     def test_aime25(self):
-         """Test AIME 2025 dataset."""
-         self._run_dataset_test('aime25')
-
-     def test_competition_math(self):
-         """Test Competition Math dataset."""
-         dataset_args = {
-             'subset_list': ['Level 4']
-         }
-         self._run_dataset_test('competition_math', dataset_args)
-
-     # Knowledge & QA datasets
-     def test_arc(self):
-         """Test ARC dataset."""
-         # self._run_dataset_load_test('arc')
-         dataset_args = {
-             'subset_list': ['ARC-Easy', 'ARC-Challenge'],
-             'few_shot_num': 2,
-         }
-         self._run_dataset_test('arc', dataset_args=dataset_args)
-
-     def test_ceval(self):
-         """Test CEval dataset."""
-         dataset_args = {
-             'subset_list': ['logic', 'law'],
-             # 'few_shot_num': 0,
-         }
-         # self._run_dataset_load_test('ceval')
-         self._run_dataset_test('ceval', dataset_args=dataset_args)
-
-     def test_super_gpqa(self):
-         """Test Super GPQA dataset."""
-         # self._run_dataset_load_test('super_gpqa')
-
-         dataset_args = {
-             'subset_list': ['History', 'Psychology'],
-             'few_shot_num': 0,
-         }
-         self._run_dataset_test('super_gpqa', dataset_args=dataset_args, ignore_errors=True)
-
-     def test_gpqa(self):
-         """Test GPQA dataset."""
-         # self._run_dataset_load_test('gpqa_diamond')
-         dataset_args = {
-             'few_shot_num': 0,
-         }
-         self._run_dataset_test('gpqa_diamond', dataset_args=dataset_args, ignore_errors=True)
-
-     def test_iquiz(self):
-         """Test IQuiz dataset."""
-         dataset_args = {
-             'subset_list': ['IQ', 'EQ'],
-             'few_shot_num': 0,
-         }
-         self._run_dataset_test('iquiz', dataset_args=dataset_args)
-
-     def test_maritime_bench(self):
-         """Test MaritimeBench dataset."""
-         dataset_args = {
-             'subset_list': ['default'],
-             'few_shot_num': 0,
-         }
-         self._run_dataset_test('maritime_bench', dataset_args=dataset_args)
-
-     def test_musr(self):
-         """Test MuSR dataset."""
-         dataset_args = {
-             'subset_list': ['murder_mysteries', 'object_placements', 'team_allocation'],
-             'few_shot_num': 0,
-         }
-         self._run_dataset_test('musr', dataset_args=dataset_args)
-
-     def test_hellaswag(self):
-         """Test HellaSwag dataset."""
-         self._run_dataset_test('hellaswag')
-
-     def test_truthful_qa(self):
-         """Test TruthfulQA dataset."""
-         dataset_args = {
-             'extra_params': {
-                 'multiple_correct': True
-             }
-         }
-         self._run_dataset_test('truthful_qa', dataset_args=dataset_args)
-
-     def test_trivia_qa(self):
-         """Test TriviaQA dataset."""
-         self._run_dataset_test('trivia_qa')
-
-     def test_race(self):
-         """Test RACE dataset."""
-         self._run_dataset_test('race')
-
-     def test_winogrande(self):
-         """Test winogrande"""
-         self._run_dataset_test('winogrande')
-
-     def test_bbh(self):
-         dataset_args = {
-             'subset_list': ['temporal_sequences', 'navigate'],
-             'few_shot_num': 0,
-         }
-         self._run_dataset_test('bbh', dataset_args=dataset_args)
-
-     def test_simple_qa(self):
-         """Test SimpleQA dataset."""
-         self._run_dataset_test('simple_qa')
-
-     def test_chinese_simpleqa(self):
-         """Test Chinese SimpleQA dataset."""
-         dataset_args = {
-             'subset_list': ['中华文化']
-         }
-         self._run_dataset_test('chinese_simpleqa', dataset_args)
-
-     # Code datasets
-     def test_live_code_bench(self):
-         """Test LiveCodeBench dataset."""
-         dataset_args = {
-             'extra_params': {
-                 'start_date': '2024-08-01',
-                 'end_date': '2025-02-28'
-             },
-             'local_path': '/root/.cache/modelscope/hub/datasets/AI-ModelScope/code_generation_lite'
-         }
-         self._run_dataset_test('live_code_bench', dataset_args)
-
-     def test_humaneval(self):
-         """Test HumanEval dataset."""
-         self._run_dataset_test('humaneval')
-
-     # Custom & specialized datasets
-     def test_general_qa(self):
-         """Test custom general QA dataset."""
-         dataset_args = {
-             'local_path': 'custom_eval/text/qa',
-             'subset_list': ['example']
-         }
-         self._run_dataset_test('general_qa', dataset_args)
-
-     def test_general_mcq(self):
-         """Test custom general MCQ dataset."""
-         dataset_args = {
-             'local_path': 'custom_eval/text/mcq',
-             'subset_list': ['example']
-         }
-         self._run_dataset_test('general_mcq', dataset_args)
-
-     def test_alpaca_eval(self):
-         """Test AlpacaEval dataset."""
-         self._run_dataset_test('alpaca_eval')
-
-     def test_arena_hard(self):
-         """Test Arena Hard dataset."""
-         self._run_dataset_test('arena_hard', use_cache='outputs/20250818_211353')
-
-     def test_frames(self):
-         """Test Frames dataset."""
-         dataset_args = {
-             # 'local_path': '/root/.cache/modelscope/hub/datasets/iic/frames'
-         }
-         self._run_dataset_test('frames', dataset_args)
-
-     def test_docmath(self):
-         """Test DocMath dataset."""
-         self._run_dataset_test('docmath')
-
-     def test_drop(self):
-         """Test DROP dataset."""
-         dataset_args = {
-             'few_shot_num': 3,
-         }
-         self._run_dataset_test('drop', dataset_args=dataset_args)
-
-     def test_ifeval(self):
-         """Test IFEval dataset."""
-         self._run_dataset_test('ifeval')
-
-     def test_needle_haystack(self):
-         """Test Needle in Haystack dataset."""
-         dataset_args = {
-             'subset_list': ['english'],
-             'extra_params': {
-                 'context_lengths_max': 10000,
-                 'context_lengths_num_intervals': 5,
-                 'document_depth_percent_intervals': 5,
-                 'show_score': True,
-             }
-         }
-         self._run_dataset_test('needle_haystack', dataset_args)
-
-     def test_ifeval(self):
-         """Test IFEval dataset."""
-         self._run_dataset_test('ifeval')
-
-     def test_hle(self):
-         """Test HLE dataset."""
-         dataset_args = {
-             'subset_list': ['Math', 'Other'],
-             'extra_params': {
-                 'include_multi_modal': False
-             }
-         }
-         self._run_dataset_test('hle', dataset_args)
-
-     def test_process_bench(self):
-         """Test ProcessBench dataset."""
-         dataset_args = {
-             'subset_list': ['gsm8k', 'math'],
-         }
-         self._run_dataset_test('process_bench', dataset_args, use_cache='outputs/20250819_161844')
-
-     def test_humaneval(self):
-         """Test HumanEval dataset."""
-         dataset_args = {
-             'metric_list': ['Pass@1']
-         }
-         self._run_dataset_test('humaneval', dataset_args)
-
-     def test_live_code_bench(self):
-         """Test LiveCodeBench dataset."""
-         dataset_args = {
-             'subset_list': ['v5'],
-             'review_timeout': 6,
-             'extra_params': {
-                 'start_date': '2024-08-01',
-                 'end_date': '2025-02-28'
-             },
-         }
-         self._run_dataset_test('live_code_bench', dataset_args, limit=20, use_cache='outputs/20250918_200232', rerun_review=True)
-
-     def test_tool_bench(self):
-         """Test ToolBench dataset."""
-         self._run_dataset_test('tool_bench')
-
-     def test_bfcl(self):
-         """Test BFCL dataset."""
-         dataset_args = {
-             'subset_list': [
-                 # 'simple',
-                 # 'live_multiple',
-                 # 'multi_turn_base',
-                 'multi_turn_miss_func'
-             ],
-             'extra_params': {
-                 'is_fc_model': True,
-                 'underscore_to_dot': True
-             }
-         }
-         self._run_dataset_test('bfcl_v3', dataset_args, model='qwen-plus', limit=30, eval_batch_size=5)
-
-     def test_tau_bench(self):
-         dataset_args = {
-             'subset_list': [
-                 'airline',
-                 'retail'
-             ],
-             'extra_params': {
-                 'user_model': 'qwen-plus',
-                 'api_key': env.get('DASHSCOPE_API_KEY'),
-                 'api_base': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
-                 'generation_config': {
-                     'temperature': 0.0,
-                     'max_tokens': 12000,
-                     'stream': True
-                 }
-             }
-         }
-         self._run_dataset_test('tau_bench', dataset_args, limit=5, model='qwq-plus', stream=True)
-
-     def test_r1_collection(self):
-         dataset_args = {
-             'dataset_id': 'evalscope/R1-Distill-Math-Test-v2'
-         }
-         self._run_dataset_test('data_collection', dataset_args)
-
-     def test_qwen3_collection(self):
-         dataset_args = {
-             'dataset_id': 'evalscope/Qwen3-Test-Collection'
-         }
-         self._run_dataset_test('data_collection', dataset_args)
-
-     def test_multi_if(self):
-         dataset_args = {
-             'subset_list': ['English', 'Chinese'],
-             'few_shot_num': 0,
-         }
-         self._run_dataset_test('multi_if', dataset_args, limit=5)
-
-     def test_healthbench(self):
-         dataset_args = {
-             'subset_list': ['health_data_tasks'],
-             'extra_params': {
-                 'version': 'Hard'
-             }
-         }
-         self._run_dataset_test('health_bench', dataset_args, limit=5)
-
-
-     def test_amc(self):
-         dataset_args = {
-             'subset_list': ['amc22'],
-         }
-         self._run_dataset_test('amc', dataset_args)
-
-     def test_minerva_math(self):
-         dataset_args = {
-             'subset_list': ['default'],
-         }
-         self._run_dataset_test('minerva_math', dataset_args)
-
- if __name__ == '__main__':
-     # Run specific test: python -m unittest test_eval.TestBenchmark.test_gsm8k
-     # Run all tests: python -m unittest test_eval.TestBenchmark
-     unittest.main()
tests/benchmark/test_image_edit.py DELETED
@@ -1,65 +0,0 @@
- # Copyright (c) Alibaba, Inc. and its affiliates.
- from dotenv import dotenv_values
-
- env = dotenv_values('.env')
-
- import unittest
-
- from evalscope.constants import EvalType, JudgeStrategy, ModelTask
- from evalscope.utils.logger import get_logger
- from tests.common import TestBenchmark
-
- logger = get_logger()
-
-
- class TestImageEditBenchmark(TestBenchmark):
-     def setUp(self):
-         """Setup common test configuration."""
-         self.base_config = {
-             'model': 'Qwen/Qwen-Image-Edit',
-             'model_args': {
-                 'precision': 'bfloat16',
-                 'device_map': 'cuda:2'
-             },
-             'api_url': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
-             'api_key': env.get('DASHSCOPE_API_KEY'),
-             'model_task': ModelTask.IMAGE_GENERATION,
-             'eval_type': EvalType.IMAGE_EDITING,
-             'eval_batch_size': 1,
-             'limit': 5,
-             'generation_config': {
-                 'true_cfg_scale': 4.0,
-                 'num_inference_steps': 50,
-                 'negative_prompt': ' ',
-             },
-             'judge_strategy': JudgeStrategy.AUTO,
-             'judge_worker_num': 5,
-             'judge_model_args': {
-                 'model_id': 'qwen2.5-vl-72b-instruct',
-                 'api_url': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
-                 'api_key': env.get('DASHSCOPE_API_KEY'),
-                 'generation_config': {
-                     'temperature': 0.0,
-                     'max_tokens': 4096,
-                 }
-             },
-             'debug': True,
-         }
-
-     def test_gedit(self):
-         """Test GEdit dataset."""
-         dataset_args = {
-             'extra_params': {
-                 'language': 'cn',
-             }
-         }
-         self._run_dataset_test('gedit', dataset_args=dataset_args, use_cache='outputs/20250829_150058')
-
-     def test_gedit_local(self):
-         dataset_args = {
-             'extra_params': {
-                 'language': 'cn',
-                 'local_file': 'outputs/example_edit.jsonl',
-             }
-         }
-         self._run_dataset_test('gedit', dataset_args=dataset_args, model=None, model_id='offline_model')
tests/benchmark/test_sandbox.py DELETED
@@ -1,81 +0,0 @@
- # Copyright (c) Alibaba, Inc. and its affiliates.
- from dotenv import dotenv_values
-
- env = dotenv_values('.env')
-
- import unittest
-
- from evalscope.constants import EvalType, JudgeStrategy, OutputType
- from evalscope.utils.logger import get_logger
- from tests.common import TestBenchmark
-
- logger = get_logger()
-
-
- class TestCodeBenchmark(TestBenchmark):
-     """Benchmark evaluation test cases."""
-
-     def setUp(self):
-         """Setup common test configuration."""
-         self.base_config = {
-             'model': 'qwen-plus',
-             'api_url': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
-             'api_key': env.get('DASHSCOPE_API_KEY'),
-             'eval_type': EvalType.SERVICE,
-             'eval_batch_size': 5,
-             'limit': 5,
-             'generation_config': {
-                 'max_tokens': 4096,
-                 'temperature': 0.0,
-                 'seed': 42,
-                 'parallel_tool_calls': True
-             },
-             'judge_strategy': JudgeStrategy.AUTO,
-             'judge_worker_num': 5,
-             'judge_model_args': {
-                 'model_id': 'qwen2.5-72b-instruct',
-                 'api_url': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
-                 'api_key': env.get('DASHSCOPE_API_KEY'),
-                 'generation_config': {
-                     'temperature': 0.0,
-                     'max_tokens': 4096,
-                 }
-             },
-             'use_sandbox': True,
-             'sandbox_type': 'docker',
-             'debug': True,
-         }
-
-     def test_humaneval(self):
-         """Test Humaneval dataset."""
-         self._run_dataset_test('humaneval', limit=5)
-
-     def test_humaneval_remote_sandbox(self):
-         """Test Humaneval dataset with remote sandbox manager."""
-         sandbox_manager_config = {'base_url': 'http://localhost:8000'}
-         self._run_dataset_test('humaneval', limit=5, sandbox_manager_config=sandbox_manager_config)
-
-     def test_live_code_bench(self):
-         """Test Live Code Bench dataset."""
-         dataset_args = {
-             'subset_list': ['v5'],
-             'review_timeout': 6,
-             'extra_params': {
-                 'start_date': '2024-08-01',
-                 'end_date': '2025-02-28'
-             },
-         }
-         self._run_dataset_test('live_code_bench', limit=5, dataset_args=dataset_args, use_cache='outputs/20250918_200232', rerun_review=True)
-
-     def test_live_code_bench_remote_sandbox(self):
-         """Test Live Code Bench dataset."""
-         dataset_args = {
-             'subset_list': ['v5'],
-             'review_timeout': 6,
-             'extra_params': {
-                 'start_date': '2024-08-01',
-                 'end_date': '2025-02-28'
-             },
-         }
-         sandbox_manager_config = {'base_url': 'http://localhost:8000'}
-         self._run_dataset_test('live_code_bench', limit=20, dataset_args=dataset_args, sandbox_manager_config=sandbox_manager_config, use_cache='outputs/20250918_200232_2', rerun_review=True)