evalscope 1.0.1__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of evalscope might be problematic.

Files changed (155)
  1. evalscope/api/benchmark/adapters/default_data_adapter.py +18 -4
  2. evalscope/api/benchmark/adapters/multi_choice_adapter.py +5 -2
  3. evalscope/api/benchmark/adapters/text2image_adapter.py +5 -4
  4. evalscope/api/benchmark/adapters/vision_language_adapter.py +3 -1
  5. evalscope/api/benchmark/benchmark.py +27 -2
  6. evalscope/api/benchmark/meta.py +3 -0
  7. evalscope/api/evaluator/evaluator.py +5 -0
  8. evalscope/api/evaluator/state.py +5 -0
  9. evalscope/api/messages/chat_message.py +6 -1
  10. evalscope/api/mixin/__init__.py +1 -0
  11. evalscope/api/mixin/llm_judge_mixin.py +2 -0
  12. evalscope/api/mixin/sandbox_mixin.py +204 -0
  13. evalscope/api/model/generate_config.py +0 -3
  14. evalscope/api/model/model.py +1 -1
  15. evalscope/api/tool/tool_info.py +1 -1
  16. evalscope/app/ui/multi_model.py +6 -1
  17. evalscope/app/ui/single_model.py +8 -2
  18. evalscope/app/utils/data_utils.py +3 -2
  19. evalscope/app/utils/visualization.py +2 -2
  20. evalscope/arguments.py +6 -0
  21. evalscope/benchmarks/ai2d/ai2d_adapter.py +54 -0
  22. evalscope/benchmarks/amc/__init__.py +0 -0
  23. evalscope/benchmarks/amc/amc_adapter.py +46 -0
  24. evalscope/benchmarks/bbh/bbh_adapter.py +43 -17
  25. evalscope/benchmarks/bfcl/bfcl_adapter.py +106 -2
  26. evalscope/benchmarks/bfcl/generation.py +7 -7
  27. evalscope/benchmarks/blink/__init__.py +0 -0
  28. evalscope/benchmarks/blink/blink_adapter.py +61 -0
  29. evalscope/benchmarks/chartqa/__init__.py +0 -0
  30. evalscope/benchmarks/chartqa/chartqa_adapter.py +80 -0
  31. evalscope/benchmarks/chartqa/utils.py +38 -0
  32. evalscope/benchmarks/docvqa/__init__.py +0 -0
  33. evalscope/benchmarks/docvqa/docvqa_adapter.py +67 -0
  34. evalscope/benchmarks/drop/drop_adapter.py +1 -1
  35. evalscope/benchmarks/general_arena/utils.py +2 -1
  36. evalscope/benchmarks/healthbench/__init__.py +0 -0
  37. evalscope/benchmarks/healthbench/healthbench_adapter.py +282 -0
  38. evalscope/benchmarks/healthbench/utils.py +102 -0
  39. evalscope/benchmarks/hle/hle_adapter.py +3 -2
  40. evalscope/benchmarks/humaneval/humaneval_adapter.py +19 -35
  41. evalscope/benchmarks/humaneval/utils.py +235 -0
  42. evalscope/benchmarks/infovqa/__init__.py +0 -0
  43. evalscope/benchmarks/infovqa/infovqa_adapter.py +66 -0
  44. evalscope/benchmarks/live_code_bench/evaluate_utils.py +13 -6
  45. evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +60 -37
  46. evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0
  47. evalscope/benchmarks/math_500/math_500_adapter.py +0 -1
  48. evalscope/benchmarks/minerva_math/__init__.py +0 -0
  49. evalscope/benchmarks/minerva_math/minerva_math_adapter.py +48 -0
  50. evalscope/benchmarks/mm_bench/__init__.py +0 -0
  51. evalscope/benchmarks/mm_bench/mm_bench_adapter.py +99 -0
  52. evalscope/benchmarks/mm_star/__init__.py +0 -0
  53. evalscope/benchmarks/mm_star/mm_star_adapter.py +73 -0
  54. evalscope/benchmarks/mmmu/mmmu_adapter.py +1 -1
  55. evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +4 -9
  56. evalscope/benchmarks/multi_if/__init__.py +0 -0
  57. evalscope/benchmarks/multi_if/ifeval.py +3354 -0
  58. evalscope/benchmarks/multi_if/metrics.py +120 -0
  59. evalscope/benchmarks/multi_if/multi_if_adapter.py +161 -0
  60. evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +1 -4
  61. evalscope/benchmarks/ocr_bench/__init__.py +0 -0
  62. evalscope/benchmarks/ocr_bench/ocr_bench_adapter.py +101 -0
  63. evalscope/benchmarks/ocr_bench_v2/IoUscore_metric.py +87 -0
  64. evalscope/benchmarks/ocr_bench_v2/TEDS_metric.py +963 -0
  65. evalscope/benchmarks/ocr_bench_v2/__init__.py +0 -0
  66. evalscope/benchmarks/ocr_bench_v2/ocr_bench_v2_adapter.py +161 -0
  67. evalscope/benchmarks/ocr_bench_v2/page_ocr_metric.py +50 -0
  68. evalscope/benchmarks/ocr_bench_v2/parallel.py +46 -0
  69. evalscope/benchmarks/ocr_bench_v2/spotting_eval/__init__.py +0 -0
  70. evalscope/benchmarks/ocr_bench_v2/spotting_eval/readme.txt +26 -0
  71. evalscope/benchmarks/ocr_bench_v2/spotting_eval/rrc_evaluation_funcs_1_1.py +537 -0
  72. evalscope/benchmarks/ocr_bench_v2/spotting_eval/script.py +481 -0
  73. evalscope/benchmarks/ocr_bench_v2/spotting_metric.py +179 -0
  74. evalscope/benchmarks/ocr_bench_v2/utils.py +432 -0
  75. evalscope/benchmarks/ocr_bench_v2/vqa_metric.py +254 -0
  76. evalscope/benchmarks/olympiad_bench/__init__.py +0 -0
  77. evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +163 -0
  78. evalscope/benchmarks/olympiad_bench/utils.py +565 -0
  79. evalscope/benchmarks/omni_bench/__init__.py +0 -0
  80. evalscope/benchmarks/omni_bench/omni_bench_adapter.py +86 -0
  81. evalscope/benchmarks/real_world_qa/__init__.py +0 -0
  82. evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +64 -0
  83. evalscope/benchmarks/tau_bench/tau_bench_adapter.py +6 -1
  84. evalscope/config.py +24 -1
  85. evalscope/constants.py +3 -0
  86. evalscope/evaluator/evaluator.py +25 -7
  87. evalscope/metrics/metric.py +78 -2
  88. evalscope/metrics/metrics.py +16 -0
  89. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +0 -0
  90. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +0 -0
  91. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +0 -0
  92. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +0 -0
  93. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +0 -0
  94. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +0 -0
  95. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +0 -0
  96. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +0 -0
  97. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +0 -0
  98. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +0 -0
  99. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +2 -6
  100. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +2 -6
  101. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +2 -6
  102. evalscope/models/model_apis.py +10 -8
  103. evalscope/models/utils/openai.py +1 -2
  104. evalscope/perf/arguments.py +2 -0
  105. evalscope/perf/plugin/api/base.py +2 -2
  106. evalscope/perf/plugin/api/default_api.py +7 -7
  107. evalscope/perf/plugin/api/openai_api.py +83 -19
  108. evalscope/perf/plugin/datasets/flickr8k.py +2 -2
  109. evalscope/perf/plugin/datasets/kontext_bench.py +2 -2
  110. evalscope/perf/plugin/datasets/random_vl_dataset.py +2 -2
  111. evalscope/perf/utils/benchmark_util.py +1 -2
  112. evalscope/report/__init__.py +9 -1
  113. evalscope/report/combinator.py +45 -20
  114. evalscope/report/report.py +8 -4
  115. evalscope/run.py +1 -1
  116. evalscope/utils/function_utils.py +41 -0
  117. evalscope/utils/import_utils.py +63 -13
  118. evalscope/utils/io_utils.py +19 -11
  119. evalscope/utils/json_schema.py +25 -2
  120. evalscope/utils/logger.py +19 -0
  121. evalscope/utils/model_utils.py +1 -1
  122. evalscope/utils/multi_choices.py +16 -1
  123. evalscope/version.py +2 -2
  124. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/METADATA +10 -40
  125. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/RECORD +120 -95
  126. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/top_level.txt +0 -1
  127. tests/__init__.py +0 -1
  128. tests/benchmark/__init__.py +0 -1
  129. tests/benchmark/test_eval.py +0 -385
  130. tests/benchmark/test_image_edit.py +0 -65
  131. tests/benchmark/test_t2i.py +0 -142
  132. tests/benchmark/test_vlm.py +0 -80
  133. tests/cli/__init__.py +0 -1
  134. tests/cli/test_all.py +0 -269
  135. tests/cli/test_collection.py +0 -99
  136. tests/cli/test_custom.py +0 -268
  137. tests/cli/test_reasoning.py +0 -81
  138. tests/common.py +0 -73
  139. tests/perf/__init__.py +0 -1
  140. tests/perf/test_perf.py +0 -178
  141. tests/rag/test_clip_benchmark.py +0 -87
  142. tests/rag/test_mteb.py +0 -213
  143. tests/rag/test_ragas.py +0 -128
  144. tests/swift/__init__.py +0 -1
  145. tests/swift/test_run_swift_eval.py +0 -146
  146. tests/swift/test_run_swift_vlm_eval.py +0 -128
  147. tests/swift/test_run_swift_vlm_jugde_eval.py +0 -157
  148. tests/test_run_all.py +0 -12
  149. tests/utils.py +0 -13
  150. tests/vlm/__init__.py +0 -1
  151. tests/vlm/test_vlmeval.py +0 -102
  152. {tests/rag → evalscope/benchmarks/ai2d}/__init__.py +0 -0
  153. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/LICENSE +0 -0
  154. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/WHEEL +0 -0
  155. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/entry_points.txt +0 -0
tests/cli/test_all.py DELETED
@@ -1,269 +0,0 @@
- # Copyright (c) Alibaba, Inc. and its affiliates.
- from dotenv import dotenv_values
-
- env = dotenv_values('.env')
-
- import os
- import unittest
-
- from evalscope.config import TaskConfig
- from evalscope.constants import EvalType, JudgeStrategy, OutputType
- from evalscope.run import run_task
- from evalscope.utils.logger import get_logger
- from tests.utils import test_level_list
-
- os.environ['EVALSCOPE_LOG_LEVEL'] = 'DEBUG'
-
- logger = get_logger()
-
- datasets=[
-     'iquiz',
-     'ifeval',
-     'mmlu',
-     'mmlu_pro',
-     'musr',
-     'process_bench',
-     'race',
-     'trivia_qa',
-     'cmmlu',
-     'humaneval',
-     'gsm8k',
-     'bbh',
-     'competition_math',
-     'math_500',
-     'aime24',
-     'gpqa_diamond',
-     'arc',
-     'ceval',
-     'hellaswag',
-     'general_mcq',
-     'general_qa',
-     'super_gpqa',
-     # 'live_code_bench',
-     'mmlu_redux',
-     'simple_qa',
-     'chinese_simpleqa',
-     'alpaca_eval',
-     'arena_hard',
-     'maritime_bench',
-     'drop',
-     'winogrande',
-     'tool_bench',
-     'frames',
-     'docmath',
-     'needle_haystack',
-     'bfcl_v3',
-     'hle',
-     'tau_bench',
- ]
-
- # Reverse the datasets list to ensure the order is from most recent to oldest
- datasets.reverse()
-
- dataset_args={
-     'mmlu': {
-         'subset_list': ['elementary_mathematics', 'high_school_european_history', 'nutrition'],
-         'few_shot_num': 0
-     },
-     'mmlu_pro': {
-         'subset_list': ['math', 'health'],
-         'few_shot_num': 4
-     },
-     'ceval': {
-         'subset_list': [
-             'computer_network', 'operating_system', 'computer_architecture'
-         ],
-         'few_shot_num': 0
-     },
-     'cmmlu': {
-         'subset_list': ['elementary_chinese'],
-         'few_shot_num': 0
-     },
-     'bbh': {
-         'subset_list': ['word_sorting', 'movie_recommendation'],
-     },
-     'gpqa_diamond': {
-         'few_shot_num': 0,
-     },
-     'humaneval': {
-         'metric_list': ['Pass@1', 'Pass@2', 'Pass@5'],
-     },
-     'competition_math': {
-         'subset_list': ['Level 1']
-     },
-     'math_500': {
-         'subset_list': ['Level 1']
-     },
-     'process_bench': {
-         'subset_list': ['gsm8k'],
-     },
-     'musr': {
-         'subset_list': ['murder_mysteries']
-     },
-     'general_mcq': {
-         'local_path': 'custom_eval/text/mcq',  # path to the custom dataset
-         'subset_list': [
-             'example'  # evaluation dataset name, i.e. the * in *_dev.csv above
-         ],
-     },
-     'general_qa': {
-         'local_path': 'custom_eval/text/qa',  # path to the custom dataset
-         'subset_list': [
-             'example',  # evaluation dataset name, i.e. the * in *_dev.csv above
-             # 'test'
-         ]
-     },
-     'super_gpqa': {
-         'subset_list': ['Philosophy', 'Education'],
-         'few_shot_num': 0
-     },
-     'live_code_bench': {
-         'subset_list': ['v4_v5'],
-         'extra_params': {
-             'start_date': '2024-12-01',
-             'end_date': '2025-01-01'
-         },
-     },
-     'chinese_simpleqa': {
-         'subset_list': ['中华文化']
-     },
-     'mmlu_redux':{
-         'subset_list': ['abstract_algebra']
-     },
-     'docmath':{
-         'subset_list': ['simpshort_testmini']
-     },
-     'bfcl_v3':{
-         'subset_list': ['simple', 'multiple']
-     },
-     'hle': {
-         'subset_list': ['Math', 'Other'],
-     },
-     'tau_bench': {
-         'extra_params': {
-             'user_model': 'qwen-plus',
-             'api_key': env.get('DASHSCOPE_API_KEY'),
-             'api_base': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
-         },
-         'subset_list': ['airline'],
-     },
- }
-
- class TestRun(unittest.TestCase):
-     def test_benchmarks(self):
-         from evalscope.config import TaskConfig
-
-         task_cfg = TaskConfig(
-             model='qwen-plus',
-             api_url='https://dashscope.aliyuncs.com/compatible-mode/v1',
-             api_key= env.get('DASHSCOPE_API_KEY'),
-             eval_type=EvalType.SERVICE,
-             datasets=datasets,
-             dataset_args=dataset_args,
-             eval_batch_size=1,
-             limit=1,
-             stream=True,
-             generation_config={
-                 'temperature': 0,
-                 'n': 1,
-                 'max_tokens': 4096,
-             },
-             judge_worker_num=5,
-             judge_strategy=JudgeStrategy.AUTO,
-             judge_model_args={
-                 'model_id': 'qwen2.5-72b-instruct',
-                 'api_url': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
-                 'api_key': env.get('DASHSCOPE_API_KEY'),
-             }
-         )
-
-         run_task(task_cfg=task_cfg)
-
-     def test_vlm_benchmark(self):
-         from evalscope.config import TaskConfig
-
-         task_cfg = TaskConfig(
-             model='qwen-vl-plus',
-             api_url='https://dashscope.aliyuncs.com/compatible-mode/v1',
-             api_key= env.get('DASHSCOPE_API_KEY'),
-             eval_type=EvalType.SERVICE,
-             datasets=[
-                 'mmmu',
-                 # 'math_vista',
-             ],
-             dataset_args={
-                 'mmmu': {
-                     'subset_list': ['Accounting']
-                 },
-                 'math_vista': {
-                     'subset_list': ['default']
-                 }
-             },
-             eval_batch_size=1,
-             limit=1,
-             stream=True,
-             generation_config={
-                 'temperature': 0,
-                 'n': 1,
-                 'max_tokens': 4096,
-                 'image_height': 512,
-                 'image_width': 512,
-                 'image_num': 2,
-             },
-             judge_worker_num=5,
-             judge_strategy=JudgeStrategy.AUTO,
-             judge_model_args={
-                 'model_id': 'qwen2.5-72b-instruct',
-                 'api_url': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
-                 'api_key': env.get('DASHSCOPE_API_KEY'),
-             }
-         )
-
-         run_task(task_cfg=task_cfg)
-
-     def test_ci_lite(self):
-         from evalscope.config import TaskConfig
-
-         api_key = env.get('DASHSCOPE_API_KEY')
-
-         task_cfg = TaskConfig(
-             model='qwen-plus',
-             api_url='https://dashscope.aliyuncs.com/compatible-mode/v1',
-             api_key=api_key,
-             eval_type=EvalType.SERVICE if api_key else EvalType.MOCK_LLM,
-             datasets=[
-                 'general_mcq',
-                 'iquiz',
-             ],
-             dataset_args={
-                 'general_mcq': {
-                     'local_path': 'custom_eval/text/mcq',
-                     'subset_list': [
-                         'example'
-                     ],
-                 },
-                 'general_qa': {
-                     'local_path': 'custom_eval/text/qa',
-                     'subset_list': [
-                         'example'
-                     ]
-                 }
-             },
-             eval_batch_size=1,
-             limit=1,
-             stream=True,
-             generation_config={
-                 'temperature': 0,
-                 'n': 1,
-                 'max_tokens': 4096,
-             },
-             judge_worker_num=1,
-             judge_strategy=JudgeStrategy.AUTO,
-             judge_model_args={
-                 'model_id': 'qwen2.5-72b-instruct',
-                 'api_url': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
-                 'api_key': env.get('DASHSCOPE_API_KEY'),
-             }
-         )
-
-         run_task(task_cfg=task_cfg)
tests/cli/test_collection.py DELETED
@@ -1,99 +0,0 @@
- from dotenv import dotenv_values
-
- env = dotenv_values('.env')
- import json
- import os
- import unittest
-
- from evalscope.collections import CollectionSchema, DatasetInfo, WeightedSampler
- from evalscope.constants import EvalType, JudgeStrategy
- from evalscope.utils.io_utils import dump_jsonl_data
- from tests.utils import test_level_list
-
-
- class TestCollection(unittest.TestCase):
-     @unittest.skipUnless(0 in test_level_list(), 'skip test in current test level')
-     def test_create_collection(self):
-         schema = CollectionSchema(name='math&reasoning', datasets=[
-             CollectionSchema(name='math', datasets=[
-                 CollectionSchema(name='generation', datasets=[
-                     DatasetInfo(name='gsm8k', weight=1, task_type='math', tags=['en', 'math']),
-                 ]),
-                 CollectionSchema(name='multiple_choice', datasets=[
-                     DatasetInfo(name='cmmlu', weight=2, task_type='math', tags=['zh', 'math'], args={'subset_list': ['college_mathematics', 'high_school_mathematics']}),
-                     DatasetInfo(name='ceval', weight=3, task_type='math', tags=['zh', 'math'], args={'subset_list': ['advanced_mathematics', 'high_school_mathematics', 'discrete_mathematics', 'middle_school_mathematics']}),
-                 ]),
-             ]),
-             CollectionSchema(name='reasoning', datasets=[
-                 DatasetInfo(name='arc', weight=1, task_type='reasoning', tags=['en', 'reasoning']),
-                 DatasetInfo(name='ceval', weight=1, task_type='reasoning', tags=['zh', 'reasoning'], args={'subset_list': ['logic']}),
-                 DatasetInfo(name='race', weight=1, task_type='reasoning', tags=['en', 'reasoning']),
-             ]),
-         ])
-         print(schema.to_dict())
-         print(schema.flatten())
-         schema.dump_json('outputs/schema_test.json')
-
-
-     @unittest.skipUnless(0 in test_level_list(), 'skip test in current test level')
-     def test_generate_data(self):
-         schema = CollectionSchema.from_dict(json.load(open('outputs/schema_test.json', 'r')))
-         print(schema.to_dict())
-         mixed_data = WeightedSampler(schema).sample(100)
-         dump_jsonl_data(mixed_data, 'outputs/mixed_data_test.jsonl')
-
-     @unittest.skipUnless(0 in test_level_list(), 'skip test in current test level')
-     def test_evaluate_collection(self):
-         from evalscope import TaskConfig, run_task
-
-         task_cfg = TaskConfig(
-             model='qwen-plus',
-             api_url='https://dashscope.aliyuncs.com/compatible-mode/v1',
-             api_key=env.get('DASHSCOPE_API_KEY'),
-             eval_type=EvalType.SERVICE,
-             datasets=['data_collection'],
-             dataset_args={
-                 'data_collection': {
-                     # 'local_path': 'outputs/test_mix.jsonl'
-                     'local_path': 'outputs/mixed_data_test.jsonl',
-                     'shuffle': True,
-                 }
-             },
-             eval_batch_size=5,
-             generation_config = {
-                 'max_tokens': 10000,
-                 'temperature': 0.0,
-             },
-             limit=10,
-             # use_cache='outputs/20250822_161804'
-         )
-         run_task(task_cfg=task_cfg)
-
-
-     @unittest.skipUnless(0 in test_level_list(), 'skip test in current test level')
-     def test_evaluate_collection_with_judge(self):
-         from evalscope import TaskConfig, run_task
-
-         task_cfg = TaskConfig(
-             model='qwen2.5-7b-instruct',
-             api_url='https://dashscope.aliyuncs.com/compatible-mode/v1',
-             api_key= os.getenv('DASHSCOPE_API_KEY'),
-             eval_type=EvalType.SERVICE,
-             datasets=['data_collection'],
-             dataset_args={'data_collection': {
-                 'local_path': 'outputs/mixed_data_test.jsonl'
-                 # 'local_path': 'outputs/weighted_mixed_data.jsonl'
-             }},
-             limit=5,
-             judge_strategy=JudgeStrategy.AUTO,
-             judge_model_args={
-                 'model_id': 'qwen2.5-72b-instruct',
-                 'api_url': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
-                 'api_key': os.getenv('DASHSCOPE_API_KEY'),
-             },
-             analysis_report=True,
-             ignore_errors=True,
-             # use_cache='outputs/20250522_204520'
-         )
-         res = run_task(task_cfg=task_cfg)
-         print(res)
tests/cli/test_custom.py DELETED
@@ -1,268 +0,0 @@
- # Copyright (c) Alibaba, Inc. and its affiliates.
- from dotenv import dotenv_values
-
- from tests.utils import test_level_list
-
- env = dotenv_values('.env')
-
- import os
- import subprocess
- import unittest
-
- from evalscope.config import TaskConfig
- from evalscope.constants import EvalType, JudgeStrategy, OutputType
- from evalscope.run import run_task
- from evalscope.utils.import_utils import is_module_installed
- from evalscope.utils.logger import get_logger
-
- os.environ['EVALSCOPE_LOG_LEVEL'] = 'DEBUG'
-
- logger = get_logger()
-
-
- class TestRunCustom(unittest.TestCase):
-     @unittest.skipUnless(0 in test_level_list(), 'skip test in current test level')
-     def test_run_custom_task(self):
-         from evalscope.config import TaskConfig
-
-         task_cfg = TaskConfig(
-             model='Qwen/Qwen3-0.6B',
-             datasets=[
-                 'general_mcq',
-                 'general_qa'
-             ],
-             dataset_args={
-                 'general_mcq': {
-                     'local_path': 'custom_eval/text/mcq',  # path to the custom dataset
-                     'subset_list': [
-                         'example'  # evaluation dataset name, i.e. the * in *_dev.csv above
-                     ],
-                     'query_template': 'Question: {question}\n{choices}\nAnswer: {answer}'  # question template
-                 },
-                 'general_qa': {
-                     'local_path': 'custom_eval/text/qa',  # path to the custom dataset
-                     'subset_list': [
-                         'example'  # evaluation dataset name, i.e. the * in *_dev.csv above
-                     ]
-                 }
-             },
-         )
-         res = run_task(task_cfg=task_cfg)
-         print(res)
-
-
-     @unittest.skipUnless(0 in test_level_list(), 'skip test in current test level')
-     def test_run_local_dataset(self):
-         from evalscope.config import TaskConfig
-
-         task_cfg = TaskConfig(
-             model='qwen-plus',
-             api_url='https://dashscope.aliyuncs.com/compatible-mode/v1',
-             api_key= env.get('DASHSCOPE_API_KEY'),
-             eval_type=EvalType.SERVICE,
-             datasets=[
-                 # 'mmlu',
-                 # 'race',
-                 'trivia_qa',
-                 # 'cmmlu',
-                 # 'humaneval',
-                 # 'gsm8k',
-                 # 'bbh',
-                 # 'competition_math',
-                 # 'arc',
-                 # 'ceval',
-             ],
-             dataset_args={
-                 'mmlu': {
-                     'subset_list': ['elementary_mathematics', 'high_school_european_history', 'nutrition'],
-                     'few_shot_num': 0,
-                     'dataset_id': 'data/data/mmlu',
-                 },
-                 'ceval': {
-                     'subset_list': [
-                         'computer_network', 'operating_system', 'computer_architecture'
-                     ],
-                     'few_shot_num': 0,
-                     'dataset_id': 'data/data/ceval',
-                 },
-                 'cmmlu': {
-                     'subset_list': ['elementary_chinese'],
-                     'dataset_id': 'data/data/cmmlu',
-                     'few_shot_num': 0
-                 },
-                 'bbh': {
-                     'subset_list': ['word_sorting', 'movie_recommendation'],
-                 },
-                 'humaneval': {
-                     'metric_list': ['Pass@1', 'Pass@2', 'Pass@5'],
-                 },
-                 'trivia_qa': {
-                     'dataset_id': 'data/data/trivia_qa',
-                 },
-             },
-             eval_batch_size=10,
-             limit=5,
-             debug=True,
-             stream=True,
-             generation_config={
-                 'temperature': 0,
-                 'n': 1,
-                 'max_tokens': 4096,
-             },
-             ignore_errors=False,
-         )
-
-         run_task(task_cfg=task_cfg)
-
-
-     @unittest.skipUnless(0 in test_level_list(), 'skip test in current test level')
-     def test_run_general_no_answer(self):
-         from evalscope.config import TaskConfig
-
-         task_cfg = TaskConfig(
-             model='qwen2.5-7b-instruct',
-             api_url='https://dashscope.aliyuncs.com/compatible-mode/v1',
-             api_key= env.get('DASHSCOPE_API_KEY'),
-             eval_type=EvalType.SERVICE,
-             datasets=[
-                 'general_qa',
-             ],
-             dataset_args={
-                 'general_qa': {
-                     'dataset_id': 'custom_eval/text/qa',
-                     'subset_list': [
-                         'arena',
-                         # 'example'
-                     ],
-                 }
-             },
-             eval_batch_size=10,
-             limit=10,
-             debug=True,
-             stream=True,
-             generation_config={
-                 'temperature': 0,
-                 'n': 1,
-                 'max_tokens': 4096,
-             },
-             ignore_errors=False,
-             judge_model_args={
-                 'model_id': 'qwen2.5-7b-instruct',
-                 'api_url': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
-                 'api_key': env.get('DASHSCOPE_API_KEY'),
-                 'generation_config': {
-                     'temperature': 0.0,
-                     'max_tokens': 4096
-                 },
-                 'score_type': 'numeric',
-                 'prompt_template': """Please act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question displayed below. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of the response.
- Begin your evaluation by providing a short explanation. Be as objective as possible.
- After providing your explanation, you must rate the response on a scale of 0 (worst) to 100 (best) by strictly following this format: \"[[rating]]\", for example: \"Rating: [[5]]\"
-
- [Question]
- {question}
-
- [Response]
- {pred}
- """
-             },
-             judge_worker_num=5,
-             judge_strategy=JudgeStrategy.LLM,
-         )
-
-         run_task(task_cfg=task_cfg)
-
-
-     @unittest.skipUnless(0 in test_level_list(), 'skip test in current test level')
-     def test_run_general_with_answer(self):
-         from evalscope.config import TaskConfig
-
-         task_cfg = TaskConfig(
-             model='qwen-plus',
-             api_url='https://dashscope.aliyuncs.com/compatible-mode/v1',
-             api_key= env.get('DASHSCOPE_API_KEY'),
-             eval_type=EvalType.SERVICE,
-             datasets=[
-                 'general_qa',
-             ],
-             dataset_args={
-                 'general_qa': {
-                     'dataset_id': 'custom_eval/text/qa',
-                     'subset_list': [
-                         'example'
-                     ],
-                 }
-             },
-             eval_batch_size=10,
-             limit=10,
-             debug=True,
-             stream=True,
-             generation_config={
-                 'temperature': 0,
-                 'n': 1,
-                 'max_tokens': 4096,
-             },
-             ignore_errors=False,
-             judge_model_args={
-                 'model_id': 'qwen2.5-72b-instruct',
-                 'api_url': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
-                 'api_key': env.get('DASHSCOPE_API_KEY'),
-                 'generation_config': {
-                     'temperature': 0.0,
-                     'max_tokens': 4096
-                 },
-                 'score_type': 'pattern',
-             },
-             judge_worker_num=1,
-             judge_strategy=JudgeStrategy.LLM_RECALL,
-             use_cache='outputs/20250818_170420'
-         )
-
-         run_task(task_cfg=task_cfg)
-
-
-     @unittest.skipUnless(0 in test_level_list(), 'skip test in current test level')
-     def test_run_general_arena(self):
-         from evalscope.config import TaskConfig
-
-         task_cfg = TaskConfig(
-             model_id='Arena',
-             datasets=[
-                 'general_arena',
-             ],
-             dataset_args={
-                 'general_arena': {
-                     'extra_params':{
-                         'models':[
-                             {
-                                 'name': 'qwen2.5-7b',
-                                 'report_path': 'outputs/20250819_165034/reports/qwen2.5-7b-instruct'
-                             },
-                             {
-                                 'name': 'qwen2.5-72b',
-                                 'report_path': 'outputs/20250819_164926/reports/qwen2.5-72b-instruct'
-                             }
-                         ],
-                         'baseline': 'qwen2.5-72b'
-                     }
-                 }
-             },
-             eval_batch_size=10,
-             limit=10,
-             debug=True,
-             stream=True,
-             ignore_errors=False,
-             judge_model_args={
-                 'model_id': 'qwen-plus',
-                 'api_url': 'https://dashscope.aliyuncs.com/compatible-mode/v1',
-                 'api_key': env.get('DASHSCOPE_API_KEY'),
-                 'generation_config': {
-                     'temperature': 0.0,
-                     'max_tokens': 8000
-                 },
-             },
-             judge_worker_num=5,
-             # use_cache='outputs/20250819_173546'
-         )
-
-         run_task(task_cfg=task_cfg)